diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index 2eebd0cc56c66..51ed51d1a696a 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -95,6 +95,8 @@
 import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters;
 import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
 import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
@@ -1760,6 +1762,7 @@ static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
         map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c));
         map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c));
         map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c));
+        map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c));
         map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
         map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
         map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));
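The registration above is what lets the high-level client parse `geotile_grid` buckets
out of a search response. A minimal usage sketch, not part of this change: the `client`,
`request`, and the aggregation name `large-grid` are illustrative, and it assumes the
`GeoGrid` bucket interface from the same `geogrid` package.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid;

import java.io.IOException;

class GeoTileGridExample {
    // Assumes `request` contains a geotile_grid aggregation named "large-grid".
    static void printGrid(RestHighLevelClient client, SearchRequest request) throws IOException {
        SearchResponse response = client.search(request, RequestOptions.DEFAULT);
        GeoGrid grid = response.getAggregations().get("large-grid");
        for (GeoGrid.Bucket bucket : grid.getBuckets()) {
            // bucket keys use the "{zoom}/{x}/{y}" format, e.g. "8/131/84"
            System.out.println(bucket.getKeyAsString() + " -> " + bucket.getDocCount());
        }
    }
}
--------------------------------------------------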
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
index dc9f246c7b878..2044a5ac56c92 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java
@@ -182,6 +182,7 @@ public void testClusterHealthYellowClusterLevel() throws IOException {
         assertThat(response.getIndices().size(), equalTo(0));
     }
 
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450")
     public void testClusterHealthYellowIndicesLevel() throws IOException {
         createIndex("index", Settings.EMPTY);
         createIndex("index2", Settings.EMPTY);
diff --git a/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc
new file mode 100644
index 0000000000000..ac173ec2b002f
--- /dev/null
+++ b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc
@@ -0,0 +1,185 @@
+[[search-aggregations-bucket-geotilegrid-aggregation]]
+=== GeoTile Grid Aggregation
+
+A multi-bucket aggregation that works on `geo_point` fields and groups points into
+buckets that represent cells in a grid. The resulting grid can be sparse and only
+contains cells that have matching data. Each cell corresponds to a
+https://en.wikipedia.org/wiki/Tiled_web_map[map tile] as used by many online map
+sites. Each cell is labeled using a "{zoom}/{x}/{y}" format, where zoom is equal
+to the user-specified precision.
+
+* High precision keys have a larger range for x and y, and represent tiles that
+cover only a small area.
+* Low precision keys have a smaller range for x and y, and represent tiles that
+each cover a large area.
+
+See https://wiki.openstreetmap.org/wiki/Zoom_levels[Zoom level documentation]
+on how precision (zoom) correlates to size on the ground. Precision for this
+aggregation can be between 0 and 29, inclusive.
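As a worked illustration of the "{zoom}/{x}/{y}" key format described above — an
editorial sketch, not part of this change — a cell key can be derived from a
coordinate as follows, assuming the standard OSM/Web-Mercator tiling scheme
referenced above (extreme latitudes beyond ~±85.05° would need clamping):

[source,java]
--------------------------------------------------
class GeoTileKey {
    static String geotileKey(double lat, double lon, int zoom) {
        double latRad = Math.toRadians(lat);
        long tiles = 1L << zoom; // 2^zoom tiles per axis
        long x = (long) Math.floor((lon + 180.0) / 360.0 * tiles);
        long y = (long) Math.floor(
            (1.0 - Math.log(Math.tan(latRad) + 1.0 / Math.cos(latRad)) / Math.PI) / 2.0 * tiles);
        return zoom + "/" + x + "/" + y;
    }
    // geotileKey(52.374081, 4.912350, 8) -> "8/131/84",
    // the first bucket in the example response below.
}
--------------------------------------------------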
+ "aggregations" : { + "zoomed-in" : { + "doc_count" : 3, + "zoom1" : { + "buckets" : [ + { + "key" : "22/2154412/1378379", + "doc_count" : 1 + }, + { + "key" : "22/2154385/1378332", + "doc_count" : 1 + }, + { + "key" : "22/2154259/1378425", + "doc_count" : 1 + } + ] + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] + + +==== Options + +[horizontal] +field:: Mandatory. The name of the field indexed with GeoPoints. + +precision:: Optional. The integer zoom of the key used to define + cells/buckets in the results. Defaults to 7. + Values outside of [0,29] will be rejected. + +size:: Optional. The maximum number of geohash buckets to return + (defaults to 10,000). When results are trimmed, buckets are + prioritised based on the volumes of documents they contain. + +shard_size:: Optional. To allow for more accurate counting of the top cells + returned in the final result the aggregation defaults to + returning `max(10,(size x number-of-shards))` buckets from each + shard. If this heuristic is undesirable, the number considered + from each shard can be over-ridden using this parameter. diff --git a/docs/reference/ingest/processors/user-agent.asciidoc b/docs/reference/ingest/processors/user-agent.asciidoc index f6b6d46fe7b9d..942ba9f148799 100644 --- a/docs/reference/ingest/processors/user-agent.asciidoc +++ b/docs/reference/ingest/processors/user-agent.asciidoc @@ -67,7 +67,9 @@ Which returns "version": "10.10.5", "full": "Mac OS X 10.10.5" }, - "device": "Other" + "device" : { + "name" : "Other" + }, } } } diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 5b7138ffa3f78..db8b3a3d17c53 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -4,7 +4,8 @@ IMPORTANT: Indices created in Elasticsearch 6.0.0 or later may only contain a single <>. Indices created in 5.x with multiple mapping types will continue to function as before in Elasticsearch 6.x. -Mapping types will be completely removed in Elasticsearch 7.0.0. +Types will be deprecated in APIs in Elasticsearch 7.0.0, and completely +removed in 8.0.0. [float] === What are mapping types? @@ -161,12 +162,12 @@ You can achieve the same thing by adding a custom `type` field as follows: [source,js] ---- -PUT twitter?include_type_name=true <1> +PUT twitter { "mappings": { "_doc": { "properties": { - "type": { "type": "keyword" }, <2> + "type": { "type": "keyword" }, <1> "name": { "type": "text" }, "user_name": { "type": "keyword" }, "email": { "type": "keyword" }, @@ -204,7 +205,7 @@ GET twitter/_search }, "filter": { "match": { - "type": "tweet" <2> + "type": "tweet" <1> } } } @@ -212,9 +213,7 @@ GET twitter/_search } ---- // NOTCONSOLE -<1> Use `include_type_name=true` in case need to use the "old" syntax including the "_doc" object like -in this example -<2> The explicit `type` field takes the place of the implicit `_type` field. +<1> The explicit `type` field takes the place of the implicit `_type` field. [float] ==== Parent/Child without mapping types @@ -258,30 +257,28 @@ Elasticsearch 6.x:: * The `_default_` mapping type is deprecated. +* In 6.7, the index creation, index template, and mapping APIs support a query + string parameter (`include_type_name`) which indicates whether requests and + responses should include a type name. 
 
 Elasticsearch 7.x::
 
-* The `type` parameter in URLs are deprecated. For instance, indexing
-  a document no longer requires a document `type`.  The new index APIs
+* Specifying types in requests is deprecated. For instance, indexing a
+  document no longer requires a document `type`.  The new index APIs
   are `PUT {index}/_doc/{id}` in case of explicit ids and `POST {index}/_doc`
   for auto-generated ids.
 
-* The index creation, `GET|PUT _mapping` and document APIs support a query
-  string parameter (`include_type_name`) which indicates whether requests and
-  responses should include a type name. It defaults to `true`.
-  7.x indices which don't have an explicit type will use the dummy type name
-  `_doc`. Not setting `include_type_name=false` will result in a deprecation
-  warning.
+* The `include_type_name` parameter in the index creation, index template,
+  and mapping APIs will default to `false`. Setting the parameter will result
+  in a deprecation warning.
 
 * The `_default_` mapping type is removed.
 
 Elasticsearch 8.x::
 
-* The `type` parameter is no longer supported in URLs.
-
-* The `include_type_name` parameter is deprecated, default to `false` and fails
-  the request when set to `true`.
-
-Elasticsearch 9.x::
+* Specifying types in requests is no longer supported.
 
 * The `include_type_name` parameter is removed.
 
@@ -427,17 +424,26 @@ POST _reindex
 // NOTCONSOLE
 
 [float]
-=== Use `include_type_name=false` to prepare for upgrade to 8.0
+=== Typeless APIs in 7.0
 
-Index creation and mapping APIs support a new `include_type_name` url parameter
-starting with version 6.7. It will default to `true` in version 6.7, default to
-`false` in version 7.0 and will be removed in version 8.0. When set to `true`,
-this parameter enables the pre-7.0 behavior of using type names in the API.
+In Elasticsearch 7.0, each API will support typeless requests,
+and specifying a type will produce a deprecation warning.
 
-See some examples of interactions with Elasticsearch with this option turned off:
+NOTE: Typeless APIs work even if the target index contains a custom type.
+For example, if an index has the custom type name `my_type`, we can add
+documents to it using typeless `index` calls, and load documents with typeless
+`get` calls.
 
 [float]
-==== Index creation
+==== Indices APIs
+
+Index creation, index template, and mapping APIs support a new `include_type_name`
+url parameter that specifies whether mapping definitions in requests and responses
+should contain the type name. The parameter defaults to `true` in version 6.7 to
+match the pre-7.0 behavior of using type names in mappings. It defaults to `false`
+in version 7.0 and will be removed in version 8.0.
+
+See some examples of interactions with Elasticsearch with this option provided:
 
 [source,js]
 --------------------------------------------------
@@ -455,27 +461,27 @@ PUT index?include_type_name=false
 // CONSOLE
 <1> Mappings are included directly under the `mappings` key, without a type name.
-[float]
-==== PUT and GET mappings
-
 [source,js]
 --------------------------------------------------
-PUT index
-
 PUT index/_mappings?include_type_name=false
 {
   "properties": { <1>
-    "foo": {
-      "type": "keyword"
+    "bar": {
+      "type": "text"
     }
   }
 }
-
-GET index/_mappings?include_type_name=false
 --------------------------------------------------
 // CONSOLE
+// TEST[continued]
 
 <1> Mappings are included directly under the `mappings` key, without a type name.
 
+[source,js]
+--------------------------------------------------
+GET index/_mappings?include_type_name=false
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
 
 The above call returns
 
@@ -487,6 +493,9 @@ The above call returns
       "properties": { <1>
         "foo": {
           "type": "keyword"
+        },
+        "bar": {
+          "type": "text"
         }
       }
     }
@@ -499,14 +508,14 @@ The above call returns
 
 [float]
 ==== Document APIs
 
-Index APIs must be called with the `{index}/_doc` path for automatic generation of
-the `_id` and `{index}/_doc/{id}` with explicit ids.
+In 7.0, index APIs must be called with the `{index}/_doc` path for automatic
+generation of the `_id` and `{index}/_doc/{id}` with explicit ids.
 
 [source,js]
 --------------------------------------------------
 PUT index/_doc/1
 {
-  "foo": "bar"
+  "foo": "baz"
 }
 --------------------------------------------------
 // CONSOLE
 
@@ -514,7 +523,7 @@ PUT index/_doc/1
 [source,js]
 --------------------------------------------------
 {
-  "_index": "index", <1>
+  "_index": "index",
   "_id": "1",
   "_type": "_doc",
   "_version": 1,
@@ -529,14 +538,98 @@ PUT index/_doc/1
 }
 --------------------------------------------------
 // TESTRESPONSE
-<1> The response does not include a `_type`.
 
-The <>, <>, <> and <> APIs
-will continue to return a `_type` key in the response in 7.0, but it is considered deprecated and will be
-removed in 8.0.
+Similarly, the `get` and `delete` APIs use the path `{index}/_doc/{id}`:
+
+[source,js]
+--------------------------------------------------
+GET index/_doc/1
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+For API paths that contain both a type and endpoint name like `_update`,
+in 7.0 the endpoint will immediately follow the index name:
+
+[source,js]
+--------------------------------------------------
+POST index/_update/1
+{
+    "doc" : {
+        "foo" : "qux"
+    }
+}
+
+GET /index/_source/1
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+Types should also no longer appear in the body of requests. The following
+example of bulk indexing omits the type both in the URL, and in the individual
+bulk commands:
+
+[source,js]
+--------------------------------------------------
+POST _bulk
+{ "index" : { "_index" : "index", "_id" : "3" } }
+{ "foo" : "baz" }
+{ "index" : { "_index" : "index", "_id" : "4" } }
+{ "foo" : "qux" }
+--------------------------------------------------
+// CONSOLE
+
+[float]
+==== Search APIs
+
+When calling a search API such as `_search`, `_msearch`, or `_explain`, types
+should not be included in the URL. Additionally, the `_type` field should not
+be used in queries, aggregations, or scripts.
+
+[float]
+==== Types in responses
+
+The document and search APIs will continue to return a `_type` key in
+responses, to avoid breaking response parsing. However, the key is
+considered deprecated and should no longer be referenced. Types will
+be completely removed from responses in 8.0.
+
+Note that when a deprecated typed API is used, the index's mapping type will be
+returned as normal, but typeless APIs will return the dummy type `_doc`
+in the response. For example, the following typeless `get` call will always
+return `_doc` as the type, even if the mapping has a custom type name like
+`my_type`:
+
+[source,js]
+--------------------------------------------------
+PUT index/my_type/1
+{
+  "foo": "baz"
+}
+
+GET index/_doc/1
+--------------------------------------------------
+// CONSOLE
+
+[source,js]
+--------------------------------------------------
+{
+    "_index" : "index",
+    "_type" : "_doc",
+    "_id" : "1",
+    "_version" : 1,
+    "_seq_no" : 0,
+    "_primary_term" : 1,
+    "found": true,
+    "_source" : {
+        "foo" : "baz"
+    }
+}
+--------------------------------------------------
+// TESTRESPONSE
 
 [float]
-=== Index templates
+==== Index templates
 
 It is recommended to make index templates typeless before upgrading to 7.0 by
 re-adding them with `include_type_name` set to `false`.
@@ -608,3 +701,16 @@
 In case of implicit index creation, because of documents that get indexed in
 an index that doesn't exist yet, the template is always honored. This is
 usually not a problem due to the fact that typeless index calls work on typed
 indices.
+
+[float]
+==== Mixed-version clusters
+
+In a cluster composed of both 6.7 and 7.0 nodes, the parameter
+`include_type_name` should be specified in indices APIs like index
+creation. This is because the parameter has a different default between
+6.7 and 7.0, so the same mapping definition will not be valid for both
+node versions.
+
+Typeless document APIs such as `bulk` and `update` are only available as of
+7.0, and will not work with 6.7 nodes. This also holds true for the typeless
+versions of queries that perform document lookups, such as `terms`.
diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc
index 0f3dcf9771c3d..afe96fd8851a9 100644
--- a/docs/reference/migration/migrate_7_0/search.asciidoc
+++ b/docs/reference/migration/migrate_7_0/search.asciidoc
@@ -159,8 +159,8 @@
 [float]
 ==== Negative boosts are not allowed
 
-Setting a negative `boost` in a query, deprecated in 6x, are not allowed in this version.
-To deboost a specific query you can use a `boost` comprise between 0 and 1.
+Setting a negative `boost` for a query or a field, deprecated in 6.x, is not allowed in this version.
+To deboost a specific query or field you can use a `boost` comprised between 0 and 1.
 
 [float]
 ==== Negative scores are not allowed in Function Score Query
diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc
index c6874856011ce..0b18c267748b5 100644
--- a/docs/reference/migration/migrate_7_0/settings.asciidoc
+++ b/docs/reference/migration/migrate_7_0/settings.asciidoc
@@ -145,6 +145,22 @@ You can enable TLS v1.0 by configuring the relevant `ssl.supported_protocols` setting:
 xpack.security.http.ssl.supported_protocols: [ "TLSv1.2", "TLSv1.1", "TLSv1" ]
 --------------------------------------------------
 
+[float]
+[[trial-explicit-security]]
+==== Security on Trial Licenses
+
+On trial licenses, `xpack.security.enabled` defaults to `false`.
+
+In prior versions, a trial license would automatically enable security if either
+
+* `xpack.security.transport.enabled` was `true`; _or_
+* the trial license was generated on a version of X-Pack from 6.2 or earlier.
+
+This behaviour has now been removed, so security is only enabled if:
+
+* `xpack.security.enabled` is `true`; _or_
+* `xpack.security.enabled` is not set, and a gold or platinum license is installed.
+
 [float]
 [[watcher-notifications-account-settings]]
 ==== Watcher notifications account settings
diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc
index b59f74198c3e8..186c8e8ee3837 100644
--- a/docs/reference/modules/cross-cluster-search.asciidoc
+++ b/docs/reference/modules/cross-cluster-search.asciidoc
@@ -65,7 +65,6 @@ GET /cluster_one:twitter/_search
 {
   "took": 150,
   "timed_out": false,
-  "num_reduce_phases": 2,
   "_shards": {
     "total": 1,
     "successful": 1,
diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc
index 39b7c191131ff..8104580e2998c 100644
--- a/docs/reference/sql/limitations.asciidoc
+++ b/docs/reference/sql/limitations.asciidoc
@@ -70,6 +70,12 @@
 When doing aggregations (`GROUP BY`) {es-sql} relies on {es}'s `composite` aggregation for its support.
 But this type of aggregation does come with a limitation: sorting can only be applied on the key used
 for the aggregation's buckets. This means that queries like `SELECT * FROM test GROUP BY age ORDER BY COUNT(*)`
 are not possible.
 
+[float]
+=== Using aggregation functions on top of scalar functions
+
+Aggregation functions like <>, <>, etc. can only be used
+directly on fields, and so queries like `SELECT MAX(abs(age)) FROM test` are not possible.
+
 [float]
 === Using a sub-select
 
@@ -92,7 +98,7 @@ But, if the sub-select would include a `GROUP BY` or `HAVING` or the enclosing `
 FROM (SELECT ...) WHERE [simple_condition]`, this is currently **un-supported**.
 
 [float]
-=== Use <>/<> aggregation functions in `HAVING` clause
+=== Using <>/<> aggregation functions in `HAVING` clause
 
 Using `FIRST` and `LAST` in the `HAVING` clause is not supported.
 The same applies to <> and <> when their target column
diff --git a/libs/ssl-config/src/main/eclipse.build.gradle b/libs/ssl-config/src/main/eclipse.build.gradle
new file mode 100644
index 0000000000000..58b2d7077120a
--- /dev/null
+++ b/libs/ssl-config/src/main/eclipse.build.gradle
@@ -0,0 +1,2 @@
+// this is just a shell gradle file for eclipse to have separate projects for ssl-config src and tests
+apply from: '../../build.gradle'
diff --git a/libs/ssl-config/src/test/eclipse.build.gradle b/libs/ssl-config/src/test/eclipse.build.gradle
new file mode 100644
index 0000000000000..f8265e3dfed08
--- /dev/null
+++ b/libs/ssl-config/src/test/eclipse.build.gradle
@@ -0,0 +1,5 @@
+// this is just a shell gradle file for eclipse to have separate projects for ssl-config src and tests
+apply from: '../../build.gradle'
+dependencies {
+  testCompile project(':libs:elasticsearch-ssl-config')
+}
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java
index 8629f5f1fa321..05aa75944d2f9 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java
@@ -21,10 +21,6 @@
 
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.DateFormatters;
-import org.elasticsearch.common.time.DateUtils;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.ISODateTimeFormat;
 
 import java.time.Instant;
 import java.time.LocalDate;
@@ -48,26 +44,26 @@ enum DateFormat {
     Iso8601 {
         @Override
-        Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Locale locale) {
-            return ISODateTimeFormat.dateTimeParser().withZone(timezone)::parseDateTime;
+        Function<String, ZonedDateTime> getFunction(String format, ZoneId timezone, Locale locale) {
+            return (date) -> DateFormatters.from(DateFormatter.forPattern("strict_date_time").parse(date)).withZoneSameInstant(timezone);
         }
     },
     Unix {
         @Override
-        Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Locale locale) {
-            return (date) -> new DateTime((long)(Double.parseDouble(date) * 1000), timezone);
+        Function<String, ZonedDateTime> getFunction(String format, ZoneId timezone, Locale locale) {
+            return date -> Instant.ofEpochMilli((long) (Double.parseDouble(date) * 1000.0)).atZone(timezone);
         }
     },
     UnixMs {
         @Override
-        Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Locale locale) {
-            return (date) -> new DateTime(Long.parseLong(date), timezone);
+        Function<String, ZonedDateTime> getFunction(String format, ZoneId timezone, Locale locale) {
+            return date -> Instant.ofEpochMilli(Long.parseLong(date)).atZone(timezone);
        }
    },
    Tai64n {
        @Override
-        Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Locale locale) {
-            return (date) -> new DateTime(parseMillis(date), timezone);
+        Function<String, ZonedDateTime> getFunction(String format, ZoneId timezone, Locale locale) {
+            return date -> Instant.ofEpochMilli(parseMillis(date)).atZone(timezone);
        }
 
        private long parseMillis(String date) {
@@ -85,13 +81,12 @@ private long parseMillis(String date) {
         Arrays.asList(NANO_OF_SECOND, SECOND_OF_DAY, MINUTE_OF_DAY, HOUR_OF_DAY, DAY_OF_MONTH, MONTH_OF_YEAR);
 
         @Override
-        Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Locale locale) {
+        Function<String, ZonedDateTime> getFunction(String format, ZoneId zoneId, Locale locale) {
 
             // support the 6.x BWC compatible way of parsing java 8 dates
             if (format.startsWith("8")) {
                 format = format.substring(1);
             }
 
-            ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(timezone);
             int year = LocalDate.now(ZoneOffset.UTC).getYear();
             DateFormatter formatter = DateFormatter.forPattern(format)
                 .withLocale(locale)
                 .withZone(zoneId);
@@ -111,13 +106,12 @@ Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Loc
                     accessor = newTime.withZoneSameLocal(zoneId);
                 }
 
-                long millis = DateFormatters.from(accessor).toInstant().toEpochMilli();
-                return new DateTime(millis, timezone);
+                return DateFormatters.from(accessor);
             };
         }
     };
 
-    abstract Function<String, DateTime> getFunction(String format, DateTimeZone timezone, Locale locale);
+    abstract Function<String, ZonedDateTime> getFunction(String format, ZoneId timezone, Locale locale);
 
     static DateFormat fromString(String format) {
         switch (format) {
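For reference, the behaviour of the new java.time-based `Unix` and `UnixMs` parsers
above can be reproduced standalone. A minimal sketch (the inputs and expected epoch
millis mirror the expectations in `DateFormatTests` further down in this patch):

[source,java]
--------------------------------------------------
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

class UnixDateExample {
    public static void main(String[] args) {
        // "Unix" input is seconds, possibly fractional; "UnixMs" input is milliseconds.
        ZonedDateTime unix = Instant.ofEpochMilli((long) (Double.parseDouble("1000.5") * 1000.0))
            .atZone(ZoneOffset.UTC);
        ZonedDateTime unixMs = Instant.ofEpochMilli(Long.parseLong("1000500")).atZone(ZoneOffset.UTC);
        System.out.println(unix.toInstant().toEpochMilli()); // 1000500
        System.out.println(unix.equals(unixMs));             // true
    }
}
--------------------------------------------------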
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java
index ca429375f792e..e8e79c3d869ce 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateIndexNameProcessor.java
@@ -19,25 +19,25 @@
 package org.elasticsearch.ingest.common;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.IllformedLocaleException;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.function.Function;
-
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.ingest.AbstractProcessor;
 import org.elasticsearch.ingest.ConfigurationUtils;
 import org.elasticsearch.ingest.IngestDocument;
 import org.elasticsearch.ingest.Processor;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.TemplateScript;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;
+
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.IllformedLocaleException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.function.Function;
 
 public final class DateIndexNameProcessor extends AbstractProcessor {
 
@@ -47,10 +47,10 @@ public final class DateIndexNameProcessor extends AbstractProcessor {
     private final TemplateScript.Factory indexNamePrefixTemplate;
     private final TemplateScript.Factory dateRoundingTemplate;
     private final TemplateScript.Factory indexNameFormatTemplate;
-    private final DateTimeZone timezone;
-    private final List<Function<String, DateTime>> dateFormats;
+    private final ZoneId timezone;
+    private final List<Function<String, ZonedDateTime>> dateFormats;
 
-    DateIndexNameProcessor(String tag, String field, List<Function<String, DateTime>> dateFormats, DateTimeZone timezone,
+    DateIndexNameProcessor(String tag, String field, List<Function<String, ZonedDateTime>> dateFormats, ZoneId timezone,
                            TemplateScript.Factory indexNamePrefixTemplate, TemplateScript.Factory dateRoundingTemplate,
                            TemplateScript.Factory indexNameFormatTemplate) {
         super(tag);
@@ -72,9 +72,9 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
             date = obj.toString();
         }
 
-        DateTime dateTime = null;
+        ZonedDateTime dateTime = null;
         Exception lastException = null;
-        for (Function<String, DateTime> dateParser : dateFormats) {
+        for (Function<String, ZonedDateTime> dateParser : dateFormats) {
             try {
                 dateTime = dateParser.apply(date);
             } catch (Exception e) {
@@ -90,13 +90,15 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
 
         String indexNameFormat = ingestDocument.renderTemplate(indexNameFormatTemplate);
         String dateRounding = ingestDocument.renderTemplate(dateRoundingTemplate);
 
-        DateTimeFormatter formatter = DateTimeFormat.forPattern(indexNameFormat);
+        DateFormatter formatter = DateFormatter.forPattern(indexNameFormat);
+        // use UTC instead of Z as string representation of UTC, so behaviour is the same between 6.x and 7
+        String zone = timezone.equals(ZoneOffset.UTC) ? "UTC" : timezone.getId();
         StringBuilder builder = new StringBuilder()
                 .append('<')
                 .append(indexNamePrefix)
                 .append('{')
-                .append(formatter.print(dateTime)).append("||/").append(dateRounding)
-                .append('{').append(indexNameFormat).append('|').append(timezone).append('}')
+                .append(formatter.format(dateTime)).append("||/").append(dateRounding)
+                .append('{').append(indexNameFormat).append('|').append(zone).append('}')
                 .append('}')
                 .append('>');
         String dynamicIndexName = builder.toString();
@@ -125,11 +127,11 @@ TemplateScript.Factory getIndexNameFormatTemplate() {
         return indexNameFormatTemplate;
     }
 
-    DateTimeZone getTimezone() {
+    ZoneId getTimezone() {
         return timezone;
     }
 
-    List<Function<String, DateTime>> getDateFormats() {
+    List<Function<String, ZonedDateTime>> getDateFormats() {
         return dateFormats;
     }
 
@@ -146,7 +148,7 @@ public DateIndexNameProcessor create(Map<String, Processor.Factory> registry, St
                                          Map<String, Object> config) throws Exception {
         String localeString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "locale");
         String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "timezone");
-        DateTimeZone timezone = timezoneString == null ? DateTimeZone.UTC : DateTimeZone.forID(timezoneString);
+        ZoneId timezone = timezoneString == null ? ZoneOffset.UTC : ZoneId.of(timezoneString);
         Locale locale = Locale.ENGLISH;
         if (localeString != null) {
             try {
@@ -159,7 +161,7 @@ public DateIndexNameProcessor create(Map<String, Processor.Factory> registry, St
         if (dateFormatStrings == null) {
             dateFormatStrings = Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSSXX");
         }
-        List<Function<String, DateTime>> dateFormats = new ArrayList<>(dateFormatStrings.size());
+        List<Function<String, ZonedDateTime>> dateFormats = new ArrayList<>(dateFormatStrings.size());
         for (String format : dateFormatStrings) {
             DateFormat dateFormat = DateFormat.fromString(format);
             dateFormats.add(dateFormat.getFunction(format, timezone, locale));
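The processor above now renders the date with a `DateFormatter` and embeds it in a
date-math index-name expression. A standalone sketch of the resulting string shape,
using plain `java.time` rather than Elasticsearch's `DateFormatter`; the prefix
`events-`, rounding `m`, and format `yyyyMMdd` are borrowed from the tests below:

[source,java]
--------------------------------------------------
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

class DateIndexNameExample {
    public static void main(String[] args) {
        ZonedDateTime date = ZonedDateTime.parse("2016-04-25T12:24:20.101Z");
        String formatted = DateTimeFormatter.ofPattern("yyyyMMdd", Locale.ROOT).format(date);
        // prefix "events-", date rounding "m" (month), format "yyyyMMdd", zone label "UTC"
        String indexName = "<events-{" + formatted + "||/m{yyyyMMdd|UTC}}>";
        System.out.println(indexName); // <events-{20160425||/m{yyyyMMdd|UTC}}>
    }
}
--------------------------------------------------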
= "date"; static final String DEFAULT_TARGET_FIELD = "@timestamp"; + public static final DateFormatter FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); private final TemplateScript.Factory timezone; private final TemplateScript.Factory locale; private final String field; private final String targetField; private final List formats; - private final List, Function>> dateParsers; + private final List, Function>> dateParsers; DateProcessor(String tag, @Nullable TemplateScript.Factory timezone, @Nullable TemplateScript.Factory locale, String field, List formats, String targetField) { @@ -65,8 +67,8 @@ public final class DateProcessor extends AbstractProcessor { } } - private DateTimeZone newDateTimeZone(Map params) { - return timezone == null ? DateTimeZone.UTC : DateTimeZone.forID(timezone.newInstance(params).execute()); + private ZoneId newDateTimeZone(Map params) { + return timezone == null ? ZoneOffset.UTC : ZoneId.of(timezone.newInstance(params).execute()); } private Locale newLocale(Map params) { @@ -82,9 +84,9 @@ public IngestDocument execute(IngestDocument ingestDocument) { value = obj.toString(); } - DateTime dateTime = null; + ZonedDateTime dateTime = null; Exception lastException = null; - for (Function, Function> dateParser : dateParsers) { + for (Function, Function> dateParser : dateParsers) { try { dateTime = dateParser.apply(ingestDocument.getSourceAndMetadata()).apply(value); } catch (Exception e) { @@ -97,7 +99,7 @@ public IngestDocument execute(IngestDocument ingestDocument) { throw new IllegalArgumentException("unable to parse date [" + value + "]", lastException); } - ingestDocument.setFieldValue(targetField, ISODateTimeFormat.dateTime().print(dateTime)); + ingestDocument.setFieldValue(targetField, FORMATTER.format(dateTime)); return ingestDocument; } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java index 32874aa6a5776..136c9f7f69a0a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java @@ -21,10 +21,7 @@ import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -38,9 +35,9 @@ public class DateFormatTests extends ESTestCase { public void testParseJava() { - Function javaFunction = DateFormat.Java.getFunction("MMM dd HH:mm:ss Z", - DateTimeZone.forOffsetHours(-8), Locale.ENGLISH); - assertThat(Instant.ofEpochMilli(javaFunction.apply("Nov 24 01:29:01 -0800").getMillis()) + Function javaFunction = DateFormat.Java.getFunction("MMM dd HH:mm:ss Z", + ZoneOffset.ofHours(-8), Locale.ENGLISH); + assertThat(javaFunction.apply("Nov 24 01:29:01 -0800").toInstant() .atZone(ZoneId.of("GMT-8")) .format(DateTimeFormatter.ofPattern("MM dd HH:mm:ss", Locale.ENGLISH)), equalTo("11 24 01:29:01")); @@ -48,33 +45,35 @@ public void testParseJava() { public void testParseJavaDefaultYear() { String format = randomFrom("8dd/MM", "dd/MM"); - DateTimeZone timezone = DateUtils.zoneIdToDateTimeZone(ZoneId.of("Europe/Amsterdam")); - Function javaFunction = DateFormat.Java.getFunction(format, timezone, Locale.ENGLISH); + ZoneId timezone = DateUtils.of("Europe/Amsterdam"); + Function 
+        Function<String, ZonedDateTime> javaFunction = DateFormat.Java.getFunction(format, timezone, Locale.ENGLISH);
         int year = ZonedDateTime.now(ZoneOffset.UTC).getYear();
-        DateTime dateTime = javaFunction.apply("12/06");
+        ZonedDateTime dateTime = javaFunction.apply("12/06");
         assertThat(dateTime.getYear(), is(year));
-        assertThat(dateTime.toString(), is(year + "-06-12T00:00:00.000+02:00"));
     }
 
     public void testParseUnixMs() {
-        assertThat(DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null).apply("1000500").getMillis(), equalTo(1000500L));
+        assertThat(DateFormat.UnixMs.getFunction(null, ZoneOffset.UTC, null).apply("1000500").toInstant().toEpochMilli(),
+            equalTo(1000500L));
     }
 
     public void testParseUnix() {
-        assertThat(DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null).apply("1000.5").getMillis(), equalTo(1000500L));
+        assertThat(DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null).apply("1000.5").toInstant().toEpochMilli(),
+            equalTo(1000500L));
     }
 
     public void testParseUnixWithMsPrecision() {
-        assertThat(DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null).apply("1495718015").getMillis(), equalTo(1495718015000L));
+        assertThat(DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null).apply("1495718015").toInstant().toEpochMilli(),
+            equalTo(1495718015000L));
     }
 
     public void testParseISO8601() {
-        assertThat(DateFormat.Iso8601.getFunction(null, DateTimeZone.UTC, null).apply("2001-01-01T00:00:00-0800").getMillis(),
+        assertThat(DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null).apply("2001-01-01T00:00:00-0800").toInstant().toEpochMilli(),
             equalTo(978336000000L));
     }
 
     public void testParseISO8601Failure() {
-        Function<String, DateTime> function = DateFormat.Iso8601.getFunction(null, DateTimeZone.UTC, null);
+        Function<String, ZonedDateTime> function = DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null);
         try {
             function.apply("2001-01-0:00-0800");
             fail("parse should have failed");
@@ -86,7 +85,7 @@ public void testParseISO8601Failure() {
     public void testTAI64NParse() {
         String input = "4000000050d506482dbdf024";
         String expected = "2012-12-22T03:00:46.767+02:00";
-        assertThat(DateFormat.Tai64n.getFunction(null, DateTimeZone.forOffsetHours(2), null)
+        assertThat(DateFormat.Tai64n.getFunction(null, ZoneOffset.ofHours(2), null)
"@" : "") + input).toString(), equalTo(expected)); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java index 2735cf55776b0..67027f6ab6784 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTimeZone; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -44,7 +44,7 @@ public void testDefaults() throws Exception { assertThat(processor.getIndexNamePrefixTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("")); assertThat(processor.getDateRoundingTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("y")); assertThat(processor.getIndexNameFormatTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("yyyy-MM-dd")); - assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.UTC)); + assertThat(processor.getTimezone(), Matchers.equalTo(ZoneOffset.UTC)); } public void testSpecifyOptionalSettings() throws Exception { @@ -74,7 +74,7 @@ public void testSpecifyOptionalSettings() throws Exception { config.put("timezone", "+02:00"); processor = factory.create(null, null, config); - assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.forOffsetHours(2))); + assertThat(processor.getTimezone(), Matchers.equalTo(ZoneOffset.ofHours(2))); config = new HashMap<>(); config.put("field", "_field"); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java index 63d3e0416cd2c..3d891ffb81f4f 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java @@ -18,13 +18,14 @@ */ package org.elasticsearch.ingest.common; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -35,9 +36,9 @@ public class DateIndexNameProcessorTests extends ESTestCase { public void testJavaPattern() throws Exception { - Function function = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSXX", DateTimeZone.UTC, Locale.ROOT); + Function function = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSXX", ZoneOffset.UTC, Locale.ROOT); DateIndexNameProcessor processor = createProcessor("_field", Collections.singletonList(function), - DateTimeZone.UTC, "events-", "y", "yyyyMMdd"); + ZoneOffset.UTC, "events-", "y", "yyyyMMdd"); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z")); 
         processor.execute(document);
@@ -45,9 +46,9 @@ public void testJavaPattern() throws Exception {
     }
 
     public void testTAI64N()throws Exception {
-        Function<String, DateTime> function = DateFormat.Tai64n.getFunction(null, DateTimeZone.UTC, null);
+        Function<String, ZonedDateTime> function = DateFormat.Tai64n.getFunction(null, ZoneOffset.UTC, null);
         DateIndexNameProcessor dateProcessor = createProcessor("_field", Collections.singletonList(function),
-                DateTimeZone.UTC, "events-", "m", "yyyyMMdd");
+                ZoneOffset.UTC, "events-", "m", "yyyyMMdd");
         IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null,
                 Collections.singletonMap("_field", (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024"));
         dateProcessor.execute(document);
@@ -55,9 +56,9 @@ public void testTAI64N()throws Exception {
     }
 
     public void testUnixMs()throws Exception {
-        Function<String, DateTime> function = DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null);
+        Function<String, ZonedDateTime> function = DateFormat.UnixMs.getFunction(null, ZoneOffset.UTC, null);
         DateIndexNameProcessor dateProcessor = createProcessor("_field", Collections.singletonList(function),
-                DateTimeZone.UTC, "events-", "m", "yyyyMMdd");
+                ZoneOffset.UTC, "events-", "m", "yyyyMMdd");
         IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null,
                 Collections.singletonMap("_field", "1000500"));
         dateProcessor.execute(document);
@@ -70,9 +71,9 @@
     }
 
     public void testUnix()throws Exception {
-        Function<String, DateTime> function = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null);
+        Function<String, ZonedDateTime> function = DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null);
         DateIndexNameProcessor dateProcessor = createProcessor("_field", Collections.singletonList(function),
-                DateTimeZone.UTC, "events-", "m", "yyyyMMdd");
+                ZoneOffset.UTC, "events-", "m", "yyyyMMdd");
         IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null,
                 Collections.singletonMap("_field", "1000.5"));
         dateProcessor.execute(document);
@@ -84,10 +85,10 @@ public void testTemplatedFields() throws Exception {
         String dateRounding = randomFrom("y", "M", "w", "d", "h", "m", "s");
         String indexNameFormat = randomFrom("yyyy-MM-dd'T'HH:mm:ss.SSSZZ", "yyyyMMdd", "MM/dd/yyyy");
         String date = Integer.toString(randomInt());
-        Function<String, DateTime> dateTimeFunction = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null);
+        Function<String, ZonedDateTime> dateTimeFunction = DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null);
 
         DateIndexNameProcessor dateProcessor = createProcessor("_field",
-                Collections.singletonList(dateTimeFunction), DateTimeZone.UTC, indexNamePrefix,
+                Collections.singletonList(dateTimeFunction), ZoneOffset.UTC, indexNamePrefix,
                 dateRounding, indexNameFormat);
 
         IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null,
@@ -95,12 +96,12 @@ public void testTemplatedFields() throws Exception {
         dateProcessor.execute(document);
 
         assertThat(document.getSourceAndMetadata().get("_index"),
-                equalTo("<"+indexNamePrefix+"{"+DateTimeFormat.forPattern(indexNameFormat)
-                        .print(dateTimeFunction.apply(date))+"||/"+dateRounding+"{"+indexNameFormat+"|UTC}}>"));
+                equalTo("<"+indexNamePrefix+"{" + DateFormatter.forPattern(indexNameFormat)
+                        .format(dateTimeFunction.apply(date))+"||/"+dateRounding+"{"+indexNameFormat+"|UTC}}>"));
     }
 
-    private DateIndexNameProcessor createProcessor(String field, List<Function<String, DateTime>> dateFormats,
-                                                   DateTimeZone timezone, String indexNamePrefix, String dateRounding,
+    private DateIndexNameProcessor createProcessor(String field, List<Function<String, ZonedDateTime>> dateFormats,
+                                                   ZoneId timezone, String indexNamePrefix, String dateRounding,
                                                    String indexNameFormat) {
         return new DateIndexNameProcessor(randomAlphaOfLength(10), field, dateFormats, timezone,
                 new TestTemplateService.MockTemplateScript.Factory(indexNamePrefix),
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java
index 2cf11f6d215d0..7d227b222696f 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java
@@ -22,9 +22,9 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ingest.TestTemplateService;
 import org.elasticsearch.test.ESTestCase;
-import org.joda.time.DateTimeZone;
 import org.junit.Before;
 
+import java.time.ZoneId;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
@@ -105,10 +105,10 @@ public void testParseTimezone() throws Exception {
         config.put("field", sourceField);
         config.put("formats", Collections.singletonList("dd/MM/yyyyy"));
 
-        DateTimeZone timezone = randomDateTimeZone();
-        config.put("timezone", timezone.getID());
+        ZoneId timezone = randomZone();
+        config.put("timezone", timezone.getId());
         DateProcessor processor = factory.create(null, null, config);
-        assertThat(processor.getTimezone().newInstance(Collections.emptyMap()).execute(), equalTo(timezone.getID()));
+        assertThat(processor.getTimezone().newInstance(Collections.emptyMap()).execute(), equalTo(timezone.getId()));
     }
 
     public void testParseMatchFormats() throws Exception {
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java
index 6157e3e9e50f9..c9ab07d82bbcc 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java
@@ -45,9 +45,7 @@ private TemplateScript.Factory templatize(Locale locale) {
     }
 
     private TemplateScript.Factory templatize(ZoneId timezone) {
-        // prevent writing "UTC" as string, as joda time does not parse it
-        String id = timezone.equals(ZoneOffset.UTC) ? "UTC" : timezone.getId();
-        return new TestTemplateService.MockTemplateScript.Factory(id);
+        return new TestTemplateService.MockTemplateScript.Factory(timezone.getId());
     }
"UTC" : timezone.getId(); - return new TestTemplateService.MockTemplateScript.Factory(id); + return new TestTemplateService.MockTemplateScript.Factory(timezone.getId()); } public void testJavaPattern() { @@ -186,7 +184,7 @@ public void testInvalidTimezone() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(RandomDocumentPicks.randomIngestDocument(random(), document))); assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); - assertThat(e.getCause().getMessage(), equalTo("The datetime zone id 'invalid_timezone' is not recognised")); + assertThat(e.getCause().getMessage(), equalTo("Unknown time-zone ID: invalid_timezone")); } public void testInvalidLocale() { diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java index 6f2518eede673..d83762a5e47ab 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java @@ -134,11 +134,13 @@ public IngestDocument execute(IngestDocument ingestDocument) { } break; case DEVICE: + Map deviceDetails = new HashMap<>(1); if (uaClient.device != null && uaClient.device.name != null) { - uaDetails.put("device", uaClient.device.name); + deviceDetails.put("name", uaClient.device.name); } else { - uaDetails.put("device", "Other"); + deviceDetails.put("name", "Other"); } + uaDetails.put("device", deviceDetails); break; } } diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java index 3938fccd832a3..f043cc5369a26 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java @@ -110,7 +110,9 @@ public void testCommonBrowser() throws Exception { os.put("version", "10.9.2"); os.put("full", "Mac OS X 10.9.2"); assertThat(target.get("os"), is(os)); - assertThat(target.get("device"), is("Other")); + Map device = new HashMap<>(); + device.put("name", "Other"); + assertThat(target.get("device"), is(device)); } @SuppressWarnings("unchecked") @@ -136,7 +138,9 @@ public void testUncommonDevice() throws Exception { os.put("full", "Android 3.0"); assertThat(target.get("os"), is(os)); - assertThat(target.get("device"), is("Motorola Xoom")); + Map device = new HashMap<>(); + device.put("name", "Motorola Xoom"); + assertThat(target.get("device"), is(device)); } @SuppressWarnings("unchecked") @@ -157,7 +161,9 @@ public void testSpider() throws Exception { assertNull(target.get("version")); assertNull(target.get("os")); - assertThat(target.get("device"), is("Spider")); + Map device = new HashMap<>(); + device.put("name", "Spider"); + assertThat(target.get("device"), is(device)); } @SuppressWarnings("unchecked") @@ -180,7 +186,8 @@ public void testUnknown() throws Exception { assertNull(target.get("build")); assertNull(target.get("os")); - - assertThat(target.get("device"), is("Other")); + Map device = new HashMap<>(); + device.put("name", "Other"); + assertThat(target.get("device"), is(device)); } } diff --git 
diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml
index fc44d7261e80f..a7fe57c557008 100644
--- a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml
+++ b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml
@@ -32,7 +32,7 @@
   - match: { _source.user_agent.original: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.149 Safari/537.36" }
   - match: { _source.user_agent.os: {"name":"Mac OS X", "version":"10.9.2", "full":"Mac OS X 10.9.2"} }
   - match: { _source.user_agent.version: "33.0.1750" }
-  - match: { _source.user_agent.device: "Other" }
+  - match: { _source.user_agent.device: {"name": "Other" }}
 
 ---
 "Test user agent processor with parameters":
diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml
index ac90a3457fa65..763bea0ee4da0 100644
--- a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml
+++ b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml
@@ -30,6 +30,6 @@
         id: 1
   - match: { _source.field1: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.149 Safari/537.36" }
   - match: { _source.user_agent.name: "Test" }
-  - match: { _source.user_agent.device: "Other" }
+  - match: { _source.user_agent.device: {"name": "Other" }}
   - is_false: _source.user_agent.os
   - is_false: _source.user_agent.version
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java
index 88afa57e83e23..f26b02696e7e5 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java
@@ -28,7 +28,6 @@
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.client.RestClient;
-import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.Priority;
@@ -41,10 +40,8 @@
 import org.hamcrest.Matchers;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.stream.Collectors;
 
 import static org.hamcrest.core.Is.is;
 
@@ -59,30 +56,13 @@ protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(TestZenDiscovery.USE_ZEN2.getKey(), true).build();
     }
 
-    @Override
-    protected List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
-        final Settings firstNodeSettings = allNodesSettings.get(0);
-        final List<Settings> otherNodesSettings = allNodesSettings.subList(1, allNodesSettings.size());
-        final List<String> masterNodeNames = allNodesSettings.stream()
-            .filter(org.elasticsearch.node.Node.NODE_MASTER_SETTING::get)
-            .map(org.elasticsearch.node.Node.NODE_NAME_SETTING::get)
-            .collect(Collectors.toList());
-        final List<Settings> updatedSettings = new ArrayList<>();
-
-        updatedSettings.add(Settings.builder().put(firstNodeSettings)
-            .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames)
-            .build());
-        updatedSettings.addAll(otherNodesSettings);
-
-        return updatedSettings;
-    }
-
     @Override
     protected boolean addMockHttpTransport() {
         return false; // enable http
     }
 
     public void testRollingRestartOfTwoNodeCluster() throws Exception {
+        internalCluster().setBootstrapMasterNodeIndex(1);
         final List<String> nodes = internalCluster().startNodes(2);
         createIndex("test", Settings.builder()
@@ -142,6 +122,7 @@ public Settings onNodeStopped(String nodeName) throws IOException {
     }
 
     public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception {
+        internalCluster().setBootstrapMasterNodeIndex(2);
         List<String> nodes = internalCluster().startNodes(3);
         RestClient restClient = getRestClient();
         Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodes.get(2)));
@@ -154,6 +135,7 @@ public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception {
     }
 
     public void testClearVotingTombstonesWaitingForRemoval() throws Exception {
+        internalCluster().setBootstrapMasterNodeIndex(2);
         List<String> nodes = internalCluster().startNodes(3);
         RestClient restClient = getRestClient();
         String nodeToWithdraw = nodes.get(randomIntBetween(0, 2));
@@ -167,6 +149,7 @@ public void testClearVotingTombstonesWaitingForRemoval() throws Exception {
     }
 
     public void testFailsOnUnknownNode() throws Exception {
+        internalCluster().setBootstrapMasterNodeIndex(2);
         internalCluster().startNodes(3);
         RestClient restClient = getRestClient();
         try {
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index 1b2503ccb99d5..f997255959256 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -68,6 +68,7 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.startsWith;
 
@@ -970,10 +971,14 @@ public void testHistoryUUIDIsAdded() throws Exception {
             createIndex.setJsonEntity(Strings.toString(mappingsAndSettings));
             client().performRequest(createIndex);
         } else {
+            ensureGreenLongWait(index);
+
             Request statsRequest = new Request("GET", index + "/_stats");
             statsRequest.addParameter("level", "shards");
             Response response = client().performRequest(statsRequest);
             List<Object> shardStats = ObjectPath.createFromResponse(response).evaluate("indices."
+ index + ".shards.0"); + assertThat(shardStats, notNullValue()); + assertThat("Expected stats for 2 shards", shardStats, hasSize(2)); String globalHistoryUUID = null; for (Object shard : shardStats) { final String nodeId = ObjectPath.evaluate(shard, "routing.node"); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 4499a60bfe24a..fa4ca0588940c 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -36,6 +36,7 @@ terms: field: f1.keyword + - match: { num_reduce_phases: 3 } - match: {_clusters.total: 2} - match: {_clusters.successful: 2} - match: {_clusters.skipped: 0} @@ -63,6 +64,7 @@ terms: field: f1.keyword + - match: { num_reduce_phases: 3 } - match: {_clusters.total: 2} - match: {_clusters.successful: 2} - match: {_clusters.skipped: 0} @@ -83,6 +85,7 @@ terms: field: f1.keyword + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -103,6 +106,7 @@ terms: field: f1.keyword + - is_false: num_reduce_phases - is_false: _clusters - match: { _shards.total: 2 } - match: { hits.total: 5} @@ -133,6 +137,7 @@ rest_total_hits_as_int: true index: test_remote_cluster:test_index + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -162,6 +167,7 @@ rest_total_hits_as_int: true index: "*:test_index" + - match: { num_reduce_phases: 3 } - match: {_clusters.total: 2} - match: {_clusters.successful: 2} - match: {_clusters.skipped: 0} @@ -176,6 +182,7 @@ rest_total_hits_as_int: true index: my_remote_cluster:aliased_test_index + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -192,6 +199,7 @@ rest_total_hits_as_int: true index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1 + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -208,6 +216,7 @@ rest_total_hits_as_int: true index: "my_remote_cluster:single_doc_index" + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml index 6a7fe3c5356c0..ea404702db529 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml @@ -12,6 +12,7 @@ query: match_all: {} + - is_false: num_reduce_phases - match: {_clusters.total: 1} - match: {_clusters.successful: 1} - match: {_clusters.skipped: 0} @@ -28,6 +29,7 @@ rest_total_hits_as_int: true body: { "scroll_id": "$scroll_id", "scroll": "1m"} + - is_false: num_reduce_phases - is_false: _clusters - match: {hits.total: 6 } - length: {hits.hits: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml index bb9157fe684f8..50a5239e70675 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -2,8 +2,8 @@ "Create a typeless index while there is a typed template": - skip: - version: " - 6.99.99" - reason: needs change to be backported to 6.7 + version: " - 6.6.99" + reason: Merging typeless/typed mappings/templates was added in 6.7 - do: indices.put_template: @@ -41,8 +41,8 @@ "Create a typed index while there is a typeless template": - skip: - version: " - 6.99.99" - reason: needs change to be backported to 6.7 + version: " - 6.6.99" + reason: Merging typeless/typed mappings/templates was added in 6.7 - do: indices.put_template: @@ -81,7 +81,7 @@ - skip: version: " - 6.99.99" - reason: needs change to be backported to 6.7 + reason: include_type_name only supported as of 6.7 - do: indices.put_template: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml index d964a382137f8..13cb3321841cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -55,8 +55,8 @@ "PUT mapping with _doc on an index that has types": - skip: - version: " - 6.99.99" - reason: Backport first + version: " - 6.6.99" + reason: include_type_name is only supported as of 6.7 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index d442672bf8bed..88e0ecff29608 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -700,3 +700,74 @@ setup: - is_false: aggregations.str_terms.buckets.1.key_as_string - match: { aggregations.str_terms.buckets.1.doc_count: 2 } + +--- +"Global ordinals are not loaded with the map execution hint": + + - skip: + version: " - 6.99.99" + reason: bug fixed in 7.0 + + - do: + index: + refresh: true + index: test_1 + id: 1 + routing: 1 + body: { "str": "abc" } + + - do: + index: + refresh: true + index: test_1 + id: 2 + routing: 1 + body: { "str": "abc" } + + - do: + index: + refresh: true + index: test_1 + id: 3 + routing: 1 + body: { "str": "bcd" } + + - do: + indices.refresh: {} + + - do: + search: + index: test_1 + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "execution_hint" : "map" } } } } + + - match: { hits.total.value: 3} + - length: { aggregations.str_terms.buckets: 2 } + + - do: + indices.stats: + index: test_1 + metric: fielddata + fielddata_fields: str + + - match: { indices.test_1.total.fielddata.memory_size_in_bytes: 0} + + - do: + search: + index: test_1 + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "execution_hint" : "global_ordinals" } } } } + + - match: { hits.total.value: 3} + - length: { aggregations.str_terms.buckets: 2 } + + - do: + indices.stats: + index: test_1 + metric: fielddata + fielddata_fields: str + + - gt: { indices.test_1.total.fielddata.memory_size_in_bytes: 0} + + + + + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml new file mode 100644 index 0000000000000..2db498a0cacf0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/290_geotile_grid.yml @@ -0,0 +1,65 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "added in 7.0.0" + - do: + indices.create: + include_type_name: false + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + properties: + location: + type: geo_point + +--- +"Basic test": + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: "52.374081,4.912350" + - index: + _index: test_1 + _id: 2 + - location: "52.369219,4.901618" + - index: + _index: test_1 + _id: 3 + - location: "52.371667,4.914722" + - index: + _index: test_1 + _id: 4 + - location: "51.222900,4.405200" + - index: + _index: test_1 + _id: 5 + - location: "48.861111,2.336389" + - index: + _index: test_1 + _id: 6 + - location: "48.860000,2.327000" + + - do: + search: + rest_total_hits_as_int: true + body: + aggregations: + grid: + geotile_grid: + field: location + precision: 8 + + + - match: { hits.total: 6 } + - match: { aggregations.grid.buckets.0.key: "8/131/84" } + - match: { aggregations.grid.buckets.0.doc_count: 3 } + - match: { aggregations.grid.buckets.1.key: "8/129/88" } + - match: { aggregations.grid.buckets.1.doc_count: 2 } + - match: { aggregations.grid.buckets.2.key: "8/131/85" } + - match: { aggregations.grid.buckets.2.doc_count: 1 } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 67f33398bba68..e5c5b17414b96 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -714,20 +714,18 @@ InitialSearchPhase.ArraySearchPhaseResults newSearchPhaseResu final boolean hasAggs = source != null && source.aggregations() != null; final boolean hasTopDocs = source == null || source.size() != 0; final int trackTotalHitsUpTo = resolveTrackTotalHits(request); - final boolean finalReduce = request.getLocalClusterAlias() == null; - if (isScrollRequest == false && (hasAggs || hasTopDocs)) { // no incremental reduce if scroll is used - we only hit a single shard or sometimes more... 
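For context on the expected keys: "8/131/84" and the other bucket keys asserted in this test follow the standard slippy-map tile scheme, where x counts tiles eastward from the antimeridian and y counts tiles southward from the top of the Web Mercator projection. Below is a minimal sketch of that mapping, assuming the usual OpenStreetMap tile math; the class and method names are illustrative, not the aggregation's actual internals.

[source,java]
--------------------------------------------------
import java.util.Locale;

public final class GeoTileKeyDemo {
    // Maps a geo_point to a "zoom/x/y" geotile key (no latitude clamping).
    static String key(double lat, double lon, int zoom) {
        final int tiles = 1 << zoom; // 2^zoom tiles per axis
        final double latRad = Math.toRadians(lat);
        final int x = (int) Math.floor((lon + 180.0) / 360.0 * tiles);
        final int y = (int) Math.floor(
            (1.0 - Math.log(Math.tan(latRad) + 1.0 / Math.cos(latRad)) / Math.PI) / 2.0 * tiles);
        return String.format(Locale.ROOT, "%d/%d/%d", zoom, x, y);
    }

    public static void main(String[] args) {
        // NEMO Science Museum from the fixture above: prints "8/131/84"
        System.out.println(key(52.374081, 4.912350, 8));
    }
}
--------------------------------------------------

At precision 8 the three Amsterdam museums share tile 8/131/84, Antwerp lands in 8/131/85, and the two Paris museums share 8/129/88, which is exactly the bucket breakdown the test asserts.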
if (request.getBatchedReduceSize() < numShards) { // only use this if there are aggs and if there are more shards than we should reduce at once return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize(), hasTopDocs, hasAggs, - trackTotalHitsUpTo, finalReduce); + trackTotalHitsUpTo, request.isFinalReduce()); } } return new InitialSearchPhase.ArraySearchPhaseResults(numShards) { @Override ReducedQueryPhase reduce() { - return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHitsUpTo, finalReduce); + return reducedQueryPhase(results.asList(), isScrollRequest, trackTotalHitsUpTo, request.isFinalReduce()); } }; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 55122b6806fd2..602a7123d0014 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -67,6 +67,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest private final String localClusterAlias; private final long absoluteStartMillis; + private final boolean finalReduce; private SearchType searchType = SearchType.DEFAULT; @@ -102,13 +103,15 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest public SearchRequest() { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; + this.finalReduce = true; } /** * Constructs a new search request from the provided search request */ public SearchRequest(SearchRequest searchRequest) { - this(searchRequest, searchRequest.indices, searchRequest.localClusterAlias, searchRequest.absoluteStartMillis); + this(searchRequest, searchRequest.indices, searchRequest.localClusterAlias, + searchRequest.absoluteStartMillis, searchRequest.finalReduce); } /** @@ -132,25 +135,30 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { } /** - * Creates a new search request by providing the search request to copy all fields from, the indices to search against, - * the alias of the cluster where it will be executed, as well as the start time in milliseconds from the epoch time. - * Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request performing local reduction - * on each cluster. The coordinating CCS node provides the original search request, the indices to search against as well as the - * alias to prefix index names with in the returned search results, and the absolute start time to be used on the remote clusters - * to ensure that the same value is used. + * Creates a new search request by providing the search request to copy all fields from, the indices to search against, the alias of + * the cluster where it will be executed, as well as the start time in milliseconds from the epoch time and whether the reduction + * should be final or not. Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request + * performing reduction on each cluster in order to minimize network round-trips between the coordinating node and the remote clusters. 
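The new flag gives the code a single switch for deciding where final-only reduction steps (pipeline aggregations, for example) run: on the cluster that executes the request, or on the CCS coordinator that merges afterwards. A schematic of the two cases, assuming caller code inside org.elasticsearch.action.search where the package-private factory is visible; the index and cluster names here are made up:

[source,java]
--------------------------------------------------
SearchRequest original = new SearchRequest("logs-*");
long startMillis = System.currentTimeMillis();

// Fan-out: each cluster performs a partial, non-final reduce and the
// coordinating node merges the per-cluster responses afterwards.
SearchRequest perCluster = SearchRequest.withLocalReduction(
    original, new String[] { "logs-2019" }, "cluster_a", startMillis, false);

// Single remote cluster: nothing is merged afterwards, so the remote
// cluster can safely reduce to completion on its own.
SearchRequest single = SearchRequest.withLocalReduction(
    original, new String[] { "logs-2019" }, "cluster_a", startMillis, true);
--------------------------------------------------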
+ * + * @param originalSearchRequest the original search request + * @param indices the indices to search against + * @param localClusterAlias the alias to prefix index names with in the returned search results + * @param absoluteStartMillis the absolute start time to be used on the remote clusters to ensure that the same value is used + * @param finalReduce whether the reduction should be final or not */ static SearchRequest withLocalReduction(SearchRequest originalSearchRequest, String[] indices, - String localClusterAlias, long absoluteStartMillis) { + String localClusterAlias, long absoluteStartMillis, boolean finalReduce) { Objects.requireNonNull(originalSearchRequest, "search request must not be null"); validateIndices(indices); Objects.requireNonNull(localClusterAlias, "cluster alias must not be null"); if (absoluteStartMillis < 0) { throw new IllegalArgumentException("absoluteStartMillis must not be negative but was [" + absoluteStartMillis + "]"); } - return new SearchRequest(originalSearchRequest, indices, localClusterAlias, absoluteStartMillis); + return new SearchRequest(originalSearchRequest, indices, localClusterAlias, absoluteStartMillis, finalReduce); } - private SearchRequest(SearchRequest searchRequest, String[] indices, String localClusterAlias, long absoluteStartMillis) { + private SearchRequest(SearchRequest searchRequest, String[] indices, String localClusterAlias, long absoluteStartMillis, + boolean finalReduce) { this.allowPartialSearchResults = searchRequest.allowPartialSearchResults; this.batchedReduceSize = searchRequest.batchedReduceSize; this.ccsMinimizeRoundtrips = searchRequest.ccsMinimizeRoundtrips; @@ -167,6 +175,7 @@ private SearchRequest(SearchRequest searchRequest, String[] indices, String loca this.types = searchRequest.types; this.localClusterAlias = localClusterAlias; this.absoluteStartMillis = absoluteStartMillis; + this.finalReduce = finalReduce; } /** @@ -203,6 +212,12 @@ public SearchRequest(StreamInput in) throws IOException { localClusterAlias = null; absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; } + //TODO move to the 6_7_0 branch once backported to 6.x + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + finalReduce = in.readBoolean(); + } else { + finalReduce = true; + } if (in.getVersion().onOrAfter(Version.V_7_0_0)) { ccsMinimizeRoundtrips = in.readBoolean(); } @@ -232,6 +247,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(absoluteStartMillis); } } + //TODO move to the 6_7_0 branch once backported to 6.x + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(finalReduce); + } if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeBoolean(ccsMinimizeRoundtrips); } @@ -277,11 +296,18 @@ String getLocalClusterAlias() { return localClusterAlias; } + /** + * Returns whether the reduction phase that will be performed needs to be final or not. + */ + boolean isFinalReduce() { + return finalReduce; + } + /** * Returns the current time in milliseconds from the time epoch, to be used for the execution of this search request. Used to * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search - * request. When created through {@link #withLocalReduction(SearchRequest, String[], String, long)}, this method returns the provided - * current time, otherwise it will return {@link System#currentTimeMillis()}. + * request. 
When created through {@link #withLocalReduction(SearchRequest, String[], String, long, boolean)}, this method returns + * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. * */ long getOrCreateAbsoluteStartMillis() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index dd0d4de07d6f4..6ae5e1a553eb6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.search; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; @@ -35,8 +36,10 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; @@ -47,6 +50,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -497,4 +501,12 @@ public String toString() { return "Clusters{total=" + total + ", successful=" + successful + ", skipped=" + skipped + '}'; } } + + static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { + SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, + InternalAggregations.EMPTY, null, null, false, null, 0); + return new SearchResponse(internalSearchResponse, null, 0, 0, 0, tookInMillisSupplier.get(), + ShardSearchFailure.EMPTY_ARRAY, clusters); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 567040246c50f..3b28ca19477ab 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -115,11 +115,7 @@ SearchResponse getMergedResponse(Clusters clusters) { //if the search is only across remote clusters, none of them are available, and all of them have skip_unavailable set to true, //we end up calling merge without anything to merge, we just return an empty search response if (searchResponses.size() == 0) { - SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, - InternalAggregations.EMPTY, null, null, false, null, 0); - return new SearchResponse(internalSearchResponse, null, 0, 0, 0, searchTimeProvider.buildTookInMillis(), - ShardSearchFailure.EMPTY_ARRAY, clusters); + return 
SearchResponse.empty(searchTimeProvider::buildTookInMillis, clusters); } int totalShards = 0; int skippedShards = 0; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 48ae3f1249522..519f2c88e0e58 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -48,9 +48,13 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; @@ -253,30 +257,66 @@ static void ccsRemoteReduce(SearchRequest searchRequest, OriginalIndices localIn SearchTimeProvider timeProvider, Function reduceContext, RemoteClusterService remoteClusterService, ThreadPool threadPool, ActionListener listener, BiConsumer> localSearchConsumer) { - SearchResponseMerger searchResponseMerger = createSearchResponseMerger(searchRequest.source(), timeProvider, reduceContext); - AtomicInteger skippedClusters = new AtomicInteger(0); - final AtomicReference exceptions = new AtomicReference<>(); - int totalClusters = remoteIndices.size() + (localIndices == null ? 0 : 1); - final CountDown countDown = new CountDown(totalClusters); - for (Map.Entry entry : remoteIndices.entrySet()) { + + if (localIndices == null && remoteIndices.size() == 1) { + //if we are searching against a single remote cluster, we simply forward the original search request to such cluster + //and we directly perform final reduction in the remote cluster + Map.Entry entry = remoteIndices.entrySet().iterator().next(); String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); OriginalIndices indices = entry.getValue(); SearchRequest ccsSearchRequest = SearchRequest.withLocalReduction(searchRequest, indices.indices(), - clusterAlias, timeProvider.getAbsoluteStartMillis()); - ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown, - skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); + clusterAlias, timeProvider.getAbsoluteStartMillis(), true); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); - remoteClusterClient.search(ccsSearchRequest, ccsListener); - } - if (localIndices != null) { - ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, - false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); - //here we provide the empty string a cluster alias, which means no prefix in index name, - //but the coord node will perform non final reduce as it's not null. 
- SearchRequest ccsLocalSearchRequest = SearchRequest.withLocalReduction(searchRequest, localIndices.indices(), - RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis()); - localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); + remoteClusterClient.search(ccsSearchRequest, new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + Map profileResults = searchResponse.getProfileResults(); + SearchProfileShardResults profile = profileResults == null || profileResults.isEmpty() + ? null : new SearchProfileShardResults(profileResults); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchResponse.getHits(), + (InternalAggregations) searchResponse.getAggregations(), searchResponse.getSuggest(), profile, + searchResponse.isTimedOut(), searchResponse.isTerminatedEarly(), searchResponse.getNumReducePhases()); + listener.onResponse(new SearchResponse(internalSearchResponse, searchResponse.getScrollId(), + searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), + timeProvider.buildTookInMillis(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0))); + } + + @Override + public void onFailure(Exception e) { + if (skipUnavailable) { + listener.onResponse(SearchResponse.empty(timeProvider::buildTookInMillis, new SearchResponse.Clusters(1, 0, 1))); + } else { + listener.onFailure(wrapRemoteClusterFailure(clusterAlias, e)); + } + } + }); + } else { + SearchResponseMerger searchResponseMerger = createSearchResponseMerger(searchRequest.source(), timeProvider, reduceContext); + AtomicInteger skippedClusters = new AtomicInteger(0); + final AtomicReference exceptions = new AtomicReference<>(); + int totalClusters = remoteIndices.size() + (localIndices == null ? 0 : 1); + final CountDown countDown = new CountDown(totalClusters); + for (Map.Entry entry : remoteIndices.entrySet()) { + String clusterAlias = entry.getKey(); + boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + OriginalIndices indices = entry.getValue(); + SearchRequest ccsSearchRequest = SearchRequest.withLocalReduction(searchRequest, indices.indices(), + clusterAlias, timeProvider.getAbsoluteStartMillis(), false); + ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown, + skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); + Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + remoteClusterClient.search(ccsSearchRequest, ccsListener); + } + if (localIndices != null) { + ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); + //here we provide the empty string a cluster alias, which means no prefix in index name, + //but the coord node will perform non final reduce as it's not null. 
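This split is what the num_reduce_phases expectations in the multi_cluster REST tests above encode: each cluster that receives a non-final request contributes one partial reduce, the coordinating node adds the final merge, and a single-cluster search reports no extra reduce phases at all. A back-of-the-envelope check for the two-cluster case in 10_basic.yml:

[source,java]
--------------------------------------------------
int clusters = 2;                  // the local cluster plus one remote
int partialReduces = clusters;     // one non-final reduce per cluster
int finalReduces = 1;              // the merge on the coordinating node
int numReducePhases = partialReduces + finalReduces;  // == 3, as asserted
--------------------------------------------------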
+ SearchRequest ccsLocalSearchRequest = SearchRequest.withLocalReduction(searchRequest, localIndices.indices(), + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false); + localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); + } } } @@ -297,9 +337,6 @@ static SearchResponseMerger createSearchResponseMerger(SearchSourceBuilder sourc //here we modify the original source so we can re-use it by setting it to each outgoing search request source.from(0); source.size(from + size); - //TODO when searching only against a remote cluster, we could ask directly for the final number of results and let - //the remote cluster do a final reduction, yet that is not possible as we are providing a localClusterAlias which - //will automatically make the reduction non final } return new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, reduceContextFunction); } @@ -604,7 +641,7 @@ public final void onFailure(Exception e) { } else { Exception exception = e; if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) == false) { - exception = new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); + exception = wrapRemoteClusterFailure(clusterAlias, e); } if (exceptions.compareAndSet(null, exception) == false) { exceptions.accumulateAndGet(exception, (previous, current) -> { @@ -636,4 +673,8 @@ private void maybeFinish() { abstract FinalResponse createFinalResponse(); } + + private static RemoteTransportException wrapRemoteClusterFailure(String clusterAlias, Exception e) { + return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index cc58628b53893..67d2103ce672d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -167,6 +167,12 @@ String getDescription() { assert clusterState.getLastCommittedConfiguration().isEmpty() == false; + if (clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) { + return String.format(Locale.ROOT, + "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", + foundPeers, discoveryWillContinueDescription); + } + final String quorumDescription; if (clusterState.getLastAcceptedConfiguration().equals(clusterState.getLastCommittedConfiguration())) { quorumDescription = describeQuorum(clusterState.getLastAcceptedConfiguration()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java index 01ef85b656d1e..b63cb07feff99 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetaData.java @@ -325,6 +325,8 @@ public String toString() { public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); + public static final VotingConfiguration MUST_JOIN_ELECTED_MASTER = new 
VotingConfiguration(Collections.singleton( + "_must_join_elected_master_")); private final Set nodeIds; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java new file mode 100644 index 0000000000000..6bd41ccf37f0c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.nio.file.Path; + +public class DetachClusterCommand extends ElasticsearchNodeCommand { + + static final String NODE_DETACHED_MSG = "Node was successfully detached from the cluster"; + static final String CONFIRMATION_MSG = + "-------------------------------------------------------------------------------\n" + + "\n" + + "You should run this tool only if you have permanently lost all\n" + + "your master-eligible nodes, and you cannot restore the cluster\n" + + "from a snapshot, or you have already run `elasticsearch-node unsafe-bootstrap`\n" + + "on a master-eligible node that formed a cluster with this node.\n" + + "This tool can cause arbitrary data loss and its use should be your last resort.\n" + + "Do you want to proceed?\n"; + + public DetachClusterCommand() { + super("Detaches this node from its cluster, allowing it to unsafely join a new cluster"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + super.execute(terminal, options, env); + + processNodePathsWithLock(terminal, options, env); + + terminal.println(NODE_DETACHED_MSG); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException { + final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); + final Manifest manifest = manifestMetaDataTuple.v1(); + final MetaData metaData = manifestMetaDataTuple.v2(); + + confirm(terminal, CONFIRMATION_MSG); + + writeNewMetaData(terminal, manifest, updateCurrentTerm(), metaData, updateMetaData(metaData), dataPaths); + } + + // package-private for tests + static MetaData updateMetaData(MetaData oldMetaData) { + final CoordinationMetaData coordinationMetaData = CoordinationMetaData.builder() + .lastAcceptedConfiguration(CoordinationMetaData.VotingConfiguration.MUST_JOIN_ELECTED_MASTER) + 
.lastCommittedConfiguration(CoordinationMetaData.VotingConfiguration.MUST_JOIN_ELECTED_MASTER) + .term(0) + .build(); + return MetaData.builder(oldMetaData) + .coordinationMetaData(coordinationMetaData) + .clusterUUIDCommitted(false) + .build(); + } + + //package-private for tests + static long updateCurrentTerm() { + return 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java new file mode 100644 index 0000000000000..9ef75879e9275 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.LockObtainFailedException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Objects; + +public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { + private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); + protected final NamedXContentRegistry namedXContentRegistry; + static final String STOP_WARNING_MSG = + "--------------------------------------------------------------------------\n" + + "\n" + + " WARNING: Elasticsearch MUST be stopped before running this tool." 
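The interesting property of detach-cluster is how little it writes: both voting configurations become the MUST_JOIN_ELECTED_MASTER placeholder and the term drops to zero, so the detached node cannot win an election on its own and must join whichever master the target cluster elects. Since updateMetaData and updateCurrentTerm are package-private for tests, a unit-level check could look roughly like the sketch below; the MetaData fixture is made up and the asserts assume -ea:

[source,java]
--------------------------------------------------
MetaData before = MetaData.builder().clusterUUIDCommitted(true).build();
MetaData detached = DetachClusterCommand.updateMetaData(before);

assert detached.coordinationMetaData().getLastAcceptedConfiguration()
        .equals(CoordinationMetaData.VotingConfiguration.MUST_JOIN_ELECTED_MASTER);
assert detached.coordinationMetaData().getLastCommittedConfiguration()
        .equals(CoordinationMetaData.VotingConfiguration.MUST_JOIN_ELECTED_MASTER);
assert detached.coordinationMetaData().term() == 0L;
assert detached.clusterUUIDCommitted() == false;
assert DetachClusterCommand.updateCurrentTerm() == 0L;
--------------------------------------------------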
+ + "\n"; + static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; + static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; + static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, do you run pre 7.0 Elasticsearch?"; + static final String GLOBAL_GENERATION_MISSING_MSG = "no metadata is referenced from the manifest file, cluster has never been " + + "bootstrapped?"; + static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; + static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; + static final String ABORTED_BY_USER_MSG = "aborted by user"; + final OptionSpec nodeOrdinalOption; + + public ElasticsearchNodeCommand(String description) { + super(description); + nodeOrdinalOption = parser.accepts("ordinal", "Optional node ordinal, 0 if not specified") + .withRequiredArg().ofType(Integer.class); + namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); + } + + protected void processNodePathsWithLock(Terminal terminal, OptionSet options, Environment env) throws IOException { + terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); + Integer nodeOrdinal = nodeOrdinalOption.value(options); + if (nodeOrdinal == null) { + nodeOrdinal = 0; + } + try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) { + final Path[] dataPaths = + Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); + if (dataPaths.length == 0) { + throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); + } + processNodePaths(terminal, dataPaths); + } catch (LockObtainFailedException ex) { + throw new ElasticsearchException( + FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); + } + } + + protected Tuple loadMetaData(Terminal terminal, Path[] dataPaths) throws IOException { + terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest file"); + final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); + + if (manifest == null) { + throw new ElasticsearchException(NO_MANIFEST_FILE_FOUND_MSG); + } + if (manifest.isGlobalGenerationMissing()) { + throw new ElasticsearchException(GLOBAL_GENERATION_MISSING_MSG); + } + terminal.println(Terminal.Verbosity.VERBOSE, "Loading global metadata file"); + final MetaData metaData = MetaData.FORMAT.loadGeneration(logger, namedXContentRegistry, manifest.getGlobalGeneration(), + dataPaths); + if (metaData == null) { + throw new ElasticsearchException(NO_GLOBAL_METADATA_MSG + " [generation = " + manifest.getGlobalGeneration() + "]"); + } + + return Tuple.tuple(manifest, metaData); + } + + protected void confirm(Terminal terminal, String msg) { + terminal.println(msg); + String text = terminal.readText("Confirm [y/N] "); + if (text.equalsIgnoreCase("y") == false) { + throw new ElasticsearchException(ABORTED_BY_USER_MSG); + } + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println(STOP_WARNING_MSG); + } + + protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException; + + + protected void writeNewMetaData(Terminal terminal, Manifest oldManifest, long newCurrentTerm, + MetaData oldMetaData, MetaData newMetaData, Path[] dataPaths) { + try 
{ + terminal.println(Terminal.Verbosity.VERBOSE, + "[clusterUUID = " + oldMetaData.clusterUUID() + ", committed = " + oldMetaData.clusterUUIDCommitted() + "] => " + + "[clusterUUID = " + newMetaData.clusterUUID() + ", committed = " + newMetaData.clusterUUIDCommitted() + "]"); + terminal.println(Terminal.Verbosity.VERBOSE, "New coordination metadata is " + newMetaData.coordinationMetaData()); + terminal.println(Terminal.Verbosity.VERBOSE, "Writing new global metadata to disk"); + long newGeneration = MetaData.FORMAT.write(newMetaData, dataPaths); + Manifest newManifest = new Manifest(newCurrentTerm, oldManifest.getClusterStateVersion(), newGeneration, + oldManifest.getIndexGenerations()); + terminal.println(Terminal.Verbosity.VERBOSE, "New manifest is " + newManifest); + terminal.println(Terminal.Verbosity.VERBOSE, "Writing new manifest file to disk"); + Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); + terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up old metadata"); + MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); + } catch (Exception e) { + terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up new metadata"); + MetaData.FORMAT.cleanupOldFiles(oldManifest.getGlobalGeneration(), dataPaths); + throw new ElasticsearchException(WRITE_METADATA_EXCEPTION_MSG, e); + } + } + + //package-private for testing + OptionParser getParser() { + return parser; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index d8fb77433faef..e2a94f1140b92 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -35,6 +35,7 @@ public NodeToolCli() { super("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes", ()->{}); CommandLoggingConfigurator.configureLoggingWithoutConfig(); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); + subcommands.put("detach-cluster", new DetachClusterCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index 9db750c2a1f08..72afe8ec70428 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -21,40 +21,27 @@ import joptsimple.OptionSet; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetaData; import 
org.elasticsearch.node.Node; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; import java.util.Collections; import java.util.Locale; -import java.util.Objects; -public class UnsafeBootstrapMasterCommand extends EnvironmentAwareCommand { +public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand { private static final Logger logger = LogManager.getLogger(UnsafeBootstrapMasterCommand.class); - private final NamedXContentRegistry namedXContentRegistry; - static final String STOP_WARNING_MSG = - "--------------------------------------------------------------------------\n" + - "\n" + - " WARNING: Elasticsearch MUST be stopped before running this tool." + - "\n"; static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = "Current node cluster state (term, version) pair is (%s, %s)"; static final String CONFIRMATION_MSG = @@ -62,35 +49,29 @@ public class UnsafeBootstrapMasterCommand extends EnvironmentAwareCommand { "\n" + "You should run this tool only if you have permanently lost half\n" + "or more of the master-eligible nodes, and you cannot restore the cluster\n" + - "from a snapshot. This tool can result in arbitrary data loss and\n" + - "should be the last resort.\n" + + "from a snapshot. This tool can cause arbitrary data loss and its use " + + "should be your last resort.\n" + "If you have multiple survived master eligible nodes, consider running\n" + "this tool on the node with the highest cluster state (term, version) pair.\n" + "Do you want to proceed?\n"; - static final String ABORTED_BY_USER_MSG = "aborted by user"; static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node"; - static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; - static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; + static final String NO_NODE_METADATA_FOUND_MSG = "no node meta data is found, node has not been started yet?"; - static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, do you run pre 7.0 Elasticsearch?"; - static final String GLOBAL_GENERATION_MISSING_MSG = "no metadata is referenced from the manifest file, cluster has never been " + - "bootstrapped?"; - static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; + static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG = "last committed voting voting configuration is empty, cluster has never been bootstrapped?"; - static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; + static final String MASTER_NODE_BOOTSTRAPPED_MSG = "Master node was successfully bootstrapped"; static final Setting UNSAFE_BOOTSTRAP = ClusterService.USER_DEFINED_META_DATA.getConcreteSetting("cluster.metadata.unsafe-bootstrap"); UnsafeBootstrapMasterCommand() { super("Forces the successful election of the current node after the permanent loss of the half or more master-eligible nodes"); - namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); } @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - terminal.println(STOP_WARNING_MSG); + super.execute(terminal, options, env); Settings settings = env.settings(); terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.master setting"); @@ -98,27 +79,13 @@ protected void 
execute(Terminal terminal, OptionSet options, Environment env) th if (master == false) { throw new ElasticsearchException(NOT_MASTER_NODE_MSG); } - final int nodeOrdinal = 0; - - terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); - try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) { - processNodePaths(logger, terminal, lock.getNodePaths()); - } catch (LockObtainFailedException ex) { - throw new ElasticsearchException( - FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); - } + processNodePathsWithLock(terminal, options, env); terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); } - private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment.NodePath[] nodePaths) throws IOException { - final Path[] dataPaths = - Arrays.stream(nodePaths).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); - if (dataPaths.length == 0) { - throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); - } - + protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException { terminal.println(Terminal.Verbosity.VERBOSE, "Loading node metadata"); final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); if (nodeMetaData == null) { @@ -127,21 +94,10 @@ private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment. String nodeId = nodeMetaData.nodeId(); terminal.println(Terminal.Verbosity.VERBOSE, "Current nodeId is " + nodeId); - terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest file"); - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); - if (manifest == null) { - throw new ElasticsearchException(NO_MANIFEST_FILE_FOUND_MSG); - } - if (manifest.isGlobalGenerationMissing()) { - throw new ElasticsearchException(GLOBAL_GENERATION_MISSING_MSG); - } - terminal.println(Terminal.Verbosity.VERBOSE, "Loading global metadata file"); - final MetaData metaData = MetaData.FORMAT.loadGeneration(logger, namedXContentRegistry, manifest.getGlobalGeneration(), - dataPaths); - if (metaData == null) { - throw new ElasticsearchException(NO_GLOBAL_METADATA_MSG + " [generation = " + manifest.getGlobalGeneration() + "]"); - } + final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); + final Manifest manifest = manifestMetaDataTuple.v1(); + final MetaData metaData = manifestMetaDataTuple.v2(); final CoordinationMetaData coordinationMetaData = metaData.coordinationMetaData(); if (coordinationMetaData == null || coordinationMetaData.getLastCommittedConfiguration() == null || @@ -151,45 +107,26 @@ private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment. 
terminal.println(String.format(Locale.ROOT, CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, coordinationMetaData.term(), metaData.version())); - terminal.println(CONFIRMATION_MSG); - String text = terminal.readText("Confirm [y/N] "); - if (text.equalsIgnoreCase("y") == false) { - throw new ElasticsearchException(ABORTED_BY_USER_MSG); - } + confirm(terminal, CONFIRMATION_MSG); CoordinationMetaData newCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData) .clearVotingConfigExclusions() .lastAcceptedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) .lastCommittedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) .build(); - terminal.println(Terminal.Verbosity.VERBOSE, "New coordination metadata is constructed " + newCoordinationMetaData); + Settings persistentSettings = Settings.builder() .put(metaData.persistentSettings()) .put(UNSAFE_BOOTSTRAP.getKey(), true) .build(); MetaData newMetaData = MetaData.builder(metaData) + .clusterUUID(MetaData.UNKNOWN_CLUSTER_UUID) + .generateClusterUuidIfNeeded() + .clusterUUIDCommitted(true) .persistentSettings(persistentSettings) .coordinationMetaData(newCoordinationMetaData) .build(); - writeNewMetaData(terminal, manifest, newMetaData, dataPaths); - } - private void writeNewMetaData(Terminal terminal, Manifest manifest, MetaData newMetaData, Path[] dataPaths) { - try { - terminal.println(Terminal.Verbosity.VERBOSE, "Writing new global metadata to disk"); - long newGeneration = MetaData.FORMAT.write(newMetaData, dataPaths); - long newCurrentTerm = manifest.getCurrentTerm() + 1; - terminal.println(Terminal.Verbosity.VERBOSE, "Incrementing currentTerm. New value is " + newCurrentTerm); - Manifest newManifest = new Manifest(newCurrentTerm, manifest.getClusterStateVersion(), newGeneration, - manifest.getIndexGenerations()); - terminal.println(Terminal.Verbosity.VERBOSE, "Writing new manifest file to disk"); - Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); - terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up old metadata"); - MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); - } catch (Exception e) { - terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up new metadata"); - MetaData.FORMAT.cleanupOldFiles(manifest.getGlobalGeneration(), dataPaths); - throw new ElasticsearchException(WRITE_METADATA_EXCEPTION_MSG, e); - } + writeNewMetaData(terminal, manifest, manifest.getCurrentTerm(), metaData, newMetaData, dataPaths); } } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 6f16e4bc71a71..1cbaaeb80b884 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -1604,13 +1604,16 @@ public static ZonedDateTime from(TemporalAccessor accessor) { } else if (isLocalDateSet) { return localDate.atStartOfDay(zoneId); } else if (isLocalTimeSet) { - return of(LOCALDATE_EPOCH, localTime, zoneId); + return of(getLocaldate(accessor), localTime, zoneId); } else if (accessor.isSupported(ChronoField.YEAR)) { if (accessor.isSupported(MONTH_OF_YEAR)) { return getFirstOfMonth(accessor).atStartOfDay(zoneId); } else { return Year.of(accessor.get(ChronoField.YEAR)).atDay(1).atStartOfDay(zoneId); } + } else if (accessor.isSupported(MONTH_OF_YEAR)) { + // missing year, falling back to the epoch and then filling + return 
getLocaldate(accessor).atStartOfDay(zoneId); } else if (accessor.isSupported(WeekFields.ISO.weekBasedYear())) { if (accessor.isSupported(WeekFields.ISO.weekOfWeekBasedYear())) { return Year.of(accessor.get(WeekFields.ISO.weekBasedYear())) @@ -1630,6 +1633,18 @@ public static ZonedDateTime from(TemporalAccessor accessor) { throw new IllegalArgumentException("temporal accessor [" + accessor + "] cannot be converted to zoned date time"); } + private static LocalDate getLocaldate(TemporalAccessor accessor) { + if (accessor.isSupported(MONTH_OF_YEAR)) { + if (accessor.isSupported(DAY_OF_MONTH)) { + return LocalDate.of(1970, accessor.get(MONTH_OF_YEAR), accessor.get(DAY_OF_MONTH)); + } else { + return LocalDate.of(1970, accessor.get(MONTH_OF_YEAR), 1); + } + } + + return LOCALDATE_EPOCH; + } + @SuppressForbidden(reason = "ZonedDateTime.of is fine here") private static ZonedDateTime of(LocalDate localDate, LocalTime localTime, ZoneId zoneId) { return ZonedDateTime.of(localDate, localTime, zoneId); diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index c824a7c7e7c35..22b29bd0edf45 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.time; -import org.elasticsearch.bootstrap.JavaVersion; - import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.time.format.ResolverStyle; @@ -72,7 +70,7 @@ public TemporalAccessor resolve(Map fieldValues, private static final EpochField NANOS_OF_SECOND = new EpochField(ChronoUnit.NANOS, ChronoUnit.SECONDS, ValueRange.of(0, 999_999_999)) { @Override public boolean isSupportedBy(TemporalAccessor temporal) { - return temporal.isSupported(ChronoField.NANO_OF_SECOND) && temporal.getLong(ChronoField.NANO_OF_SECOND) != 0; + return temporal.isSupported(ChronoField.NANO_OF_SECOND); } @Override public long getFrom(TemporalAccessor temporal) { @@ -117,32 +115,30 @@ public boolean isSupportedBy(TemporalAccessor temporal) { } @Override public long getFrom(TemporalAccessor temporal) { - return temporal.getLong(ChronoField.NANO_OF_SECOND); + return temporal.getLong(ChronoField.NANO_OF_SECOND) % 1_000_000; } }; // this supports seconds without any fraction private static final DateTimeFormatter SECONDS_FORMATTER1 = new DateTimeFormatterBuilder() .appendValue(SECONDS, 1, 19, SignStyle.NORMAL) + .optionalStart() // optional is used so isSupported will be called when printing + .appendFraction(NANOS_OF_SECOND, 0, 9, true) + .optionalEnd() .toFormatter(Locale.ROOT); // this supports seconds ending in dot private static final DateTimeFormatter SECONDS_FORMATTER2 = new DateTimeFormatterBuilder() - .append(SECONDS_FORMATTER1) + .appendValue(SECONDS, 1, 19, SignStyle.NORMAL) .appendLiteral('.') .toFormatter(Locale.ROOT); - // this supports seconds with a fraction and is also used for printing - private static final DateTimeFormatter SECONDS_FORMATTER3 = new DateTimeFormatterBuilder() - .append(SECONDS_FORMATTER1) - .optionalStart() // optional is used so isSupported will be called when printing - .appendFraction(NANOS_OF_SECOND, 1, 9, true) - .optionalEnd() - .toFormatter(Locale.ROOT); - // this supports milliseconds without any fraction private static final DateTimeFormatter MILLISECONDS_FORMATTER1 = new DateTimeFormatterBuilder() .appendValue(MILLIS, 1, 19, SignStyle.NORMAL) + .optionalStart() + 
.appendFraction(NANOS_OF_MILLI, 0, 6, true) + .optionalEnd() .toFormatter(Locale.ROOT); // this supports milliseconds ending in dot @@ -151,32 +147,13 @@ public long getFrom(TemporalAccessor temporal) { .appendLiteral('.') .toFormatter(Locale.ROOT); - // this supports milliseconds with a fraction and is also used for printing - private static final DateTimeFormatter MILLISECONDS_FORMATTER3 = new DateTimeFormatterBuilder() - .append(MILLISECONDS_FORMATTER1) - .optionalStart() // optional is used so isSupported will be called when printing - .appendFraction(NANOS_OF_MILLI, 1, 6, true) - .optionalEnd() - .toFormatter(Locale.ROOT); - - static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter("epoch_second", SECONDS_FORMATTER3, + static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter("epoch_second", SECONDS_FORMATTER1, builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), - SECONDS_FORMATTER1, SECONDS_FORMATTER2, SECONDS_FORMATTER3); - - static final DateFormatter MILLIS_FORMATTER = getEpochMillisFormatter(); + SECONDS_FORMATTER1, SECONDS_FORMATTER2); - private static DateFormatter getEpochMillisFormatter() { - // the third formatter fails under java 8 as a printer, so fall back to this one - final DateTimeFormatter printer; - if (JavaVersion.current().getVersion().get(0) == 8) { - printer = MILLISECONDS_FORMATTER1; - } else { - printer = MILLISECONDS_FORMATTER3; - } - return new JavaDateFormatter("epoch_millis", printer, - builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), - MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); - } + static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter("epoch_millis", MILLISECONDS_FORMATTER1, + builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), + MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2); private abstract static class EpochField implements TemporalField { diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index e69cfd2c7af0b..b4b471e220a77 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -194,7 +194,7 @@ public String toString() { } try { String source = XContentHelper.convertToJson(doc.source(), reformat, doc.getXContentType()); - sb.append(", source[").append(Strings.cleanTruncate(source, maxSourceCharsToLog)).append("]"); + sb.append(", source[").append(Strings.cleanTruncate(source, maxSourceCharsToLog).trim()).append("]"); } catch (IOException e) { sb.append(", source[_failed_to_convert_[").append(e.getMessage()).append("]]"); /* diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java index c4f310073c488..20c9c3c70c10c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java @@ -32,13 +32,6 @@ public interface AtomicFieldData extends Accountable, Releasable { */ ScriptDocValues getScriptValues(); - /** - * Returns field values for use by returned hits. - */ - default ScriptDocValues getLegacyFieldValues() { - return getScriptValues(); - } - /** * Return a String representation of the values. 
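Collapsing the three EpochTime formatter variants into one works because the fractional part now sits inside optionalStart()/optionalEnd(): when parsing, the fraction may be absent (its minimum width is 0), and when printing, isSupported decides whether it is emitted, which is also what allows the Java 8 printer workaround above to be deleted. The same shape can be reproduced with plain java.time fields; in this sketch ChronoField.INSTANT_SECONDS stands in for the diff's custom EpochField:

[source,java]
--------------------------------------------------
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.format.SignStyle;
import java.time.temporal.ChronoField;
import java.util.Locale;

public class EpochFractionDemo {
    public static void main(String[] args) {
        DateTimeFormatter f = new DateTimeFormatterBuilder()
            .appendValue(ChronoField.INSTANT_SECONDS, 1, 19, SignStyle.NORMAL)
            .optionalStart()                                 // fraction may be absent
            .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
            .optionalEnd()
            .toFormatter(Locale.ROOT);

        System.out.println(f.parse("1546300800"));      // seconds only
        System.out.println(f.parse("1546300800.123"));  // seconds plus a fraction
    }
}
--------------------------------------------------

The separate *_FORMATTER2 variants remain for the corner case of a trailing decimal point with no digits, which the optional fraction alone does not accept.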
*/ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java index 66b25c21c8051..9e0f3ab073619 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java @@ -25,11 +25,6 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.script.JodaCompatibleZonedDateTime; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import java.io.IOException; /** * Specialization of {@link AtomicNumericFieldData} for integers. @@ -52,34 +47,6 @@ public long ramBytesUsed() { return ramBytesUsed; } - @Override - public final ScriptDocValues getLegacyFieldValues() { - switch (numericType) { - case DATE: - final ScriptDocValues.Dates realDV = new ScriptDocValues.Dates(getLongValues()); - return new ScriptDocValues() { - - @Override - public int size() { - return realDV.size(); - } - - @Override - public DateTime get(int index) { - JodaCompatibleZonedDateTime dt = realDV.get(index); - return new DateTime(dt.toInstant().toEpochMilli(), DateTimeZone.UTC); - } - - @Override - public void setNextDocId(int docId) throws IOException { - realDV.setNextDocId(docId); - } - }; - default: - return getScriptValues(); - } - } - @Override public final ScriptDocValues getScriptValues() { switch (numericType) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 72dbe28d12d09..29f22d8dc2cf0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -276,8 +276,8 @@ public void parse(ParseContext context) throws IOException { context.doc().add(field); } } catch (Exception e) { - throw new MapperParsingException("failed to parse field [{}] of type [{}]", e, fieldType().name(), - fieldType().typeName()); + throw new MapperParsingException("failed to parse field [{}] of type [{}] in document with id '{}'", e, fieldType().name(), + fieldType().typeName(), context.sourceToParse().id()); } multiFields.parse(this, context); } diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index cca1ca0fcc0d1..def32f0c75059 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -64,6 +64,7 @@ protected AbstractQueryBuilder() { protected AbstractQueryBuilder(StreamInput in) throws IOException { boost = in.readFloat(); + checkNegativeBoost(boost); queryName = in.readOptionalString(); } @@ -139,6 +140,13 @@ public final float boost() { return this.boost; } + protected final void checkNegativeBoost(float boost) { + if (Float.compare(boost, 0f) < 0) { + throw new IllegalArgumentException("negative [boost] are not allowed in [" + toString() + "], " + + "use a value between 0 and 1 to deboost"); + } + } + /** * Sets the boost for this query. Documents matching this query will (in addition to the normal * weightings) have their score multiplied by the boost provided. 
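The `checkNegativeBoost` helper added above is now invoked from the `StreamInput` constructor as well as from the `boost(float)` setter in the next hunk, so a negative boost is rejected when a query is deserialized off the wire, not only when it is built. A minimal stand-alone sketch of the check, assuming nothing beyond the JDK (the class name and `main` harness are illustrative, not Elasticsearch API):

[source,java]
--------------------------------------------------
// Stand-alone sketch of the centralized negative-boost validation; only the
// exception message mirrors the helper above, everything else is illustrative.
public final class BoostCheckSketch {

    static float checkNegativeBoost(float boost) {
        if (Float.compare(boost, 0f) < 0) {
            throw new IllegalArgumentException("negative [boost] are not allowed, "
                + "use a value between 0 and 1 to deboost");
        }
        return boost;
    }

    public static void main(String[] args) {
        System.out.println(checkNegativeBoost(2.0f)); // fine: scores are boosted
        System.out.println(checkNegativeBoost(0.5f)); // fine: values in (0, 1) "deboost"
        try {
            checkNegativeBoost(-1.0f);                // rejected
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
--------------------------------------------------

Centralizing the check keeps the builder path and the wire-deserialization path from drifting apart; the same pattern is applied to the per-field boosts in the query builders below.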
@@ -146,10 +154,7 @@ public final float boost() { @SuppressWarnings("unchecked") @Override public final QB boost(float boost) { - if (Float.compare(boost, 0f) < 0) { - throw new IllegalArgumentException("negative [boost] are not allowed in [" + toString() + "], " + - "use a value between 0 and 1 to deboost"); - } + checkNegativeBoost(boost); this.boost = boost; return (QB) this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index d8476d791d7ec..9f2c85106de08 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -211,7 +211,10 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { int size = in.readVInt(); fieldsBoosts = new TreeMap<>(); for (int i = 0; i < size; i++) { - fieldsBoosts.put(in.readString(), in.readFloat()); + String field = in.readString(); + float boost = in.readFloat(); + checkNegativeBoost(boost); + fieldsBoosts.put(field, boost); } type = Type.readFromStream(in); operator = Operator.readFromStream(in); @@ -295,6 +298,7 @@ public MultiMatchQueryBuilder field(String field, float boost) { if (Strings.isEmpty(field)) { throw new IllegalArgumentException("supplied field is null or empty."); } + checkNegativeBoost(boost); this.fieldsBoosts.put(field, boost); return this; } @@ -303,6 +307,9 @@ public MultiMatchQueryBuilder field(String field, float boost) { * Add several fields to run the query against with a specific boost. */ public MultiMatchQueryBuilder fields(Map fields) { + for (float fieldBoost : fields.values()) { + checkNegativeBoost(fieldBoost); + } this.fieldsBoosts.putAll(fields); return this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 363384030a2ac..3f8a0acc91695 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -169,7 +169,10 @@ public QueryStringQueryBuilder(StreamInput in) throws IOException { defaultField = in.readOptionalString(); int size = in.readVInt(); for (int i = 0; i < size; i++) { - fieldsAndWeights.put(in.readString(), in.readFloat()); + String field = in.readString(); + Float weight = in.readFloat(); + checkNegativeBoost(weight); + fieldsAndWeights.put(field, weight); } defaultOperator = Operator.readFromStream(in); analyzer = in.readOptionalString(); @@ -264,6 +267,7 @@ public QueryStringQueryBuilder field(String field) { * Adds a field to run the query string against with a specific boost. */ public QueryStringQueryBuilder field(String field, float boost) { + checkNegativeBoost(boost); this.fieldsAndWeights.put(field, boost); return this; } @@ -272,6 +276,9 @@ public QueryStringQueryBuilder field(String field, float boost) { * Add several fields to run the query against with a specific boost. 
*/ public QueryStringQueryBuilder fields(Map fields) { + for (float fieldBoost : fields.values()) { + checkNegativeBoost(fieldBoost); + } this.fieldsAndWeights.putAll(fields); return this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 466341e3cc8b4..2b2045266455b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -161,6 +161,7 @@ public SimpleQueryStringBuilder(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { String field = in.readString(); Float weight = in.readFloat(); + checkNegativeBoost(weight); fields.put(field, weight); } fieldsAndWeights.putAll(fields); @@ -223,6 +224,7 @@ public SimpleQueryStringBuilder field(String field, float boost) { if (Strings.isEmpty(field)) { throw new IllegalArgumentException("supplied field is null or empty"); } + checkNegativeBoost(boost); this.fieldsAndWeights.put(field, boost); return this; } @@ -230,6 +232,9 @@ public SimpleQueryStringBuilder field(String field, float boost) { /** Add several fields to run the query against with a specific boost. */ public SimpleQueryStringBuilder fields(Map fields) { Objects.requireNonNull(fields, "fields cannot be null"); + for (float fieldBoost : fields.values()) { + checkNegativeBoost(fieldBoost); + } this.fieldsAndWeights.putAll(fields); return this; } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 7eefaadaadde2..88fd5293392b5 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -97,11 +97,10 @@ private List buildFieldQueries(MultiMatchQueryBuilder.Type type, Map getCleanStatus commits = DirectoryReader.listCommits(indexDirectory); } catch (IndexNotFoundException infe) { throw new ElasticsearchException("unable to find a valid shard at [" + indexPath + "]", infe); + } catch (IOException e) { + throw new ElasticsearchException("unable to list commits at [" + indexPath + "]", e); } // Retrieve the generation and UUID from the existing data diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 3d93effecc545..81c6273ec1a36 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -110,6 +110,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -422,6 +424,8 @@ private void registerAggregations(List plugins) { GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new)); 
registerAggregation(new AggregationSpec(GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder::parse).addResultReader(InternalGeoHashGrid::new)); + registerAggregation(new AggregationSpec(GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, + GeoTileGridAggregationBuilder::parse).addResultReader(InternalGeoTileGrid::new)); registerAggregation(new AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new, NestedAggregationBuilder::parse).addResultReader(InternalNested::new)); registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder.NAME, ReverseNestedAggregationBuilder::new, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java index fd56172325230..d78e42ba89603 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java @@ -30,6 +30,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -250,6 +252,13 @@ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } + /** + * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + */ + public static GeoTileGridAggregationBuilder geotileGrid(String name) { + return new GeoTileGridAggregationBuilder(name); + } + /** * Create a new {@link SignificantTerms} aggregation with the given name. */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java new file mode 100644 index 0000000000000..33efeeb5d38b6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Map; + +public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { + public static final String NAME = "geotile_grid"; + private static final int DEFAULT_PRECISION = 7; + private static final int DEFAULT_MAX_NUM_CELLS = 10000; + + private static final ObjectParser PARSER = createParser(NAME, GeoTileUtils::parsePrecision); + + public GeoTileGridAggregationBuilder(String name) { + super(name); + precision(DEFAULT_PRECISION); + size(DEFAULT_MAX_NUM_CELLS); + shardSize = -1; + } + + public GeoTileGridAggregationBuilder(StreamInput in) throws IOException { + super(in); + } + + @Override + public GeoGridAggregationBuilder precision(int precision) { + this.precision = GeoTileUtils.checkPrecisionRange(precision); + return this; + } + + @Override + protected ValuesSourceAggregatorFactory createFactory( + String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, + SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData + ) throws IOException { + return new GeoTileGridAggregatorFactory(name, config, precision, requiredSize, shardSize, context, parent, + subFactoriesBuilder, metaData); + } + + private GeoTileGridAggregationBuilder(GeoTileGridAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, + Map metaData) { + super(clone, factoriesBuilder, metaData); + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metaData) { + return new GeoTileGridAggregationBuilder(this, factoriesBuilder, metaData); + } + + public static GeoGridAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { + return PARSER.parse(parser, new GeoTileGridAggregationBuilder(aggregationName), null); + } + + @Override + public String getType() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java new file mode 100644 index 0000000000000..d2ff5ed82513c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Aggregates data expressed as geotile longs (for efficiency's sake) but formats results as geotile strings. + */ +public class GeoTileGridAggregator extends GeoGridAggregator { + + GeoTileGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, + int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, pipelineAggregators, metaData); + } + + @Override + InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData) { + return new InternalGeoTileGrid(name, requiredSize, buckets, pipelineAggregators, metaData); + } + + @Override + public InternalGeoTileGrid buildEmptyAggregation() { + return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), pipelineAggregators(), metaData()); + } + + InternalGeoGridBucket newEmptyBucket() { + return new InternalGeoTileGridBucket(0, 0, null); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java new file mode 100644 index 0000000000000..87077a89d6c23 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSource.GeoPoint; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { + + private final int precision; + private final int requiredSize; + private final int shardSize; + + GeoTileGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, + int shardSize, SearchContext context, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData + ) throws IOException { + super(name, config, context, parent, subFactoriesBuilder, metaData); + this.precision = precision; + this.requiredSize = requiredSize; + this.shardSize = shardSize; + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + throws IOException { + final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, + Collections.emptyList(), pipelineAggregators, metaData); + return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + @Override + public InternalAggregation buildEmptyAggregation() { + return aggregation; + } + }; + } + + @Override + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, Aggregator parent, boolean collectsFromSingleBucket, + List pipelineAggregators, Map metaData) throws IOException { + if (collectsFromSingleBucket == false) { + return asMultiBucketAggregator(this, context, parent); + } + CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, GeoTileUtils::longEncode); + return new GeoTileGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, context, parent, + pipelineAggregators, metaData); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java new file mode 100644 index 0000000000000..d85cf6b1a56ce --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.elasticsearch.common.geo.GeoUtils.normalizeLat;
+import static org.elasticsearch.common.geo.GeoUtils.normalizeLon;
+
+/**
+ * Implements geotile key hashing, same as used by many map tile implementations.
+ * The string key is formatted as "zoom/x/y".
+ * The hash value (long) contains all three of those values compacted into a single 64bit value:
+ *   bits 58..63 -- zoom (0..29)
+ *   bits 29..57 -- X tile index (0..2^zoom)
+ *   bits  0..28 -- Y tile index (0..2^zoom)
+ */
+final class GeoTileUtils {
+
+    private GeoTileUtils() {}
+
+    /**
+     * Largest zoom level (precision) to use.
+     * This value cannot be more than (64-5)/2 = 29, because 5 bits are needed for the zoom level itself (0-31).
+     * If the zoom were not stored inside the hash, it would be possible to use up to 32.
+     * Note that changing this value will make serialization binary-incompatible between versions.
+     * Another consideration is that the index optimizes lat/lng storage, losing some precision.
+     * E.g. hash lng=140.74779717298918D lat=45.61884022447444D == "18/233561/93659", but shown as "18/233561/93658"
+     */
+    static final int MAX_ZOOM = 29;
+
+    /**
+     * Bit position of the zoom value within the hash - zoom is stored in the most significant 6 bits of a long number.
+     */
+    private static final int ZOOM_SHIFT = MAX_ZOOM * 2;
+
+    /**
+     * Bit mask to extract just the lowest 29 bits of a long
+     */
+    private static final long X_Y_VALUE_MASK = (1L << MAX_ZOOM) - 1;
+
+    /**
+     * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string.
+     *
+     * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive).
+     *
+     * @param parser {@link XContentParser} to parse the value from
+     * @return int representing precision
+     */
+    static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException {
+        final Object node = parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER)
+            ? Integer.valueOf(parser.intValue())
+            : parser.text();
+        return XContentMapValues.nodeIntegerValue(node);
+    }
+
+    /**
+     * Assert the precision value is within the allowed range, and return it if ok, or throw.
+     */
+    static int checkPrecisionRange(int precision) {
+        if (precision < 0 || precision > MAX_ZOOM) {
+            throw new IllegalArgumentException("Invalid geotile_grid precision of " +
+                precision + ". Must be between 0 and " + MAX_ZOOM + ".");
+        }
+        return precision;
+    }
+
+    /**
+     * Encode lon/lat to the geotile based long format.
+     * The resulting hash stores the tile X and Y coordinates in separate, contiguous bit ranges.
+     * The precision itself is also encoded as a few high bits.
+ */ + static long longEncode(double longitude, double latitude, int precision) { + // Mathematics for this code was adapted from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Java + + // Number of tiles for the current zoom level along the X and Y axis + final long tiles = 1 << checkPrecisionRange(precision); + + long xTile = (long) Math.floor((normalizeLon(longitude) + 180) / 360 * tiles); + + double latSin = Math.sin(Math.toRadians(normalizeLat(latitude))); + long yTile = (long) Math.floor((0.5 - (Math.log((1 + latSin) / (1 - latSin)) / (4 * Math.PI))) * tiles); + + // Edge values may generate invalid values, and need to be clipped. + // For example, polar regions (above/below lat 85.05112878) get normalized. + if (xTile < 0) { + xTile = 0; + } + if (xTile >= tiles) { + xTile = tiles - 1; + } + if (yTile < 0) { + yTile = 0; + } + if (yTile >= tiles) { + yTile = tiles - 1; + } + + // Zoom value is placed in front of all the bits used for the geotile + // e.g. when max zoom is 29, the largest index would use 58 bits (57th..0th), + // leaving 5 bits unused for zoom. See MAX_ZOOM comment above. + return ((long) precision << ZOOM_SHIFT) | (xTile << MAX_ZOOM) | yTile; + } + + /** + * Parse geotile hash as zoom, x, y integers. + */ + private static int[] parseHash(long hash) { + final int zoom = (int) (hash >>> ZOOM_SHIFT); + final int xTile = (int) ((hash >>> MAX_ZOOM) & X_Y_VALUE_MASK); + final int yTile = (int) (hash & X_Y_VALUE_MASK); + return new int[]{zoom, xTile, yTile}; + } + + /** + * Encode to a geotile string from the geotile based long format + */ + static String stringEncode(long hash) { + int[] res = parseHash(hash); + validateZXY(res[0], res[1], res[2]); + return "" + res[0] + "/" + res[1] + "/" + res[2]; + } + + /** + * Decode long hash as a GeoPoint (center of the tile) + */ + static GeoPoint hashToGeoPoint(long hash) { + int[] res = parseHash(hash); + return zxyToGeoPoint(res[0], res[1], res[2]); + } + + /** + * Decode a string bucket key in "zoom/x/y" format to a GeoPoint (center of the tile) + */ + static GeoPoint keyToGeoPoint(String hashAsString) { + final String[] parts = hashAsString.split("/", 4); + if (parts.length != 3) { + throw new IllegalArgumentException("Invalid geotile_grid hash string of " + + hashAsString + ". Must be three integers in a form \"zoom/x/y\"."); + } + + try { + return zxyToGeoPoint(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2])); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid geotile_grid hash string of " + + hashAsString + ". Must be three integers in a form \"zoom/x/y\".", e); + } + } + + /** + * Validates Zoom, X, and Y values, and returns the total number of allowed tiles along the x/y axis. + */ + private static int validateZXY(int zoom, int xTile, int yTile) { + final int tiles = 1 << checkPrecisionRange(zoom); + if (xTile < 0 || yTile < 0 || xTile >= tiles || yTile >= tiles) { + throw new IllegalArgumentException(String.format( + Locale.ROOT, "Zoom/X/Y combination is not valid: %d/%d/%d", zoom, xTile, yTile)); + } + return tiles; + } + + /** + * Converts zoom/x/y integers into a GeoPoint. 
+ */
+    private static GeoPoint zxyToGeoPoint(int zoom, int xTile, int yTile) {
+        final int tiles = validateZXY(zoom, xTile, yTile);
+        final double n = Math.PI - (2.0 * Math.PI * (yTile + 0.5)) / tiles;
+        final double lat = Math.toDegrees(Math.atan(Math.sinh(n)));
+        final double lon = ((xTile + 0.5) / tiles * 360.0) - 180;
+        return new GeoPoint(lat, lon);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
index 0c28788666249..7c874781d0c22 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
@@ -33,8 +33,6 @@
  */
 public class InternalGeoHashGrid extends InternalGeoGrid<InternalGeoHashGridBucket> {
 
-    private static final String NAME = "geohash_grid";
-
     InternalGeoHashGrid(String name, int requiredSize, List<InternalGeoGridBucket> buckets,
                         List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
         super(name, requiredSize, buckets, pipelineAggregators, metaData);
@@ -66,6 +64,6 @@ Reader getBucketReader() {
 
     @Override
     public String getWriteableName() {
-        return NAME;
+        return GeoHashGridAggregationBuilder.NAME;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java
new file mode 100644
index 0000000000000..8a842b66dcfca
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Represents a grid of cells where each cell's location is determined by a geotile.
+ * All geotiles in a grid are of the same precision and held internally as a single long
+ * for efficiency's sake.
+ */ +public class InternalGeoTileGrid extends InternalGeoGrid { + + InternalGeoTileGrid(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData) { + super(name, requiredSize, buckets, pipelineAggregators, metaData); + } + + public InternalGeoTileGrid(StreamInput in) throws IOException { + super(in); + } + + @Override + public InternalGeoGrid create(List buckets) { + return new InternalGeoTileGrid(name, requiredSize, buckets, pipelineAggregators(), metaData); + } + + @Override + public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); + } + + @Override + InternalGeoGrid create(String name, int requiredSize, List buckets, List list, Map metaData) { + return new InternalGeoTileGrid(name, requiredSize, buckets, list, metaData); + } + + @Override + Reader getBucketReader() { + return InternalGeoTileGridBucket::new; + } + + @Override + public String getWriteableName() { + return GeoTileGridAggregationBuilder.NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java new file mode 100644 index 0000000000000..fb9afbaaca4f8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoTileGridBucket.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.InternalAggregations; + +import java.io.IOException; + +public class InternalGeoTileGridBucket extends InternalGeoGridBucket { + InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + super(hashAsLong, docCount, aggregations); + } + + /** + * Read from a stream. 
+ */ + public InternalGeoTileGridBucket(StreamInput in) throws IOException { + super(in); + } + + @Override + InternalGeoTileGridBucket buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, + InternalAggregations aggregations) { + return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); + } + + @Override + public String getKeyAsString() { + return GeoTileUtils.stringEncode(hashAsLong); + } + + @Override + public GeoPoint getKey() { + return GeoTileUtils.hashToGeoPoint(hashAsLong); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java new file mode 100644 index 0000000000000..e88c7ad305433 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoTileGrid extends ParsedGeoGrid { + + private static ObjectParser PARSER = createParser(ParsedGeoTileGrid::new, + ParsedGeoTileGridBucket::fromXContent, ParsedGeoTileGridBucket::fromXContent); + + public static ParsedGeoGrid fromXContent(XContentParser parser, String name) throws IOException { + ParsedGeoGrid aggregation = PARSER.parse(parser, null); + aggregation.setName(name); + return aggregation; + } + + @Override + public String getType() { + return GeoTileGridAggregationBuilder.NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java new file mode 100644 index 0000000000000..d2d18b40e76d1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoTileGridBucket.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoTileGridBucket extends ParsedGeoGridBucket { + + @Override + public GeoPoint getKey() { + return GeoTileUtils.keyToGeoPoint(hashAsString); + } + + @Override + public String getKeyAsString() { + return hashAsString; + } + + static ParsedGeoTileGridBucket fromXContent(XContentParser parser) throws IOException { + return parseXContent(parser, false, ParsedGeoTileGridBucket::new, (p, bucket) -> bucket.hashAsString = p.text()); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 1ff0efd3e8307..877a8e59bc2d3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -133,7 +133,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) { execution = ExecutionMode.MAP; } - final long maxOrd = getMaxOrd(valuesSource, context.searcher()); + final long maxOrd = execution == ExecutionMode.GLOBAL_ORDINALS ? getMaxOrd(valuesSource, context.searcher()) : -1; if (execution == null) { execution = ExecutionMode.GLOBAL_ORDINALS; } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index f5c99fc513759..7085e5ba5868c 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -641,23 +641,23 @@ public SearchSourceBuilder collapse(CollapseBuilder collapse) { * Add an aggregation to perform as part of the search. */ public SearchSourceBuilder aggregation(AggregationBuilder aggregation) { - if (aggregations == null) { + if (aggregations == null) { aggregations = AggregatorFactories.builder(); - } + } aggregations.addAggregator(aggregation); - return this; + return this; } /** * Add an aggregation to perform as part of the search. */ public SearchSourceBuilder aggregation(PipelineAggregationBuilder aggregation) { - if (aggregations == null) { + if (aggregations == null) { aggregations = AggregatorFactories.builder(); - } - aggregations.addPipelineAggregator(aggregation); - return this; } + aggregations.addPipelineAggregator(aggregation); + return this; + } /** * Gets the bytes representing the aggregation builders for this request. 
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 6b7b506114361..132b269b196e0 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -67,6 +67,10 @@ import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestDeduplicator; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -85,7 +89,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.cluster.SnapshotsInProgress.completed; -import static org.elasticsearch.transport.EmptyTransportResponseHandler.INSTANCE_SAME; /** * This service runs on data and master nodes and controls currently snapshotted shards on these nodes. It is responsible for @@ -112,6 +115,10 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private volatile Map> shardSnapshots = emptyMap(); + // A map of snapshots to the shardIds that we already reported to the master as failed + private final TransportRequestDeduplicator remoteFailedRequestDeduplicator = + new TransportRequestDeduplicator<>(); + private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; @@ -272,12 +279,11 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { // Abort all running shards for this snapshot Map snapshotShards = shardSnapshots.get(entry.snapshot()); if (snapshotShards != null) { - final String failure = "snapshot has been aborted"; for (ObjectObjectCursor shard : entry.shards()) { - final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); if (snapshotStatus != null) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = + snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); final Stage stage = lastSnapshotStatus.getStage(); if (stage == Stage.FINALIZE) { logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + @@ -295,6 +301,15 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { } } } + } else { + final Snapshot snapshot = entry.snapshot(); + for (ObjectObjectCursor curr : entry.shards()) { + // due to CS batching we might have missed the INIT state and straight went into ABORTED + // notify master that abort has completed by moving to FAILED + if (curr.value.state() == State.ABORTED) { + notifyFailedSnapshotShard(snapshot, curr.key, localNodeId, curr.value.reason()); + } + } } } } @@ -515,12 +530,33 @@ void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, f /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status) { - try { - UpdateIndexShardSnapshotStatusRequest request = new 
UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); - transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, INSTANCE_SAME); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); - } + remoteFailedRequestDeduplicator.executeOnce( + new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status), + new ActionListener() { + @Override + public void onResponse(Void aVoid) { + logger.trace("[{}] [{}] updated snapshot state", snapshot, status); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); + } + }, + (req, reqListener) -> transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, req, + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty response) { + reqListener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + reqListener.onFailure(exp); + } + }) + ); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index af6d7055e533a..c5b478fa908a9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1210,7 +1210,10 @@ public ClusterState execute(ClusterState currentState) throws Exception { if (state == State.INIT) { // snapshot is still initializing, mark it as aborted shards = snapshotEntry.shards(); - + assert shards.isEmpty(); + // No shards in this snapshot, we delete it right away since the SnapshotShardsService + // has no work to do. + endSnapshot(snapshotEntry); } else if (state == State.STARTED) { // snapshot is started - mark every non completed shard as aborted final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 3e52999cbf382..169f5ae1c0943 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -291,7 +291,6 @@ public void testTransportBroadcastReplicationTasks() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37893") public void testTransportBulkTasks() { registerTaskManageListeners(BulkAction.NAME); // main task registerTaskManageListeners(BulkAction.NAME + "[s]"); // shard task @@ -299,6 +298,8 @@ public void testTransportBulkTasks() { registerTaskManageListeners(BulkAction.NAME + "[s][r]"); // shard task on replica createIndex("test"); ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks + // ensures the mapping is available on all nodes so we won't retry the request (in case replicas don't have the right mapping). 
+ client().admin().indices().preparePutMapping("test").setType("doc").setSource("foo", "type=keyword").get(); client().prepareBulk().add(client().prepareIndex("test", "doc", "test_id") .setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java index 33c0d22473c65..7cfc2ea1f280d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.exists; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -29,26 +28,14 @@ import org.elasticsearch.test.InternalTestCluster; import java.io.IOException; -import java.util.List; -import java.util.stream.Collectors; - -import static org.elasticsearch.node.Node.NODE_MASTER_SETTING; -import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; @ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0, autoMinMasterNodes = false) public class IndicesExistsIT extends ESIntegTestCase { - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - final List masterNodeNames - = allNodesSettings.stream().filter(NODE_MASTER_SETTING::get).map(NODE_NAME_SETTING::get).collect(Collectors.toList()); - return allNodesSettings.stream().map(s -> Settings.builder().put(s) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames).build()).collect(Collectors.toList()); - } - public void testIndexExistsWithBlocksInPlace() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); Settings settings = Settings.builder() .put(GatewayService.RECOVER_AFTER_NODES_SETTING.getKey(), 99).build(); String node = internalCluster().startNode(settings); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index e9cde3f7aadea..9107b75db1798 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.text.Text; @@ -313,9 +314,14 @@ private static AtomicArray generateFetchResults(int nShards, return fetchResults; } + private static SearchRequest randomSearchRequest() { + return randomBoolean() ? new SearchRequest() : SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "remote", 0, randomBoolean()); + } + public void testConsumer() { int bufferSize = randomIntBetween(2, 3); - SearchRequest request = randomBoolean() ? 
new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3); @@ -377,7 +383,7 @@ public void testConsumerConcurrently() throws InterruptedException { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = @@ -424,7 +430,7 @@ public void testConsumerConcurrently() throws InterruptedException { public void testConsumerOnlyAggs() { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0)); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = @@ -460,7 +466,7 @@ public void testConsumerOnlyAggs() { public void testConsumerOnlyHits() { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); if (randomBoolean()) { request.source(new SearchSourceBuilder().size(randomIntBetween(1, 10))); } @@ -493,8 +499,7 @@ public void testConsumerOnlyHits() { private void assertFinalReduction(SearchRequest searchRequest) { assertThat(reductions.size(), greaterThanOrEqualTo(1)); - //the last reduction step was the final one only if no cluster alias was provided with the search request - assertEquals(searchRequest.getLocalClusterAlias() == null, reductions.get(reductions.size() - 1)); + assertEquals(searchRequest.isFinalReduce(), reductions.get(reductions.size() - 1)); } public void testNewSearchPhaseResults() { @@ -568,7 +573,7 @@ public void testReduceTopNWithFromOffset() { public void testConsumerSortByField() { int expectedNumResults = randomIntBetween(1, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); int size = randomIntBetween(1, 10); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = @@ -604,7 +609,7 @@ public void testConsumerSortByField() { public void testConsumerFieldCollapsing() { int expectedNumResults = randomIntBetween(30, 100); int bufferSize = randomIntBetween(2, 200); - SearchRequest request = randomBoolean() ? 
new SearchRequest() : new SearchRequest("remote"); + SearchRequest request = randomSearchRequest(); int size = randomIntBetween(5, 10); request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 1d2d59c60e2ae..c139b75f45c42 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -54,17 +54,20 @@ protected SearchRequest createSearchRequest() throws IOException { } //clusterAlias and absoluteStartMillis do not have public getters/setters hence we randomize them only in this test specifically. return SearchRequest.withLocalReduction(request, request.indices(), - randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong()); + randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong(), randomBoolean()); } public void testWithLocalReduction() { - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(null, Strings.EMPTY_ARRAY, "", 0)); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean())); SearchRequest request = new SearchRequest(); - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, null, "", 0)); - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, new String[]{null}, "", 0)); - expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, null, 0)); - expectThrows(IllegalArgumentException.class, () -> SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, "", -1)); - SearchRequest searchRequest = SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, "", 0); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, null, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, + new String[]{null}, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.withLocalReduction(request, + Strings.EMPTY_ARRAY, null, 0, randomBoolean())); + expectThrows(IllegalArgumentException.class, () -> SearchRequest.withLocalReduction(request, + Strings.EMPTY_ARRAY, "", -1, randomBoolean())); + SearchRequest searchRequest = SearchRequest.withLocalReduction(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean()); assertNull(searchRequest.validate()); } @@ -92,6 +95,12 @@ public void testRandomVersionSerialization() throws IOException { assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis()); } + //TODO move to the 6_7_0 branch once backported to 6.x + if (version.before(Version.V_7_0_0)) { + assertTrue(deserializedRequest.isFinalReduce()); + } else { + assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); + } } public void testReadFromPre6_7_0() throws IOException { @@ -103,6 +112,7 @@ public void testReadFromPre6_7_0() throws IOException { assertNull(searchRequest.getLocalClusterAlias()); assertAbsoluteStartMillisIsCurrentTime(searchRequest); assertTrue(searchRequest.isCcsMinimizeRoundtrips()); + assertTrue(searchRequest.isFinalReduce()); } } 
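The `finalReduce` flag threaded through `SearchRequest.withLocalReduction` above marks whether a reduce round produces the final response or an intermediate one that the coordinating cluster will reduce again. The toy sketch below (plain JDK collections, not the Elasticsearch reduce pipeline) illustrates why a partial reduce of a `terms` aggregation must not trim buckets to the requested size:

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Toy model of partial vs. final reduce for a terms aggregation.
public final class ReduceSketch {

    // Merge per-cluster term counts; trim to 'size' only on the final reduce.
    static Map<String, Long> reduce(List<Map<String, Long>> partials, int size, boolean finalReduce) {
        Map<String, Long> merged = new HashMap<>();
        for (Map<String, Long> partial : partials) {
            partial.forEach((term, count) -> merged.merge(term, count, Long::sum));
        }
        if (finalReduce == false) {
            return merged; // keep every bucket: another reduce round is coming
        }
        return merged.entrySet().stream()
            .sorted(Map.Entry.<String, Long>comparingByValue().reversed())
            .limit(size)
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                Long::sum, LinkedHashMap::new));
    }

    public static void main(String[] args) {
        // Two clusters, each having seen one distinct term once.
        List<Map<String, Long>> partials = List.of(Map.of("10", 1L), Map.of("100", 1L));
        System.out.println(reduce(partials, 1, false)); // both buckets survive the partial reduce
        System.out.println(reduce(partials, 1, true));  // the final reduce trims to size(1)
    }
}
--------------------------------------------------

With two single-occurrence terms and `size(1)`, trimming during the partial reduce could discard the bucket that would win once counts from the other clusters are merged in; `testFinalReduce` in the next file pins down exactly this difference.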
diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java index 8fd75c5fd673d..ed14d11946f75 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java @@ -27,13 +27,17 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; public class TransportSearchActionSingleNodeTests extends ESSingleNodeTestCase { public void testLocalClusterAlias() { - long nowInMillis = System.currentTimeMillis(); + long nowInMillis = randomLongBetween(0, Long.MAX_VALUE); IndexRequest indexRequest = new IndexRequest("test"); indexRequest.id("1"); indexRequest.source("field", "value"); @@ -42,7 +46,8 @@ public void testLocalClusterAlias() { assertEquals(RestStatus.CREATED, indexResponse.status()); { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "local", nowInMillis); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, + "local", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); SearchHit[] hits = searchResponse.getHits().getHits(); @@ -53,7 +58,8 @@ public void testLocalClusterAlias() { assertEquals("1", hit.getId()); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", nowInMillis); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, + "", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); SearchHit[] hits = searchResponse.getHits().getHits(); @@ -94,19 +100,22 @@ public void testAbsoluteStartMillis() { assertEquals(0, searchResponse.getTotalShards()); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); } { - SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "", 0, randomBoolean()); searchRequest.indices(""); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex()); } { - SearchRequest searchRequest = 
SearchRequest.withLocalReduction(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0); + SearchRequest searchRequest = SearchRequest.withLocalReduction(new SearchRequest(), + Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); rangeQuery.gte("1970-01-01"); @@ -118,4 +127,50 @@ public void testAbsoluteStartMillis() { assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex()); } } + + public void testFinalReduce() { + long nowInMillis = randomLongBetween(0, Long.MAX_VALUE); + { + IndexRequest indexRequest = new IndexRequest("test"); + indexRequest.id("1"); + indexRequest.source("price", 10); + IndexResponse indexResponse = client().index(indexRequest).actionGet(); + assertEquals(RestStatus.CREATED, indexResponse.status()); + } + { + IndexRequest indexRequest = new IndexRequest("test"); + indexRequest.id("2"); + indexRequest.source("price", 100); + IndexResponse indexResponse = client().index(indexRequest).actionGet(); + assertEquals(RestStatus.CREATED, indexResponse.status()); + } + client().admin().indices().prepareRefresh("test").get(); + + SearchRequest originalRequest = new SearchRequest(); + SearchSourceBuilder source = new SearchSourceBuilder(); + source.size(0); + originalRequest.source(source); + TermsAggregationBuilder terms = new TermsAggregationBuilder("terms", ValueType.NUMERIC); + terms.field("price"); + terms.size(1); + source.aggregation(terms); + + { + SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.withLocalReduction(originalRequest, + Strings.EMPTY_ARRAY, "remote", nowInMillis, true); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + Aggregations aggregations = searchResponse.getAggregations(); + LongTerms longTerms = aggregations.get("terms"); + assertEquals(1, longTerms.getBuckets().size()); + } + { + SearchRequest searchRequest = SearchRequest.withLocalReduction(originalRequest, + Strings.EMPTY_ARRAY, "remote", nowInMillis, false); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(2, searchResponse.getHits().getTotalHits().value); + Aggregations aggregations = searchResponse.getAggregations(); + LongTerms longTerms = aggregations.get("terms"); + assertEquals(2, longTerms.getBuckets().size()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 8a5859e200eac..9a9524d0ff57e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -402,7 +402,7 @@ public void testCCSRemoteReduceMergeFails() throws Exception { } public void testCCSRemoteReduce() throws Exception { - int numClusters = randomIntBetween(2, 10); + int numClusters = randomIntBetween(1, 10); DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); @@ -440,7 +440,7 @@ public void testCCSRemoteReduce() throws Exception { assertEquals(0, searchResponse.getClusters().getSkipped()); assertEquals(totalClusters, searchResponse.getClusters().getTotal()); assertEquals(totalClusters, searchResponse.getClusters().getSuccessful()); - assertEquals(totalClusters + 1, searchResponse.getNumReducePhases()); 
+ assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases()); } { SearchRequest searchRequest = new SearchRequest(); @@ -510,7 +510,6 @@ public void onNodeDisconnected(DiscoveryNode node) { awaitLatch(latch, 5, TimeUnit.SECONDS); assertNotNull(failure.get()); assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster [")); assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class)); } @@ -583,7 +582,7 @@ public void onNodeDisconnected(DiscoveryNode node) { assertEquals(0, searchResponse.getClusters().getSkipped()); assertEquals(totalClusters, searchResponse.getClusters().getTotal()); assertEquals(totalClusters, searchResponse.getClusters().getSuccessful()); - assertEquals(totalClusters + 1, searchResponse.getNumReducePhases()); + assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases()); } assertEquals(0, service.getConnectionManager().size()); } finally { diff --git a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 461c92d69f444..6b51836b4381d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -72,12 +71,12 @@ public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwabl final Settings sharedSettings = Settings.builder() .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) - .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out + .put("cluster.join.timeout", "10s") // still long to induce failures but not too long so test won't time out .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .build(); + internalCluster().setBootstrapMasterNodeIndex(2); + internalCluster().startMasterOnlyNodes(3, sharedSettings); String dataNode = internalCluster().startDataOnlyNode(sharedSettings); diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 26b8ae88d266d..4b23cd223a9dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; import 
org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -35,7 +34,6 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -46,12 +44,10 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -68,8 +64,6 @@ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") public class MinimumMasterNodesIT extends ESIntegTestCase { - private int bootstrapNodeId; - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { final HashSet<Class<? extends Plugin>> classes = new HashSet<>(super.nodePlugins()); @@ -77,28 +71,8 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return classes; } - @Override - protected List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) { - if (internalCluster().size() + allNodesSettings.size() == bootstrapNodeId) { - List<String> nodeNames = new ArrayList<>(); - Collections.addAll(nodeNames, internalCluster().getNodeNames()); - allNodesSettings.forEach(settings -> nodeNames.add(Node.NODE_NAME_SETTING.get(settings))); - - List<Settings> otherNodesSettings = allNodesSettings.subList(0, allNodesSettings.size() - 1); - Settings lastNodeSettings = allNodesSettings.get(allNodesSettings.size()-1); - List<Settings> newSettings = new ArrayList<>(); - newSettings.addAll(otherNodesSettings); - newSettings.add(Settings.builder().put(lastNodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) - .build()); - return newSettings; - } - return allNodesSettings; - } - public void testTwoNodesNoMasterBlock() throws Exception { - //bootstrap cluster once second node is started - bootstrapNodeId = 2; + internalCluster().setBootstrapMasterNodeIndex(1); Settings settings = Settings.builder() .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") @@ -231,8 +205,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { } public void testThreeNodesNoMasterBlock() throws Exception { - //bootstrap cluster once 3rd node is started - bootstrapNodeId = 3; + internalCluster().setBootstrapMasterNodeIndex(2); Settings settings = Settings.builder() .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s") @@ -307,8 +280,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { } public void testCannotCommitStateThreeNodes() throws Exception { - //bootstrap cluster once 3rd node is started - bootstrapNodeId = 3; + internalCluster().setBootstrapMasterNodeIndex(2); Settings settings = Settings.builder() .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms") diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java 
index 8758e169b5124..071c8a0195531 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -22,7 +22,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -35,8 +34,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; -import java.util.Collections; -import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -46,20 +43,8 @@ @TestLogging("_root:DEBUG,org.elasticsearch.action.admin.cluster.state:TRACE") public class SpecificMasterNodesIT extends ESIntegTestCase { - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - // if it's the first master in the cluster bootstrap the cluster with this node name - Settings settings = allNodesSettings.get(0); - if (internalCluster().numMasterNodes() == 0 && settings.getAsBoolean(Node.NODE_MASTER_SETTING.getKey(), false)) { - return Collections.singletonList(Settings.builder() - .put(settings) - .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), settings.get(Node.NODE_NAME_SETTING.getKey())) - .build()); - } - return allNodesSettings; - } - public void testSimpleOnlyMasterNodeElection() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) .put(Node.NODE_MASTER_SETTING.getKey(), false) @@ -100,6 +85,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { } public void testElectOnlyBetweenMasterNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) .put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); @@ -146,6 +132,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { } public void testAliasFilterValidation() { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start master node / non data"); internalCluster().startNode(Settings.builder() .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index cf8e1737a7708..6e90aed5f74bf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -201,6 +201,42 @@ private static ClusterState state(DiscoveryNode localNode, String[] acceptedConf .lastCommittedConfiguration(config(committedConfig)).build())).build(); } + + public void testDescriptionAfterDetachCluster() { + final 
DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + + final ClusterState clusterState = state(localNode, + VotingConfiguration.MUST_JOIN_ELECTED_MASTER.getNodeIds().toArray(new String[0])); + + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered []; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + final TransportAddress otherAddress = buildNewFakeTransportAddress(); + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, singletonList(otherAddress), emptyList(), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered []; " + + "discovery will continue using [" + otherAddress + "] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + final DiscoveryNode otherNode = new DiscoveryNode("otherNode", buildNewFakeTransportAddress(), Version.CURRENT); + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(otherNode), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered [" + otherNode + "]; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + final DiscoveryNode yetAnotherNode = new DiscoveryNode("yetAnotherNode", buildNewFakeTransportAddress(), Version.CURRENT); + assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(yetAnotherNode), 0L).getDescription(), + is("master not discovered yet and this node was detached from its previous cluster, " + + "have discovered [" + yetAnotherNode + "]; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + } + public void testDescriptionAfterBootstrapping() { final DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index c3028de1801da..93c89cfafabd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1048,15 +1048,9 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti } assertTrue(newNode.getLastAppliedClusterState().version() == 0); - // reset clusterUUIDCommitted (and node / cluster state term) to let node join again - // TODO: use elasticsearch-node detach-cluster tool once it's implemented final ClusterNode detachedNode = newNode.restartedNode( - metaData -> MetaData.builder(metaData) - .clusterUUIDCommitted(false) - .coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()) - .term(0L).build()) - .build(), - term -> 0L); + metaData -> DetachClusterCommand.updateMetaData(metaData), + term -> DetachClusterCommand.updateCurrentTerm()); 
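// The DetachClusterCommand hooks just above replace metadata surgery that this
// test previously performed inline. Inferred only from the deleted inline logic
// (not a verbatim copy of the command's implementation), their effect is
// approximately the following sketch: detaching clears the committed cluster
// UUID and resets the coordination term so the node can later join or
// bootstrap a different cluster.
//
//     static MetaData updateMetaData(MetaData oldMetaData) {
//         return MetaData.builder(oldMetaData)
//             .clusterUUIDCommitted(false)
//             .coordinationMetaData(CoordinationMetaData.builder(oldMetaData.coordinationMetaData())
//                 .term(0L)
//                 .build())
//             .build();
//     }
//
//     static long updateCurrentTerm() {
//         return 0L; // the node-level current term is reset alongside the metadata term
//     }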
cluster1.clusterNodes.replaceAll(cn -> cn == newNode ? detachedNode : cn); cluster1.stabilise(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java new file mode 100644 index 0000000000000..ae8eba050020a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java @@ -0,0 +1,418 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") +public class ElasticsearchNodeCommandIT extends ESIntegTestCase { + + private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, int nodeOrdinal, boolean abort) + throws Exception { + final MockTerminal terminal = new MockTerminal(); + final OptionSet options = command.getParser().parse("-ordinal", Integer.toString(nodeOrdinal)); + final String input; + + if (abort) { + input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); + } else { + input = randomBoolean() ? 
"y" : "Y"; + } + + terminal.addTextInput(input); + + try { + command.execute(terminal, options, environment); + } finally { + assertThat(terminal.getOutput(), containsString(ElasticsearchNodeCommand.STOP_WARNING_MSG)); + } + + return terminal; + } + + private MockTerminal unsafeBootstrap(Environment environment, int nodeOrdinal, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, nodeOrdinal, abort); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); + return terminal; + } + + private MockTerminal detachCluster(Environment environment, int nodeOrdinal, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, nodeOrdinal, abort); + assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG)); + assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG)); + return terminal; + } + + private MockTerminal unsafeBootstrap(Environment environment) throws Exception { + return unsafeBootstrap(environment, 0, false); + } + + private MockTerminal detachCluster(Environment environment) throws Exception { + return detachCluster(environment, 0, false); + } + + private void expectThrows(ThrowingRunnable runnable, String message) { + ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); + assertThat(ex.getMessage(), containsString(message)); + } + + public void testBootstrapNotMasterEligible() { + final Environment environment = TestEnvironment.newEnvironment(Settings.builder() + .put(internalCluster().getDefaultSettings()) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build()); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); + } + + public void testBootstrapNoDataFolder() { + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG); + } + + public void testDetachNoDataFolder() { + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG); + } + + public void testBootstrapNodeLocked() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + } + } + + public void testDetachNodeLocked() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + } + } + + public void testBootstrapNoNodeMetaData() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(envSettings, 
environment)) { + NodeMetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + } + + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NO_NODE_METADATA_FOUND_MSG); + } + + public void testBootstrapNotBootstrappedCluster() throws Exception { + internalCluster().startNode( + Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup + .build()); + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); + } + + public void testDetachNotBootstrappedCluster() throws Exception { + internalCluster().startNode( + Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup + .build()); + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG); + } + + public void testBootstrapNoManifestFile() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); + } + + public void testDetachNoManifestFile() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG); + } + + public void testBootstrapNoMetaData() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); + } + + public void testDetachNoMetaData() throws IOException { + 
internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG); + } + + public void testBootstrapAbortedByUser() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + } + + public void testDetachAbortedByUser() throws IOException { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startNode(); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> detachCluster(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG); + } + + public void test3MasterNodes2Failed() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(2); + List<String> masterNodes = new ArrayList<>(); + + logger.info("--> start 1st master-eligible node"); + masterNodes.add(internalCluster().startMasterOnlyNode(Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + .build())); // node ordinal 0 + + logger.info("--> start one data-only node"); + String dataNode = internalCluster().startDataOnlyNode(Settings.builder() + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + .build()); // node ordinal 1 + + logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap"); + masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 + + logger.info("--> create index test"); + createIndex("test"); + + logger.info("--> stop 2nd and 3rd master-eligible nodes"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); + + logger.info("--> ensure NO_MASTER_BLOCK on data-only node"); + assertBusy(() -> { + ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + logger.info("--> try to unsafely bootstrap 1st master-eligible node while node lock is held"); + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + + logger.info("--> stop 1st master-eligible node and data-only node"); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); + internalCluster().stopRandomDataNode(); + + logger.info("--> unsafely-bootstrap 1st master-eligible node"); + MockTerminal terminal = unsafeBootstrap(environment); + MetaData 
metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths()); + assertThat(terminal.getOutput(), containsString( + String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, + metaData.coordinationMetaData().term(), metaData.version()))); + + logger.info("--> start 1st master-eligible node"); + internalCluster().startMasterOnlyNode(); + + logger.info("--> detach-cluster on data-only node"); + detachCluster(environment, 1, false); + + logger.info("--> start data-only node"); + String dataNode2 = internalCluster().startDataOnlyNode(); + + logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state"); + assertBusy(() -> { + ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertFalse(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + assertTrue(state.metaData().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); + }); + + logger.info("--> ensure index test is green"); + ensureGreen("test"); + + logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes"); + detachCluster(environment, 2, false); + detachCluster(environment, 3, false); + + logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster"); + internalCluster().startMasterOnlyNodes(2); + ensureStableCluster(4); + } + + public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + + logger.info("--> start mixed data and master-eligible node and bootstrap cluster"); + String masterNode = internalCluster().startNode(); // node ordinal 0 + + logger.info("--> start data-only node and ensure 2 nodes stable cluster"); + String dataNode = internalCluster().startDataOnlyNode(); // node ordinal 1 + ensureStableCluster(2); + + logger.info("--> index 1 doc and ensure index is green"); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + ensureGreen("test"); + + logger.info("--> verify 1 doc in the index"); + assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); + assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); + + logger.info("--> stop data-only node and detach it from the old cluster"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + detachCluster(environment, 1, false); + + logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form"); + internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { + @Override + public boolean clearData(String nodeName) { + return true; + } + }); + + logger.info("--> start data-only node and ensure 2 nodes stable cluster"); + internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + logger.info("--> verify that the dangling index exists and has green status"); + assertBusy(() -> { + assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true)); + }); + ensureGreen("test"); + + logger.info("--> verify the doc is there"); + assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), 
equalTo(true)); + } + + public void testNoInitialBootstrapAfterDetach() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startMasterOnlyNode(); + internalCluster().stopCurrentMasterNode(); + + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + detachCluster(environment); + + String node = internalCluster().startMasterOnlyNode(Settings.builder() + // give the cluster 2 seconds to elect the master (it should not) + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s") + .build()); + + ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node)); + } + + public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLosingMetaData() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startMasterOnlyNode(); + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")); + internalCluster().client().admin().cluster().updateSettings(req).get(); + + ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); + assertThat(state.metaData().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("1234kb")); + + internalCluster().stopCurrentMasterNode(); + + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + detachCluster(environment); + unsafeBootstrap(environment); + + internalCluster().startMasterOnlyNode(); + ensureStableCluster(1); + + state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); + assertThat(state.metaData().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("1234kb")); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java deleted file mode 100644 index 334d392b1793d..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.cluster.coordination; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.Manifest; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.NodeMetaData; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.node.Node; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Locale; - -import static org.hamcrest.Matchers.containsString; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") -public class UnsafeBootstrapMasterIT extends ESIntegTestCase { - - private int bootstrapNodeId; - - @Before - public void resetBootstrapNodeId() { - bootstrapNodeId = -1; - } - - /** - * Performs cluster bootstrap when node with id bootstrapNodeId is started. - * Any node of the batch could be selected as bootstrap target. - */ - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (internalCluster().size() + allNodesSettings.size() == bootstrapNodeId) { - List nodeNames = new ArrayList<>(); - Collections.addAll(nodeNames, internalCluster().getNodeNames()); - allNodesSettings.forEach(settings -> nodeNames.add(Node.NODE_NAME_SETTING.get(settings))); - - List newSettings = new ArrayList<>(); - int bootstrapIndex = randomInt(allNodesSettings.size() - 1); - for (int i = 0; i < allNodesSettings.size(); i++) { - Settings nodeSettings = allNodesSettings.get(i); - if (i == bootstrapIndex) { - newSettings.add(Settings.builder().put(nodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) - .build()); - } else { - newSettings.add(nodeSettings); - } - } - - return newSettings; - } - return allNodesSettings; - } - - private MockTerminal executeCommand(Environment environment, boolean abort) throws Exception { - final UnsafeBootstrapMasterCommand command = new UnsafeBootstrapMasterCommand(); - final MockTerminal terminal = new MockTerminal(); - final OptionParser parser = new OptionParser(); - final OptionSet options = parser.parse(); - final String input; - - if (abort) { - input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); - } else { - input = randomBoolean() ? 
"y" : "Y"; - } - - terminal.addTextInput(input); - - try { - command.execute(terminal, options, environment); - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); - } finally { - assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.STOP_WARNING_MSG)); - } - - return terminal; - } - - private MockTerminal executeCommand(Environment environment) throws Exception { - return executeCommand(environment, false); - } - - private void expectThrows(ThrowingRunnable runnable, String message) { - ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); - assertThat(ex.getMessage(), containsString(message)); - } - - public void testNotMasterEligible() { - final Environment environment = TestEnvironment.newEnvironment(Settings.builder() - .put(internalCluster().getDefaultSettings()) - .put(Node.NODE_MASTER_SETTING.getKey(), false) - .build()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); - } - - public void testNoDataFolder() { - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_NODE_FOLDER_FOUND_MSG); - } - - public void testNodeLocked() throws IOException { - Settings envSettings = buildEnvSettings(Settings.EMPTY); - Environment environment = TestEnvironment.newEnvironment(envSettings); - try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); - } - } - - public void testNoNodeMetaData() throws IOException { - Settings envSettings = buildEnvSettings(Settings.EMPTY); - Environment environment = TestEnvironment.newEnvironment(envSettings); - try (NodeEnvironment nodeEnvironment = new NodeEnvironment(envSettings, environment)) { - NodeMetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - } - - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_NODE_METADATA_FOUND_MSG); - } - - public void testNotBootstrappedCluster() throws Exception { - internalCluster().startNode( - Settings.builder() - .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup - .build()); - assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); - }); - - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.GLOBAL_GENERATION_MISSING_MSG); - } - - public void testNoManifestFile() throws IOException { - bootstrapNodeId = 1; - internalCluster().startNode(); - ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomDataNode(); - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_MANIFEST_FILE_FOUND_MSG); - } - - public void testNoMetaData() throws IOException { - bootstrapNodeId = 1; - 
internalCluster().startNode(); - ensureStableCluster(1); - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); - - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_GLOBAL_METADATA_MSG); - } - - public void testAbortedByUser() throws IOException { - bootstrapNodeId = 1; - internalCluster().startNode(); - ensureStableCluster(1); - internalCluster().stopRandomDataNode(); - - Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment, true), UnsafeBootstrapMasterCommand.ABORTED_BY_USER_MSG); - } - - public void test3MasterNodes2Failed() throws Exception { - bootstrapNodeId = 3; - List masterNodes = internalCluster().startMasterOnlyNodes(3, Settings.EMPTY); - - String dataNode = internalCluster().startDataOnlyNode(); - createIndex("test"); - - Client dataNodeClient = internalCluster().client(dataNode); - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); - - assertBusy(() -> { - ClusterState state = dataNodeClient.admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); - }); - - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); - expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); - - NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); - - MockTerminal terminal = executeCommand(environment); - - MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths()); - assertThat(terminal.getOutput(), containsString( - String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, - metaData.coordinationMetaData().term(), metaData.version()))); - - internalCluster().startMasterOnlyNode(); - - assertBusy(() -> { - ClusterState state = dataNodeClient.admin().cluster().prepareState().setLocal(true) - .execute().actionGet().getState(); - assertFalse(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); - assertTrue(state.metaData().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); - }); - - ensureGreen("test"); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index aadd48a9bd50d..7ed3f45e505f9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -328,11 +328,16 @@ public void testClusterStateUpdateLogging() throws Exception { MasterService.class.getCanonicalName(), Level.DEBUG, "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + 
"test4", + MasterService.class.getCanonicalName(), + Level.DEBUG, + "*processing [test4]: took [0s] no change in cluster state")); Logger clusterLogger = LogManager.getLogger(MasterService.class); Loggers.addAppender(clusterLogger, mockAppender); try { - final CountDownLatch latch = new CountDownLatch(4); masterService.currentTimeOverride = System.nanoTime(); masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override @@ -342,9 +347,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } @Override public void onFailure(String source, Exception e) { @@ -364,9 +367,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } @Override - public void onFailure(String source, Exception e) { - latch.countDown(); - } + public void onFailure(String source, Exception e) { } }); masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override @@ -376,9 +377,7 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } @Override public void onFailure(String source, Exception e) { @@ -394,21 +393,18 @@ public ClusterState execute(ClusterState currentState) { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } @Override public void onFailure(String source, Exception e) { fail(); } }); - latch.await(); + assertBusy(mockAppender::assertAllExpectationsMatched); } finally { Loggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } - mockAppender.assertAllExpectationsMatched(); } public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index a0fcf988ca811..cd92061ae25d5 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -62,11 +62,14 @@ public void testTimeZoneFormatting() { formatter3.parse("20181126T121212.123-0830"); } + public void testCustomTimeFormats() { + assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); + assertSameDate("12/06", "dd/MM"); + assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); + } + // this test requires tests to run with -Djava.locale.providers=COMPAT in order to work -// public void testCustomTimeFormats() { -// assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); -// assertSameDate("12/06", "dd/MM"); -// assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); +// public void testCustomLocales() { // // // also ensure that locale based dates are the same // assertSameDate("Di., 05 Dez. 
2000 02:55:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 90a9a76e6a4f9..e573a2ede6bdb 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -54,6 +54,11 @@ public void testEpochMillisParser() { assertThat(instant.getEpochSecond(), is(0L)); assertThat(instant.getNano(), is(0)); } + { + Instant instant = Instant.from(formatter.parse("123.123456")); + assertThat(instant.getEpochSecond(), is(0L)); + assertThat(instant.getNano(), is(123123456)); + } } public void testInvalidEpochMilliParser() { @@ -68,17 +73,27 @@ public void testInvalidEpochMilliParser() { // this is not in the duelling tests, because the epoch second parser in joda time drops the milliseconds after the comma // but is able to parse the rest // as this feature is supported it also makes sense to make it exact - public void testEpochSecondParser() { + public void testEpochSecondParserWithFraction() { DateFormatter formatter = DateFormatters.forPattern("epoch_second"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.1234567890")); + TemporalAccessor accessor = formatter.parse("1234.1"); + Instant instant = DateFormatters.from(accessor).toInstant(); + assertThat(instant.getEpochSecond(), is(1234L)); + assertThat(DateFormatters.from(accessor).toInstant().getNano(), is(100_000_000)); + + accessor = formatter.parse("1234"); + instant = DateFormatters.from(accessor).toInstant(); + assertThat(instant.getEpochSecond(), is(1234L)); + assertThat(instant.getNano(), is(0)); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), is("failed to parse date field [abc] with format [epoch_second]")); + + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.abc")); + assertThat(e.getMessage(), is("failed to parse date field [1234.abc] with format [epoch_second]")); + + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.1234567890")); assertThat(e.getMessage(), is("failed to parse date field [1234.1234567890] with format [epoch_second]")); - e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("1234.123456789013221")); - assertThat(e.getMessage(), containsString("[1234.123456789013221]")); - e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("abc")); - assertThat(e.getMessage(), containsString("[abc]")); - e = expectThrows(IllegalArgumentException .class, () -> formatter.parse("1234.abc")); - assertThat(e.getMessage(), containsString("[1234.abc]")); } public void testEpochMilliParsersWithDifferentFormatters() { @@ -147,6 +162,26 @@ public void testSupportBackwardsJava8Format() { assertThat(formatter, instanceOf(JavaDateFormatter.class)); } + public void testEpochFormatting() { + long seconds = randomLongBetween(0, 130L * 365 * 86400); // from 1970 epoch till around 2100 + long nanos = randomLongBetween(0, 999_999_999L); + Instant instant = Instant.ofEpochSecond(seconds, nanos); + + DateFormatter millisFormatter = DateFormatter.forPattern("epoch_millis"); + String millis = millisFormatter.format(instant); + Instant millisInstant = Instant.from(millisFormatter.parse(millis)); + assertThat(millisInstant.toEpochMilli(), 
is(instant.toEpochMilli())); + assertThat(millisFormatter.format(Instant.ofEpochSecond(42, 0)), is("42000")); + assertThat(millisFormatter.format(Instant.ofEpochSecond(42, 123456789L)), is("42123.456789")); + + DateFormatter secondsFormatter = DateFormatter.forPattern("epoch_second"); + String formattedSeconds = secondsFormatter.format(instant); + Instant secondsInstant = Instant.from(secondsFormatter.parse(formattedSeconds)); + assertThat(secondsInstant.getEpochSecond(), is(instant.getEpochSecond())); + + assertThat(secondsFormatter.format(Instant.ofEpochSecond(42, 0)), is("42")); + } + public void testParsingStrictNanoDates() { DateFormatter formatter = DateFormatters.forPattern("strict_date_optional_time_nanos"); formatter.format(formatter.parse("2016-01-01T00:00:00.000")); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 0a9016c20111b..78b6b81189c2b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; @@ -63,7 +62,6 @@ import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -358,27 +356,6 @@ public void onFailure(Exception e) { } } - public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { - // test for https://github.com/elastic/elasticsearch/issues/8823 - Settings zen1Settings = Settings.builder().put(TestZenDiscovery.USE_ZEN2.getKey(), false).build(); // TODO: needs adaptions for Zen2 - String masterNode = internalCluster().startMasterOnlyNode(zen1Settings); - internalCluster().startDataOnlyNode(zen1Settings); - ensureStableCluster(2); - assertAcked(prepareCreate("index").setSettings(Settings.builder().put("index.number_of_replicas", 0))); - index("index", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - ensureGreen(); - - internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { - @Override - public boolean clearData(String nodeName) { - return true; - } - }); - - ensureGreen("index"); - assertTrue(client().prepareGet("index", "_doc", "1").get().isExists()); - } - public void testCannotJoinIfMasterLostDataFolder() throws Exception { String masterNode = internalCluster().startMasterOnlyNode(); String dataNode = internalCluster().startDataOnlyNode(); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 718904eecb5bb..fc9450e982636 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ 
b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -71,76 +70,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 - */ - public void testFailWithMinimumMasterNodesConfigured() throws Exception { - List nodes = startCluster(3); - - // Figure out what is the elected master node - final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node={}", masterNode); - - // Pick a node that isn't the elected master. - Set nonMasters = new HashSet<>(nodes); - nonMasters.remove(masterNode); - final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY)); - - - // Simulate a network issue between the unlucky node and elected master node in both directions. - - NetworkDisruption networkDisconnect = new NetworkDisruption( - new NetworkDisruption.TwoPartitions(masterNode, unluckyNode), - new NetworkDisruption.NetworkDisconnect()); - setDisruptionScheme(networkDisconnect); - networkDisconnect.startDisrupting(); - - // Wait until elected master has removed that the unlucky node... - ensureStableCluster(2, masterNode); - - // The unlucky node must report *no* master node, since it can't connect to master and in fact it should - // continuously ping until network failures have been resolved. However - // It may a take a bit before the node detects it has been cut off from the elected master - assertNoMaster(unluckyNode); - - networkDisconnect.stopDisrupting(); - - // Wait until the master node sees all 3 nodes again. - ensureStableCluster(3); - - // The elected master shouldn't have changed, since the unlucky node never could have elected himself as - // master since m_m_n of 2 could never be satisfied. - assertMaster(masterNode, nodes); - } - - /** - * Verify that nodes fault detection works after master (re) election - */ - public void testNodesFDAfterMasterReelection() throws Exception { - startCluster(4); - - logger.info("--> stopping current master"); - internalCluster().stopCurrentMasterNode(); - - ensureStableCluster(3); - - String master = internalCluster().getMasterName(); - String nonMaster = null; - for (String node : internalCluster().getNodeNames()) { - if (!node.equals(master)) { - nonMaster = node; - } - } - - logger.info("--> isolating [{}]", nonMaster); - NetworkDisruption.TwoPartitions partitions = isolateNode(nonMaster); - NetworkDisruption networkDisruption = addRandomDisruptionType(partitions); - networkDisruption.startDisrupting(); - - logger.info("--> waiting for master to remove it"); - ensureStableCluster(2, master); - } - /** * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes * that already are following another elected master node. 
These nodes should reject this cluster state and prevent diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java new file mode 100644 index 0000000000000..b5177b1ce3e47 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -0,0 +1,170 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.discovery; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.cluster.coordination.FollowersChecker; +import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; +import org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService.TestPlugin; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests relating to the loss of the master that work with the default fault detection settings, which are rather lenient and will + * not detect a master failure too quickly. + */ +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +public class StableMasterDisruptionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(TestPlugin.class); + } + + /** + * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 + */ + public void testFailWithMinimumMasterNodesConfigured() throws Exception { + List nodes = internalCluster().startNodes(3); + ensureStableCluster(3); + + // Figure out which node is the elected master + final String masterNode = internalCluster().getMasterName(); + logger.info("---> legit elected master node={}", masterNode); + + // Pick a node that isn't the elected master. 
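+ // Any non-master node will do; the test only needs one victim to cut off from the elected master.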
+ Set nonMasters = new HashSet<>(nodes); + nonMasters.remove(masterNode); + final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY)); + + // Simulate a network issue between the unlucky node and elected master node in both directions. + + NetworkDisruption networkDisconnect = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(masterNode, unluckyNode), + new NetworkDisruption.NetworkDisconnect()); + setDisruptionScheme(networkDisconnect); + networkDisconnect.startDisrupting(); + + // Wait until the elected master has removed the unlucky node... + ensureStableCluster(2, masterNode); + + // The unlucky node must report *no* master node, since it can't connect to the master and in fact it should + // continuously ping until network failures have been resolved. However, + // it may take a bit before the node detects it has been cut off from the elected master + assertBusy(() -> assertNull(client(unluckyNode).admin().cluster().state( + new ClusterStateRequest().local(true)).get().getState().nodes().getMasterNode())); + + networkDisconnect.stopDisrupting(); + + // Wait until the master node sees all 3 nodes again. + ensureStableCluster(3); + + // The elected master shouldn't have changed, since the unlucky node never could have elected itself as master + assertThat(internalCluster().getMasterName(), equalTo(masterNode)); + } + + /** + * Verify that the follower checker detects an unresponsive node after master (re)election + */ + public void testFollowerCheckerDetectsUnresponsiveNodeAfterMasterReelection() throws Exception { + internalCluster().startNodes(4, + Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), "10") + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1).build()); + ensureStableCluster(4); + + logger.info("--> stopping current master"); + internalCluster().stopCurrentMasterNode(); + + ensureStableCluster(3); + + final String master = internalCluster().getMasterName(); + final List nonMasters = Arrays.stream(internalCluster().getNodeNames()).filter(n -> master.equals(n) == false) + .collect(Collectors.toList()); + final String isolatedNode = randomFrom(nonMasters); + final String otherNode = nonMasters.get(nonMasters.get(0).equals(isolatedNode) ? 
1 : 0); + + logger.info("--> isolating [{}]", isolatedNode); + + final NetworkDisruption networkDisruption = new NetworkDisruption(new NetworkDisruption.TwoPartitions( + singleton(isolatedNode), Sets.newHashSet(master, otherNode)), new NetworkUnresponsive()); + setDisruptionScheme(networkDisruption); + networkDisruption.startDisrupting(); + + logger.info("--> waiting for master to remove it"); + ensureStableCluster(2, master); + + networkDisruption.stopDisrupting(); + ensureStableCluster(3); + } + + /** + * Verify that the follower checker detects a disconnected node after master (re)election + */ + public void testFollowerCheckerDetectsDisconnectedNodeAfterMasterReelection() throws Exception { + internalCluster().startNodes(4); + ensureStableCluster(4); + + logger.info("--> stopping current master"); + internalCluster().stopCurrentMasterNode(); + + ensureStableCluster(3); + + final String master = internalCluster().getMasterName(); + final List nonMasters = Arrays.stream(internalCluster().getNodeNames()).filter(n -> master.equals(n) == false) + .collect(Collectors.toList()); + final String isolatedNode = randomFrom(nonMasters); + final String otherNode = nonMasters.get(nonMasters.get(0).equals(isolatedNode) ? 1 : 0); + + logger.info("--> isolating [{}]", isolatedNode); + + final NetworkDisruption networkDisruption = new NetworkDisruption(new NetworkDisruption.TwoPartitions( + singleton(isolatedNode), Stream.of(master, otherNode).collect(Collectors.toSet())), new NetworkDisconnect()); + setDisruptionScheme(networkDisruption); + networkDisruption.startDisrupting(); + + logger.info("--> waiting for master to remove it"); + ensureStableCluster(2, master); + + networkDisruption.stopDisrupting(); + ensureStableCluster(3); + } +} diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 0cddb929472b7..ebdae985a39c7 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ElectMasterService; @@ -49,7 +48,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster.RestartCallback; -import org.elasticsearch.test.discovery.TestZenDiscovery; import java.io.IOException; import java.util.List; @@ -126,7 +124,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet(); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet(); fail(); } catch (IndexClosedException e) { // all is well @@ -170,7 +168,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet(); + client().prepareIndex("test", "type1", "1").setSource("field1", 
"value1").execute().actionGet(); fail(); } catch (IndexClosedException e) { // all is well @@ -231,7 +229,7 @@ public void testJustMasterNodeAndJustDataNode() throws Exception { logger.info("--> create an index"); client().admin().indices().prepareCreate("test").execute().actionGet(); - client().prepareIndex("test", "type1").setSource("field1", "value1").setTimeout("100ms").execute().actionGet(); + client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet(); } public void testTwoNodesSingleDoc() throws Exception { @@ -275,57 +273,6 @@ public void testTwoNodesSingleDoc() throws Exception { } } - public void testDanglingIndices() throws Exception { - /*TODO This test test does not work with Zen2, because once master node looses its cluster state during restart - it will start with term = 1, which is the same as the term data node has. Data node won't accept cluster state from master - after the restart, because the term is the same, but version of the cluster state is greater on the data node. - Consider adding term to JoinRequest, so that master node can bump its term if its current term is less than JoinRequest#term. - */ - logger.info("--> starting two nodes"); - - final String node_1 = internalCluster().startNodes(2, - Settings.builder().put(TestZenDiscovery.USE_ZEN2.getKey(), false).build()).get(0); - - logger.info("--> indexing a simple document"); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); - - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify 1 doc in the index"); - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); - } - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - - logger.info("--> restarting the nodes"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public boolean clearData(String nodeName) { - return node_1.equals(nodeName); - } - }); - - logger.info("--> waiting for green status"); - ensureGreen(); - - // spin a bit waiting for the index to exists - long time = System.currentTimeMillis(); - while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) { - if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) { - break; - } - } - - logger.info("--> verify that the dangling index exists"); - assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true)); - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify the doc is there"); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - } - /** * This test ensures that when an index deletion takes place while a node is offline, when that * node rejoins the cluster, it deletes the index locally instead of importing it as a dangling index. 
diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index e6fc2ed975fbb..86976d553fa2a 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.Node; @@ -30,8 +29,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; -import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -41,22 +38,6 @@ public class RecoverAfterNodesIT extends ESIntegTestCase { private static final TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10); - @Override - protected List addExtraClusterBootstrapSettings(List allNodesSettings) { - if (internalCluster().numDataAndMasterNodes() == 0) { - final Settings firstNodeSettings = allNodesSettings.get(0); - final List otherNodesSettings = allNodesSettings.subList(1, allNodesSettings.size()); - - final List updatedSettings = new ArrayList<>(); - updatedSettings.add(Settings.builder().put(firstNodeSettings) - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), - Node.NODE_NAME_SETTING.get(firstNodeSettings)).build()); - updatedSettings.addAll(otherNodesSettings); - - return updatedSettings; - } - return super.addExtraClusterBootstrapSettings(allNodesSettings); - } public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeClient) { long start = System.currentTimeMillis(); @@ -75,6 +56,7 @@ public Client startNode(Settings.Builder settings) { } public void testRecoverAfterNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start node (1)"); Client clientNode1 = startNode(Settings.builder().put("gateway.recover_after_nodes", 3)); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() @@ -100,6 +82,7 @@ public void testRecoverAfterNodes() throws Exception { } public void testRecoverAfterMasterNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder() .put("gateway.recover_after_master_nodes", 2).put(Node.NODE_DATA_SETTING.getKey(), false) @@ -145,6 +128,7 @@ public void testRecoverAfterMasterNodes() throws Exception { } public void testRecoverAfterDataNodes() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder() .put("gateway.recover_after_data_nodes", 2) diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 6bb799ac9ebb0..72a1cb4a87d7f 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -38,6 +38,7 @@ import java.io.UncheckedIOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import 
static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; @@ -127,6 +128,22 @@ public void testReformatSetting() { assertTrue(log.isReformat()); } + public void testReformatIsFalseAndSourceIsTrim() { + String json = "\n\n{ \"fieldName\": 123 } \n "; + BytesReference source = new BytesArray(json); + ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", + "test", null, null, source, XContentType.JSON, null); + Index index = new Index("foo", "123"); + // Turning off reformatting so the document appears in the logs as provided + SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, false, 1000); + String logLine = p.toString(); + + // expect the newlines and whitespace characters to be trimmed + assertThat(logLine, containsString("source[{")); + assertThat(logLine.split("\n").length, equalTo(1)); + } + public void testLevelSetting() { SlowLogLevel level = randomFrom(SlowLogLevel.values()); IndexMetaData metaData = newIndexMeta("index", Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java index 29c0fd3e9931a..8257aa99d0486 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -73,7 +73,7 @@ public void testSoftDeletesRetentionLock() { // Advances the global checkpoint and the local checkpoint of a safe commit globalCheckpoint.addAndGet(between(0, 1000)); for (final AtomicLong retainingSequenceNumber : retainingSequenceNumbers) { - retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), globalCheckpoint.get())); + retainingSequenceNumber.set(randomLongBetween(retainingSequenceNumber.get(), Math.max(globalCheckpoint.get(), 0L))); } safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get()); policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index f672a955cff18..6822298c2228e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -142,7 +142,7 @@ public void testParsesBooleansStrict() throws IOException { .endObject()); MapperParsingException ex = expectThrows(MapperParsingException.class, () -> defaultMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON))); - assertEquals("failed to parse field [field] of type [boolean]", ex.getMessage()); + assertEquals("failed to parse field [field] of type [boolean] in document with id '1'", ex.getMessage()); } public void testMultiFields() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index ffdb93474c0e9..f0e5f9fa86997 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -139,21 +139,21 @@ public void testUnexpectedFieldMappingType() throws Exception { .endObject()); MapperException 
exception = expectThrows(MapperException.class, () -> mapper.parse(new SourceToParse("test", "type", "1", bytes, XContentType.JSON))); - assertThat(exception.getMessage(), containsString("failed to parse field [foo] of type [long]")); + assertThat(exception.getMessage(), containsString("failed to parse field [foo] of type [long] in document with id '1'")); } { BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("bar", "bar") .endObject()); MapperException exception = expectThrows(MapperException.class, () -> mapper.parse(new SourceToParse("test", "type", "2", bytes, XContentType.JSON))); - assertThat(exception.getMessage(), containsString("failed to parse field [bar] of type [boolean]")); + assertThat(exception.getMessage(), containsString("failed to parse field [bar] of type [boolean] in document with id '2'")); } { BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("geo", 123) .endObject()); MapperException exception = expectThrows(MapperException.class, () -> mapper.parse(new SourceToParse("test", "type", "2", bytes, XContentType.JSON))); - assertThat(exception.getMessage(), containsString("failed to parse field [geo] of type [geo_shape]")); + assertThat(exception.getMessage(), containsString("failed to parse field [geo]")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 5ec63681fe690..e1f235c19c662 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collection; @@ -41,7 +42,6 @@ protected Collection> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37898") public void testConflictingDynamicMappings() { // we don't use indexRandom because the order of requests is important here createIndex("index"); @@ -50,7 +50,15 @@ public void testConflictingDynamicMappings() { client().prepareIndex("index", "type", "2").setSource("foo", "bar").get(); fail("Indexing request should have failed!"); } catch (MapperParsingException e) { - // expected + // general case: the parsing code complains that it can't parse "bar" as a "long" + assertThat(e.getMessage(), + Matchers.containsString("failed to parse field [foo] of type [long]")); + } catch (IllegalArgumentException e) { + // rare case: the node that processes the index request doesn't have the mappings + // yet and sends a mapping update to the master node to map "bar" as "text". This + // fails as it has already been mapped as a long by the previous index request. 
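+ // Either way the second index request must fail; the assertion below pins down the exact merge-conflict message for this path.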
+ assertThat(e.getMessage(), + Matchers.containsString("mapper [foo] of different type, current_type [long], merged_type [text]")); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 27651e0da0de4..36ba370939b17 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -460,6 +460,15 @@ public void testWithStopWords() throws Exception { assertEquals(expected, query); } + public void testNegativeFieldBoost() { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> new MultiMatchQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME, -1.0f) + .field(STRING_FIELD_NAME_2) + .toQuery(createShardContext())); + assertThat(exc.getMessage(), containsString("negative [boost]")); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 6f72277007dd5..7181c1de1fb41 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.index.search.QueryStringQueryParser; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; +import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; import java.io.IOException; @@ -1471,6 +1472,15 @@ public void testAnalyzedPrefix() throws Exception { assertEquals(expected, query); } + public void testNegativeFieldBoost() { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> new QueryStringQueryBuilder("the quick fox") + .field(STRING_FIELD_NAME, -1.0f) + .field(STRING_FIELD_NAME_2) + .toQuery(createShardContext())); + assertThat(exc.getMessage(), CoreMatchers.containsString("negative [boost]")); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index bbc89ddf750aa..3242f343379aa 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -57,6 +57,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -718,6 +719,15 @@ public void testUnmappedFieldNoTokenWithAndOperator() throws IOException { assertEquals(expected, query); } + public void testNegativeFieldBoost() { + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> new SimpleQueryStringBuilder("the quick fox") + .field(STRING_FIELD_NAME, -1.0f) + .field(STRING_FIELD_NAME_2) + 
.toQuery(createShardContext())); + assertThat(exc.getMessage(), containsString("negative [boost]")); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 02e26604dcd25..1c3c3b28773cf 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -56,6 +56,7 @@ import java.util.regex.Pattern; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -174,7 +175,7 @@ public void testCorruptedIndex() throws Exception { fail(); } catch (ElasticsearchException e) { if (corruptSegments) { - assertThat(e.getMessage(), is("Index is unrecoverable")); + assertThat(e.getMessage(), either(is("Index is unrecoverable")).or(startsWith("unable to list commits"))); } else { assertThat(e.getMessage(), containsString("aborted by user")); } diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 0e2e230053cd2..e344f15f3c55f 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -147,8 +147,7 @@ public void testGetFieldMappings() throws Exception { @SuppressWarnings("unchecked") public void testSimpleGetFieldMappingsWithDefaults() throws Exception { assertAcked(prepareCreate("test").addMapping("type", getMappingForType("type"))); - - client().prepareIndex("test", "type", "1").setSource("num", 1).get(); + client().admin().indices().preparePutMapping("test").setType("type").setSource("num", "type=long").get(); GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings() .setFields("num", "field1", "obj.subfield").includeDefaults(true).get(); diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 7ac2ff659dfa9..fab952b658144 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -25,10 +25,12 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.MapperService; @@ -72,15 +74,19 @@ public void 
testDynamicUpdates() throws Exception { .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE) ).execute().actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5))) + .get(); - int recCount = randomIntBetween(200, 600); + int recCount = randomIntBetween(20, 200); List indexRequests = new ArrayList<>(); for (int rec = 0; rec < recCount; rec++) { String type = "type"; String fieldName = "field_" + type + "_" + rec; - indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value")); + indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)) + .setTimeout(TimeValue.timeValueMinutes(5)).setSource(fieldName, "some_value")); } - indexRandom(true, indexRequests); + indexRandom(true, false, indexRequests); logger.info("checking all the documents are there"); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet(); @@ -95,6 +101,9 @@ public void testDynamicUpdates() throws Exception { String fieldName = "field_" + type + "_" + rec; assertConcreteMappingsOnAll("test", type, fieldName); } + + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().putNull(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey())).get(); } public void testUpdateMappingWithoutType() { @@ -224,7 +233,7 @@ public void testUpdateMappingConcurrently() throws Throwable { JsonXContent.contentBuilder().startObject().startObject(typeName) .startObject("properties").startObject(fieldName).field("type", "text").endObject().endObject() .endObject().endObject() - ).get(); + ).setMasterNodeTimeout(TimeValue.timeValueMinutes(5)).get(); assertThat(response.isAcknowledged(), equalTo(true)); GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get(); diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index 555bf24335413..99c50a839abc6 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -119,6 +119,8 @@ public void testCloseWhileRelocatingShards() throws Exception { final String targetNode = internalCluster().startDataOnlyNode(); ensureClusterSizeConsistency(); // wait for the master to finish processing join. 
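+ // Capture the target node's transport service up front: the send behavior installed below to block recoveries must be cleared via clearAllRules() before the closed indices are reopened at the end of the test.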
+ final MockTransportService targetTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); final Set acknowledgedCloses = ConcurrentCollections.newConcurrentSet(); try { @@ -146,8 +148,7 @@ public void testCloseWhileRelocatingShards() throws Exception { } final DiscoveryNode sourceNode = clusterService.state().nodes().resolveNode(primary.currentNodeId()); - ((MockTransportService) internalCluster().getInstance(TransportService.class, targetNode)) - .addSendBehavior(internalCluster().getInstance(TransportService.class, sourceNode.getName()), + targetTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, sourceNode.getName()), (connection, requestId, action, request, options) -> { if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action)) { logger.debug("blocking recovery of shard {}", ((StartRecoveryRequest) request).shardId()); @@ -210,28 +211,30 @@ public void testCloseWhileRelocatingShards() throws Exception { } } } - } finally { - assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder() - .putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey()))); - } - for (String index : indices) { - if (acknowledgedCloses.contains(index)) { - assertIndexIsClosed(index); - } else { - assertIndexIsOpened(index); + for (String index : indices) { + if (acknowledgedCloses.contains(index)) { + assertIndexIsClosed(index); + } else { + assertIndexIsOpened(index); + } } - } - assertThat("Consider that the test failed if no indices were successfully closed", acknowledgedCloses.size(), greaterThan(0)); - assertAcked(client().admin().indices().prepareOpen("index-*")); - ensureGreen(indices); + targetTransportService.clearAllRules(); + + assertThat("Consider that the test failed if no indices were successfully closed", acknowledgedCloses.size(), greaterThan(0)); + assertAcked(client().admin().indices().prepareOpen("index-*")); + ensureGreen(indices); - for (String index : acknowledgedCloses) { - long docsCount = client().prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; - assertEquals("Expected " + docsPerIndex.get(index) + " docs in index " + index + " but got " + docsCount - + " (close acknowledged=" + acknowledgedCloses.contains(index) + ")", (long) docsPerIndex.get(index), docsCount); + for (String index : acknowledgedCloses) { + long docsCount = client().prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; + assertEquals("Expected " + docsPerIndex.get(index) + " docs in index " + index + " but got " + docsCount + + " (close acknowledged=" + acknowledgedCloses.contains(index) + ")", (long) docsPerIndex.get(index), docsCount); + } + } finally { + assertAcked(client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey()))); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index 874623132f36a..012171ec25a0b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilterTests; import 
org.elasticsearch.search.aggregations.bucket.filter.InternalFiltersTests; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridTests; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridTests; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobalTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; @@ -140,6 +141,7 @@ private static List> getAggsTests() { aggsTests.add(new InternalFilterTests()); aggsTests.add(new InternalSamplerTests()); aggsTests.add(new GeoHashGridTests()); + aggsTests.add(new GeoTileGridTests()); aggsTests.add(new InternalRangeTests()); aggsTests.add(new InternalDateRangeTests()); aggsTests.add(new InternalGeoDistanceTests()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index f50c0bfd072b1..ae6e4cc984fbf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -996,39 +995,24 @@ public void testRangeWithFormatNumericValue() throws Exception { .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - if (JavaVersion.current().getVersion().get(0) == 8) { - assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); - } else { - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - } + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // using no format should also work when and to/from are string values searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - if (JavaVersion.current().getVersion().get(0) == 8) { - assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); - } else { - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - } + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // also e-notation should work, fractional parts should be truncated searchResponse = client().prepareSearch(indexName).setSize(0) 
.addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - if (JavaVersion.current().getVersion().get(0) == 8) { - assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); - } else { - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - } + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // using different format should work when to/from is compatible with // format in aggregation diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java index 664edba7db0d8..8cb42e352156b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java @@ -39,6 +39,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; +import static org.elasticsearch.search.aggregations.AggregationBuilders.geotileGrid; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.ipRange; @@ -306,5 +307,20 @@ public void testGeoHashGrid() throws Exception { assertThat(histo.getBuckets().size(), equalTo(4)); } + public void testGeoTileGrid() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation(geotileGrid("grid").field("location") + .subAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY) + .minDocCount(0))) + .get(); + + assertSearchResponse(response); + + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 5965574bef6e8..047903bc86100 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.geo.GeoEncodingUtils; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -89,6 +90,13 @@ public void testWithSeveralDocs() throws IOException { double lat = (180d * randomDouble()) - 90d; double lng = (360d * randomDouble()) - 
180d; + // Precision-adjust longitude/latitude to avoid wrong bucket placement + // Internally, lat/lng get converted to 32-bit integers, losing some precision. + // This does not affect geohashing because geohash uses the same algorithm, + // but it does affect other bucketing algorithms, so we need to apply the same conversion here. + lng = GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(lng)); + lat = GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(lat)); + points.add(new LatLonDocValuesField(FIELD_NAME, lat, lng)); String hash = hashAsString(lng, lat, precision); if (distinctHashesPerDoc.contains(hash) == false) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java new file mode 100644 index 0000000000000..6544344543e34 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +public class GeoTileGridAggregatorTests extends GeoGridAggregatorTestCase { + + @Override + protected int randomPrecision() { + return randomIntBetween(0, GeoTileUtils.MAX_ZOOM); + } + + @Override + protected String hashAsString(double lng, double lat, int precision) { + return GeoTileUtils.stringEncode(GeoTileUtils.longEncode(lng, lat, precision)); + } + + @Override + protected GeoGridAggregationBuilder createBuilder(String name) { + return new GeoTileGridAggregationBuilder(name); + } + + public void testPrecision() { + final GeoGridAggregationBuilder builder = createBuilder("_name"); + + expectThrows(IllegalArgumentException.class, () -> builder.precision(-1)); + expectThrows(IllegalArgumentException.class, () -> builder.precision(30)); + + int precision = randomIntBetween(0, 29); + builder.precision(precision); + assertEquals(precision, builder.precision()); + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java new file mode 100644 index 0000000000000..d3a9992af5305 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class GeoTileGridParserTests extends ESTestCase { + public void testParseValidFromInts() throws Exception { + int precision = randomIntBetween(0, GeoTileUtils.MAX_ZOOM); + XContentParser stParser = createParser(JsonXContent.jsonXContent, + "{\"field\":\"my_loc\", \"precision\":" + precision + ", \"size\": 500, \"shard_size\": 550}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + // can create a factory + assertNotNull(GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + } + + public void testParseValidFromStrings() throws Exception { + int precision = randomIntBetween(0, GeoTileUtils.MAX_ZOOM); + XContentParser stParser = createParser(JsonXContent.jsonXContent, + "{\"field\":\"my_loc\", \"precision\":\"" + precision + "\", \"size\": \"500\", \"shard_size\": \"550\"}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + // can create a factory + assertNotNull(GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + } + + public void testParseErrorOnBooleanPrecision() throws Exception { + XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"my_loc\", \"precision\":false}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + XContentParseException e = expectThrows(XContentParseException.class, + () -> GeoTileGridAggregationBuilder.parse("geotile_grid", stParser)); + assertThat(ExceptionsHelper.detailedMessage(e), + containsString("[geotile_grid] precision doesn't support values of type: VALUE_BOOLEAN")); + } + + public void testParseErrorOnPrecisionOutOfRange() throws Exception { + XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"my_loc\", \"precision\":\"30\"}"); + XContentParser.Token token = stParser.nextToken(); + assertSame(XContentParser.Token.START_OBJECT, token); + try { + GeoTileGridAggregationBuilder.parse("geotile_grid", stParser); + fail(); + } catch (XContentParseException ex) { + assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); + assertEquals("Invalid geotile_grid precision of 30. 
Must be between 0 and 29.", ex.getCause().getMessage()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java new file mode 100644 index 0000000000000..0a8aa8df56eec --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.List; +import java.util.Map; + +public class GeoTileGridTests extends GeoGridTestCase { + + @Override + protected InternalGeoTileGrid createInternalGeoGrid(String name, int size, List buckets, + List pipelineAggregators, Map metaData) { + return new InternalGeoTileGrid(name, size, buckets, pipelineAggregators, metaData); + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalGeoTileGrid::new; + } + + @Override + protected InternalGeoTileGridBucket createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations) { + return new InternalGeoTileGridBucket(key, docCount, aggregations); + } + + @Override + protected long longEncode(double lng, double lat, int precision) { + return GeoTileUtils.longEncode(lng, lat, precision); + } + + @Override + protected int randomPrecision() { + // precision values below 8 can lead to parsing errors + return randomIntBetween(8, GeoTileUtils.MAX_ZOOM); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java new file mode 100644 index 0000000000000..e2881fd9b9145 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtilsTests.java @@ -0,0 +1,209 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.geogrid;
+
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.MAX_ZOOM;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.checkPrecisionRange;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.hashToGeoPoint;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.keyToGeoPoint;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.longEncode;
+import static org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils.stringEncode;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.containsString;
+
+public class GeoTileUtilsTests extends ESTestCase {
+
+    private static final double GEOTILE_TOLERANCE = 1E-5D;
+
+    /**
+     * Precision validation should throw an error if it is outside the valid range.
+     */
+    public void testCheckPrecisionRange() {
+        for (int i = 0; i <= 29; i++) {
+            assertEquals(i, checkPrecisionRange(i));
+        }
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> checkPrecisionRange(-1));
+        assertThat(ex.getMessage(), containsString("Invalid geotile_grid precision of -1. Must be between 0 and 29."));
+        ex = expectThrows(IllegalArgumentException.class, () -> checkPrecisionRange(30));
+        assertThat(ex.getMessage(), containsString("Invalid geotile_grid precision of 30. 
Must be between 0 and 29.")); + } + + /** + * A few hardcoded lat/lng/zoom hashing expectations + */ + public void testLongEncode() { + assertEquals(0x0000000000000000L, longEncode(0, 0, 0)); + assertEquals(0x3C00095540001CA5L, longEncode(30, 70, 15)); + assertEquals(0x77FFFF4580000000L, longEncode(179.999, 89.999, 29)); + assertEquals(0x740000BA7FFFFFFFL, longEncode(-179.999, -89.999, 29)); + assertEquals(0x0800000040000001L, longEncode(1, 1, 2)); + assertEquals(0x0C00000060000000L, longEncode(-20, 100, 3)); + assertEquals(0x71127D27C8ACA67AL, longEncode(13, -15, 28)); + assertEquals(0x4C0077776003A9ACL, longEncode(-12, 15, 19)); + assertEquals(0x140000024000000EL, longEncode(-328.231870,16.064082, 5)); + assertEquals(0x6436F96B60000000L, longEncode(-590.769588,89.549167, 25)); + assertEquals(0x6411BD6BA0A98359L, longEncode(999.787079,51.830093, 25)); + assertEquals(0x751BD6BBCA983596L, longEncode(999.787079,51.830093, 29)); + assertEquals(0x77CF880A20000000L, longEncode(-557.039740,-632.103969, 29)); + assertEquals(0x7624FA4FA0000000L, longEncode(13,88, 29)); + assertEquals(0x7624FA4FBFFFFFFFL, longEncode(13,-88, 29)); + assertEquals(0x0400000020000000L, longEncode(13,89, 1)); + assertEquals(0x0400000020000001L, longEncode(13,-89, 1)); + assertEquals(0x0400000020000000L, longEncode(13,95, 1)); + assertEquals(0x0400000020000001L, longEncode(13,-95, 1)); + + expectThrows(IllegalArgumentException.class, () -> longEncode(0, 0, -1)); + expectThrows(IllegalArgumentException.class, () -> longEncode(-1, 0, MAX_ZOOM + 1)); + } + + private void assertGeoPointEquals(GeoPoint gp, final double longitude, final double latitude) { + assertThat(gp.lon(), closeTo(longitude, GEOTILE_TOLERANCE)); + assertThat(gp.lat(), closeTo(latitude, GEOTILE_TOLERANCE)); + } + + public void testHashToGeoPoint() { + assertGeoPointEquals(keyToGeoPoint("0/0/0"), 0.0, 0.0); + assertGeoPointEquals(keyToGeoPoint("1/0/0"), -90.0, 66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("1/1/0"), 90.0, 66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("1/0/1"), -90.0, -66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("1/1/1"), 90.0, -66.51326044311186); + assertGeoPointEquals(keyToGeoPoint("29/536870000/10"), 179.99938879162073, 85.05112817241982); + assertGeoPointEquals(keyToGeoPoint("29/10/536870000"), -179.99999295920134, -85.0510760525731); + + //noinspection ConstantConditions + expectThrows(NullPointerException.class, () -> keyToGeoPoint(null)); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("a")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0/0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/-1/-1")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/-1/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0/-1")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("a/0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/a/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("0/0/a")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint("-1/0/0")); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint((MAX_ZOOM + 1) + "/0/0")); + + for (int z = 0; z <= MAX_ZOOM; z++) { + final int zoom = 
z; + final int max_index = (int) Math.pow(2, zoom); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint(zoom + "/0/" + max_index)); + expectThrows(IllegalArgumentException.class, () -> keyToGeoPoint(zoom + "/" + max_index + "/0")); + } + } + + /** + * Make sure that hash produces the expected key, and that the key could be converted to hash via a GeoPoint + */ + private void assertStrCodec(long hash, String key, int zoom) { + assertEquals(key, stringEncode(hash)); + final GeoPoint gp = keyToGeoPoint(key); + assertEquals(hash, longEncode(gp.lon(), gp.lat(), zoom)); + } + + /** + * A few hardcoded lat/lng/zoom hashing expectations + */ + public void testStringEncode() { + assertStrCodec(0x0000000000000000L, "0/0/0", 0); + assertStrCodec(0x3C00095540001CA5L, "15/19114/7333", 15); + assertStrCodec(0x77FFFF4580000000L, "29/536869420/0", 29); + assertStrCodec(0x740000BA7FFFFFFFL, "29/1491/536870911", 29); + assertStrCodec(0x0800000040000001L, "2/2/1", 2); + assertStrCodec(0x0C00000060000000L, "3/3/0", 3); + assertStrCodec(0x71127D27C8ACA67AL, "28/143911230/145532538", 28); + assertStrCodec(0x4C0077776003A9ACL, "19/244667/240044", 19); + assertStrCodec(0x140000024000000EL, "5/18/14", 5); + assertStrCodec(0x6436F96B60000000L, "25/28822363/0", 25); + assertStrCodec(0x6411BD6BA0A98359L, "25/9300829/11109209", 25); + assertStrCodec(0x751BD6BBCA983596L, "29/148813278/177747350", 29); + assertStrCodec(0x77CF880A20000000L, "29/511459409/0", 29); + assertStrCodec(0x7624FA4FA0000000L, "29/287822461/0", 29); + assertStrCodec(0x7624FA4FBFFFFFFFL, "29/287822461/536870911", 29); + assertStrCodec(0x0400000020000000L, "1/1/0", 1); + assertStrCodec(0x0400000020000001L, "1/1/1", 1); + + expectThrows(IllegalArgumentException.class, () -> stringEncode(-1L)); + expectThrows(IllegalArgumentException.class, () -> stringEncode(0x7800000000000000L)); // z=30 + expectThrows(IllegalArgumentException.class, () -> stringEncode(0x0000000000000001L)); // z=0,x=0,y=1 + expectThrows(IllegalArgumentException.class, () -> stringEncode(0x0000000020000000L)); // z=0,x=1,y=0 + + for (int zoom = 0; zoom < 5; zoom++) { + int maxTile = 1 << zoom; + for (int x = 0; x < maxTile; x++) { + for (int y = 0; y < maxTile; y++) { + String expectedTileIndex = zoom + "/" + x + "/" + y; + GeoPoint point = keyToGeoPoint(expectedTileIndex); + String actualTileIndex = stringEncode(longEncode(point.lon(), point.lat(), zoom)); + assertEquals(expectedTileIndex, actualTileIndex); + } + } + } + } + + /** + * Ensure that for all points at all supported precision levels that the long encoding of a geotile + * is compatible with its String based counterpart + */ + public void testGeoTileAsLongRoutines() { + for (double lat = -90; lat <= 90; lat++) { + for (double lng = -180; lng <= 180; lng++) { + for (int p = 0; p <= 29; p++) { + long hash = longEncode(lng, lat, p); + if (p > 0) { + assertNotEquals(0, hash); + } + + // GeoPoint would be in the center of the bucket, thus must produce the same hash + GeoPoint point = hashToGeoPoint(hash); + long hashAsLong2 = longEncode(point.lon(), point.lat(), p); + assertEquals(hash, hashAsLong2); + + // Same point should be generated from the string key + assertEquals(point, keyToGeoPoint(stringEncode(hash))); + } + } + } + } + + /** + * Make sure the polar regions are handled properly. + * Mercator projection does not show anything above 85 or below -85, + * so ensure they are clipped correctly. 
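+ * For example (illustrative values, not asserted verbatim below): at zoom 5 the pole
+ * point (lon 0, lat 90) and the clipped point (lon 0, lat 85.05112878) should encode
+ * to the same tile "5/16/0", since latitude is clamped to the Mercator range before
+ * the y index is computed.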
+ */ + public void testSingularityAtPoles() { + double minLat = -85.05112878; + double maxLat = 85.05112878; + double lon = randomIntBetween(-180, 180); + double lat = randomBoolean() + ? randomDoubleBetween(-90, minLat, true) + : randomDoubleBetween(maxLat, 90, true); + double clippedLat = Math.min(Math.max(lat, minLat), maxLat); + int zoom = randomIntBetween(0, MAX_ZOOM); + String tileIndex = stringEncode(longEncode(lon, lat, zoom)); + String clippedTileIndex = stringEncode(longEncode(lon, clippedLat, zoom)); + assertEquals(tileIndex, clippedTileIndex); + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java new file mode 100644 index 0000000000000..a54155db92da1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -0,0 +1,1037 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.snapshots; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; +import org.elasticsearch.action.resync.TransportResyncReplicationAction; +import 
org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; +import org.elasticsearch.cluster.coordination.CoordinationState; +import org.elasticsearch.cluster.coordination.Coordinator; +import org.elasticsearch.cluster.coordination.CoordinatorTests; +import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; +import org.elasticsearch.cluster.coordination.InMemoryPersistedState; +import org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.elasticsearch.cluster.metadata.AliasValidator; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetaDataMappingService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; +import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.gateway.MetaStateService; +import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; 
+import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; +import org.elasticsearch.indices.cluster.IndicesClusterStateService; +import org.elasticsearch.indices.flush.SyncedFlushService; +import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.indices.recovery.PeerRecoverySourceService; +import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.disruption.DisruptableMockTransport; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; +import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; + +public class SnapshotResiliencyTests extends ESTestCase { + + private DeterministicTaskQueue deterministicTaskQueue; + + private TestClusterNodes testClusterNodes; + + private Path tempDir; + + @Before + public void createServices() { + tempDir = createTempDir(); + deterministicTaskQueue = + new DeterministicTaskQueue(Settings.builder().put(NODE_NAME_SETTING.getKey(), "shared").build(), random()); + } + + @After + public void stopServices() { + testClusterNodes.nodes.values().forEach(TestClusterNode::stop); + } + + public void testSuccessfulSnapshot() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + 
.execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener(() -> createdSnapshot.set(true))))))); + + deterministicTaskQueue.runAllRunnableTasks(); + + assertTrue(createdSnapshot.get()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + + public void testSnapshotWithNodeDisconnects() { + final int dataNodes = randomIntBetween(2, 10); + setupTestCluster(randomFrom(1, 3, 5), dataNodes); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + + final AdminClient masterAdminClient = masterNode.client.admin(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> { + for (int i = 0; i < randomIntBetween(0, dataNodes); ++i) { + scheduleNow(this::disconnectRandomDataNode); + } + if (randomBoolean()) { + scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); + } + masterAdminClient.cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener(() -> { + for (int i = 0; i < randomIntBetween(0, dataNodes); ++i) { + scheduleNow(this::disconnectOrRestartDataNode); + } + final boolean disconnectedMaster = randomBoolean(); + if (disconnectedMaster) { + scheduleNow(this::disconnectOrRestartMasterNode); + } + if (disconnectedMaster || randomBoolean()) { + scheduleSoon(() -> testClusterNodes.clearNetworkDisruptions()); + } else if (randomBoolean()) { + scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); + } + createdSnapshot.set(true); + })); + })))); + + runUntil(() -> { + final Optional randomMaster = testClusterNodes.randomMasterNode(); + if (randomMaster.isPresent()) { + final SnapshotsInProgress snapshotsInProgress = randomMaster.get().clusterService.state().custom(SnapshotsInProgress.TYPE); + return snapshotsInProgress != null && snapshotsInProgress.entries().isEmpty(); + } + return false; + }, TimeUnit.MINUTES.toMillis(1L)); + + clearDisruptionsAndAwaitSync(); + + assertTrue(createdSnapshot.get()); + final 
TestClusterNode randomMaster = testClusterNodes.randomMasterNode() + .orElseThrow(() -> new AssertionError("expected to find at least one active master node")); + SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertThat(finalSnapshotsInProgress.entries(), empty()); + final Repository repository = randomMaster.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + } + + public void testConcurrentSnapshotCreateAndDelete() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener( + () -> masterNode.client.admin().cluster().deleteSnapshot( + new DeleteSnapshotRequest(repoName, snapshotName), + assertNoFailureListener(() -> masterNode.client.admin().cluster() + .prepareCreateSnapshot(repoName, snapshotName).execute( + assertNoFailureListener(() -> createdSnapshot.set(true)) + ))))))))); + + deterministicTaskQueue.runAllRunnableTasks(); + + assertTrue(createdSnapshot.get()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + + /** + * Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently + * deleting a snapshot. 
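+ * The listener chain below stops the node holding the primary and then polls the
+ * cluster state via {@code maybeForceAllocate}: once the shard is unassigned with
+ * reason NODE_LEFT, it starts the snapshot (followed by a delete) and force-allocates
+ * the shard as an empty primary on another data node through a reroute command.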
+ */ + public void testSnapshotPrimaryRelocations() { + final int masterNodeCount = randomFrom(1, 3, 5); + setupTestCluster(masterNodeCount, randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + final TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + final AdminClient masterAdminClient = masterNode.client.admin(); + masterAdminClient.cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterAdminClient.indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL) + .settings(defaultIndexSettings(shards)), + assertNoFailureListener( + () -> masterAdminClient.cluster().state(new ClusterStateRequest(), assertNoFailureListener( + clusterStateResponse -> { + final ShardRouting shardToRelocate = + clusterStateResponse.getState().routingTable().allShards(index).get(0); + final TestClusterNode currentPrimaryNode = + testClusterNodes.nodeById(shardToRelocate.currentNodeId()); + final TestClusterNode otherNode = + testClusterNodes.randomDataNodeSafe(currentPrimaryNode.node.getName()); + final Runnable maybeForceAllocate = new Runnable() { + @Override + public void run() { + masterAdminClient.cluster().state(new ClusterStateRequest(), assertNoFailureListener( + resp -> { + final ShardRouting shardRouting = resp.getState().routingTable() + .shardRoutingTable(shardToRelocate.shardId()).primaryShard(); + if (shardRouting.unassigned() + && shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.NODE_LEFT) { + if (masterNodeCount > 1) { + scheduleNow(() -> testClusterNodes.stopNode(masterNode)); + } + testClusterNodes.randomDataNodeSafe().client.admin().cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .execute(ActionListener.wrap(() -> { + testClusterNodes.randomDataNodeSafe().client.admin().cluster() + .deleteSnapshot( + new DeleteSnapshotRequest(repoName, snapshotName), noopListener()); + createdSnapshot.set(true); + })); + scheduleNow( + () -> testClusterNodes.randomMasterNodeSafe().client.admin().cluster().reroute( + new ClusterRerouteRequest().add( + new AllocateEmptyPrimaryAllocationCommand( + index, shardRouting.shardId().id(), otherNode.node.getName(), true) + ), noopListener())); + } else { + scheduleSoon(this); + } + } + )); + } + }; + scheduleNow(() -> testClusterNodes.stopNode(currentPrimaryNode)); + scheduleNow(maybeForceAllocate); + } + )))))); + + runUntil(() -> { + final Optional randomMaster = testClusterNodes.randomMasterNode(); + if (randomMaster.isPresent()) { + final SnapshotsInProgress snapshotsInProgress = + randomMaster.get().clusterService.state().custom(SnapshotsInProgress.TYPE); + return (snapshotsInProgress == null || snapshotsInProgress.entries().isEmpty()) && createdSnapshot.get(); + } + return false; + }, TimeUnit.MINUTES.toMillis(1L)); + + clearDisruptionsAndAwaitSync(); + + assertTrue(createdSnapshot.get()); + final SnapshotsInProgress finalSnapshotsInProgress = testClusterNodes.randomDataNodeSafe() + .clusterService.state().custom(SnapshotsInProgress.TYPE); + assertThat(finalSnapshotsInProgress.entries(), empty()); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection 
snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0))); + } + + private void clearDisruptionsAndAwaitSync() { + testClusterNodes.clearNetworkDisruptions(); + runUntil(() -> { + final List versions = testClusterNodes.nodes.values().stream() + .map(n -> n.clusterService.state().version()).distinct().collect(Collectors.toList()); + return versions.size() == 1L; + }, TimeUnit.MINUTES.toMillis(1L)); + } + + private void disconnectOrRestartDataNode() { + if (randomBoolean()) { + disconnectRandomDataNode(); + } else { + testClusterNodes.randomDataNode().ifPresent(TestClusterNode::restart); + } + } + + private void disconnectOrRestartMasterNode() { + testClusterNodes.randomMasterNode().ifPresent(masterNode -> { + if (randomBoolean()) { + testClusterNodes.disconnectNode(masterNode); + } else { + masterNode.restart(); + } + }); + } + + private void disconnectRandomDataNode() { + testClusterNodes.randomDataNode().ifPresent(n -> testClusterNodes.disconnectNode(n)); + } + + private void startCluster() { + final ClusterState initialClusterState = + new ClusterState.Builder(ClusterName.DEFAULT).nodes(testClusterNodes.discoveryNodes()).build(); + testClusterNodes.nodes.values().forEach(testClusterNode -> testClusterNode.start(initialClusterState)); + + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + + final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node) + .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet())); + testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach( + testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); + + runUntil( + () -> { + List masterNodeIds = testClusterNodes.nodes.values().stream() + .map(node -> node.clusterService.state().nodes().getMasterNodeId()) + .distinct().collect(Collectors.toList()); + return masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false; + }, + TimeUnit.SECONDS.toMillis(30L) + ); + } + + private void runUntil(Supplier fulfilled, long timeout) { + final long start = deterministicTaskQueue.getCurrentTimeMillis(); + while (timeout > deterministicTaskQueue.getCurrentTimeMillis() - start) { + if (fulfilled.get()) { + return; + } + deterministicTaskQueue.runAllRunnableTasks(); + deterministicTaskQueue.advanceTime(); + } + fail("Condition wasn't fulfilled."); + } + + private void setupTestCluster(int masterNodes, int dataNodes) { + testClusterNodes = new TestClusterNodes(masterNodes, dataNodes); + startCluster(); + } + + private void scheduleSoon(Runnable runnable) { + deterministicTaskQueue.scheduleAt(deterministicTaskQueue.getCurrentTimeMillis() + randomLongBetween(0, 100L), runnable); + } + + private void scheduleNow(Runnable runnable) { + deterministicTaskQueue.scheduleNow(runnable); + } + + private static Settings defaultIndexSettings(int shards) { + // TODO: randomize replica count settings once recovery operations aren't blocking anymore + return Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0).build(); + } + + private static ActionListener assertNoFailureListener(Consumer consumer) { + return new ActionListener() { + @Override + public void onResponse(final T t) { + consumer.accept(t); + } + + @Override + public void onFailure(final Exception e) { + throw 
new AssertionError(e); + } + }; + } + + private static ActionListener assertNoFailureListener(Runnable r) { + return new ActionListener() { + @Override + public void onResponse(final T t) { + r.run(); + } + + @Override + public void onFailure(final Exception e) { + throw new AssertionError(e); + } + }; + } + + private static ActionListener noopListener() { + return new ActionListener() { + @Override + public void onResponse(final T t) { + } + + @Override + public void onFailure(final Exception e) { + } + }; + } + + /** + * Create a {@link Environment} with random path.home and path.repo + **/ + private Environment createEnvironment(String nodeName) { + return TestEnvironment.newEnvironment(Settings.builder() + .put(NODE_NAME_SETTING.getKey(), nodeName) + .put(PATH_HOME_SETTING.getKey(), tempDir.resolve(nodeName).toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo").toAbsolutePath()) + .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), + ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)) + .build()); + } + + private static ClusterState stateForNode(ClusterState state, DiscoveryNode node) { + return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); + } + + private final class TestClusterNodes { + + // LinkedHashMap so we have deterministic ordering when iterating over the map in tests + private final Map nodes = new LinkedHashMap<>(); + + private DisconnectedNodes disruptedLinks = new DisconnectedNodes(); + + TestClusterNodes(int masterNodes, int dataNodes) { + for (int i = 0; i < masterNodes; ++i) { + nodes.computeIfAbsent("node" + i, nodeName -> { + try { + return newMasterNode(nodeName); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + for (int i = 0; i < dataNodes; ++i) { + nodes.computeIfAbsent("data-node" + i, nodeName -> { + try { + return newDataNode(nodeName); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + } + + public TestClusterNode nodeById(final String nodeId) { + return nodes.values().stream().filter(n -> n.node.getId().equals(nodeId)).findFirst() + .orElseThrow(() -> new AssertionError("Could not find node by id [" + nodeId + ']')); + } + + private TestClusterNode newMasterNode(String nodeName) throws IOException { + return newNode(nodeName, DiscoveryNode.Role.MASTER); + } + + private TestClusterNode newDataNode(String nodeName) throws IOException { + return newNode(nodeName, DiscoveryNode.Role.DATA); + } + + private TestClusterNode newNode(String nodeName, DiscoveryNode.Role role) throws IOException { + return new TestClusterNode( + new DiscoveryNode(nodeName, randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), + Collections.singleton(role), Version.CURRENT), this::getDisruption); + } + + public TestClusterNode randomMasterNodeSafe() { + return randomMasterNode().orElseThrow(() -> new AssertionError("Expected to find at least one connected master node")); + } + + public Optional randomMasterNode() { + // Select from sorted list of data-nodes here to not have deterministic behaviour + final List masterNodes = testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) + .sorted(Comparator.comparing(n -> n.node.getName())).collect(Collectors.toList()); + return masterNodes.isEmpty() ? 
Optional.empty() : Optional.of(randomFrom(masterNodes)); + } + + public void stopNode(TestClusterNode node) { + node.stop(); + nodes.remove(node.node.getName()); + } + + public TestClusterNode randomDataNodeSafe(String... excludedNames) { + return randomDataNode(excludedNames).orElseThrow(() -> new AssertionError("Could not find another data node.")); + } + + public Optional randomDataNode(String... excludedNames) { + // Select from sorted list of data-nodes here to not have deterministic behaviour + final List dataNodes = testClusterNodes.nodes.values().stream().filter(n -> n.node.isDataNode()) + .filter(n -> { + for (final String nodeName : excludedNames) { + if (n.node.getName().equals(nodeName)) { + return false; + } + } + return true; + }) + .sorted(Comparator.comparing(n -> n.node.getName())).collect(Collectors.toList()); + return dataNodes.isEmpty() ? Optional.empty() : Optional.ofNullable(randomFrom(dataNodes)); + } + + public void disconnectNode(TestClusterNode node) { + if (disruptedLinks.disconnected.contains(node.node.getName())) { + return; + } + testClusterNodes.nodes.values().forEach(n -> n.transportService.getConnectionManager().disconnectFromNode(node.node)); + disruptedLinks.disconnect(node.node.getName()); + } + + public void clearNetworkDisruptions() { + disruptedLinks.disconnected.forEach(nodeName -> { + if (testClusterNodes.nodes.containsKey(nodeName)) { + final DiscoveryNode node = testClusterNodes.nodes.get(nodeName).node; + testClusterNodes.nodes.values().forEach(n -> n.transportService.getConnectionManager().openConnection(node, null)); + } + }); + disruptedLinks.clear(); + } + + private NetworkDisruption.DisruptedLinks getDisruption() { + return disruptedLinks; + } + + /** + * Builds a {@link DiscoveryNodes} instance that holds the nodes in this test cluster. + * @return DiscoveryNodes + */ + public DiscoveryNodes discoveryNodes() { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + nodes.values().forEach(node -> builder.add(node.node)); + return builder.build(); + } + + /** + * Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}. 
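+ * Fails the test if the master node named in the state is unknown to this test
+ * cluster or is not master-eligible.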
+ * @param state ClusterState + * @return Master Node + */ + public TestClusterNode currentMaster(ClusterState state) { + TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName()); + assertNotNull(master); + assertTrue(master.node.isMasterNode()); + return master; + } + } + + private final class TestClusterNode { + + private final Logger logger = LogManager.getLogger(TestClusterNode.class); + + private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Stream.concat( + ClusterModule.getNamedWriteables().stream(), NetworkModule.getNamedWriteables().stream()).collect(Collectors.toList())); + + private final TransportService transportService; + + private final ClusterService clusterService; + + private final RepositoriesService repositoriesService; + + private final SnapshotsService snapshotsService; + + private final SnapshotShardsService snapshotShardsService; + + private final IndicesService indicesService; + + private final IndicesClusterStateService indicesClusterStateService; + + private final DiscoveryNode node; + + private final MasterService masterService; + + private final AllocationService allocationService; + + private final NodeClient client; + + private final NodeEnvironment nodeEnv; + + private final DisruptableMockTransport mockTransport; + + private final ThreadPool threadPool; + + private final Supplier disruption; + + private Coordinator coordinator; + + TestClusterNode(DiscoveryNode node, Supplier disruption) throws IOException { + this.disruption = disruption; + this.node = node; + final Environment environment = createEnvironment(node.getName()); + masterService = new FakeThreadPoolMasterService(node.getName(), "test", deterministicTaskQueue::scheduleNow); + final Settings settings = environment.settings(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool = deterministicTaskQueue.getThreadPool(); + clusterService = new ClusterService(settings, clusterSettings, masterService, + new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { + @Override + protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { + return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue); + } + }); + mockTransport = new DisruptableMockTransport(node, logger) { + @Override + protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) { + return disruption.get().disrupt(node.getName(), destination.getName()) + ? 
ConnectionStatus.DISCONNECTED : ConnectionStatus.CONNECTED; + } + + @Override + protected Optional getDisruptableMockTransport(TransportAddress address) { + return testClusterNodes.nodes.values().stream().map(cn -> cn.mockTransport) + .filter(transport -> transport.getLocalNode().getAddress().equals(address)) + .findAny(); + } + + @Override + protected void execute(Runnable runnable) { + scheduleNow(CoordinatorTests.onNodeLog(getLocalNode(), runnable)); + } + + @Override + protected NamedWriteableRegistry writeableRegistry() { + return namedWriteableRegistry; + } + }; + transportService = mockTransport.createTransportService( + settings, deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)), + new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, TransportRequestHandler actualHandler) { + // TODO: Remove this hack once recoveries are async and can be used in these tests + if (action.startsWith("internal:index/shard/recovery")) { + return (request, channel, task) -> scheduleSoon( + new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + channel.sendResponse(new TransportException(new IOException("failed to recover shard"))); + } + + @Override + public void onFailure(final Exception e) { + throw new AssertionError(e); + } + }); + } else { + return actualHandler; + } + } + }, + a -> node, null, emptySet() + ); + final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + repositoriesService = new RepositoriesService( + settings, clusterService, transportService, + Collections.singletonMap(FsRepository.TYPE, metaData -> { + final Repository repository = new FsRepository(metaData, environment, xContentRegistry()) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo in the test thread + } + }; + repository.start(); + return repository; + } + ), + emptyMap(), + threadPool + ); + snapshotsService = + new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool); + nodeEnv = new NodeEnvironment(settings, environment); + final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList()); + final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap()); + client = new NodeClient(settings, threadPool); + allocationService = ESAllocationTestCase.createAllocationService(settings); + final IndexScopedSettings indexScopedSettings = + new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + indicesService = new IndicesService( + settings, + mock(PluginsService.class), + nodeEnv, + namedXContentRegistry, + new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), + emptyMap(), emptyMap(), emptyMap(), emptyMap()), + indexNameExpressionResolver, + new MapperRegistry(emptyMap(), emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), + namedWriteableRegistry, + threadPool, + indexScopedSettings, + new NoneCircuitBreakerService(), + new BigArrays(new PageCacheRecycler(settings), null, "test"), + scriptService, + client, + new MetaStateService(nodeEnv, namedXContentRegistry), + Collections.emptyList(), + emptyMap() + ); + final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); + final ActionFilters actionFilters = new ActionFilters(emptySet()); + 
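+ // From here on the constructor hand-wires the node's services together, mirroring a
+ // subset of the production node wiring: snapshot shard tracking, shard state handling,
+ // and the indices cluster state service with its recovery and sync actions.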
snapshotShardsService = new SnapshotShardsService( + settings, clusterService, snapshotsService, threadPool, + transportService, indicesService, actionFilters, indexNameExpressionResolver); + final ShardStateAction shardStateAction = new ShardStateAction( + clusterService, transportService, allocationService, + new RoutingService(clusterService, allocationService), + threadPool + ); + indicesClusterStateService = new IndicesClusterStateService( + settings, + indicesService, + clusterService, + threadPool, + new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService), + shardStateAction, + new NodeMappingRefreshAction(transportService, new MetaDataMappingService(clusterService, indicesService)), + repositoriesService, + mock(SearchService.class), + new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver), + new PeerRecoverySourceService(transportService, indicesService, recoverySettings), + snapshotShardsService, + new PrimaryReplicaSyncer( + transportService, + new TransportResyncReplicationAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver)), + new GlobalCheckpointSyncAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver), + new RetentionLeaseSyncAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver)); + Map actions = new HashMap<>(); + actions.put(CreateIndexAction.INSTANCE, + new TransportCreateIndexAction( + transportService, clusterService, threadPool, + new MetaDataCreateIndexService(settings, clusterService, indicesService, + allocationService, new AliasValidator(), environment, indexScopedSettings, + threadPool, namedXContentRegistry, false), + actionFilters, indexNameExpressionResolver + )); + actions.put(PutRepositoryAction.INSTANCE, + new TransportPutRepositoryAction( + transportService, clusterService, repositoriesService, threadPool, + actionFilters, indexNameExpressionResolver + )); + actions.put(CreateSnapshotAction.INSTANCE, + new TransportCreateSnapshotAction( + transportService, clusterService, threadPool, + snapshotsService, actionFilters, indexNameExpressionResolver + )); + actions.put(ClusterRerouteAction.INSTANCE, + new TransportClusterRerouteAction(transportService, clusterService, threadPool, allocationService, + actionFilters, indexNameExpressionResolver)); + actions.put(ClusterStateAction.INSTANCE, + new TransportClusterStateAction(transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver)); + actions.put(IndicesShardStoresAction.INSTANCE, + new TransportIndicesShardStoresAction( + transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + new TransportNodesListGatewayStartedShards(settings, + threadPool, clusterService, transportService, actionFilters, nodeEnv, indicesService, namedXContentRegistry)) + ); + actions.put(DeleteSnapshotAction.INSTANCE, + new TransportDeleteSnapshotAction( + transportService, clusterService, threadPool, + snapshotsService, actionFilters, indexNameExpressionResolver + )); + client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); + } + + public void restart() { + testClusterNodes.disconnectNode(this); + final ClusterState oldState 
= this.clusterService.state(); + stop(); + testClusterNodes.nodes.remove(node.getName()); + scheduleSoon(() -> { + try { + final TestClusterNode restartedNode = new TestClusterNode( + new DiscoveryNode(node.getName(), node.getId(), node.getAddress(), emptyMap(), + node.getRoles(), Version.CURRENT), disruption); + testClusterNodes.nodes.put(node.getName(), restartedNode); + restartedNode.start(oldState); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void stop() { + testClusterNodes.disconnectNode(this); + indicesService.close(); + clusterService.close(); + indicesClusterStateService.close(); + if (coordinator != null) { + coordinator.close(); + } + nodeEnv.close(); + } + + public void start(ClusterState initialState) { + transportService.start(); + transportService.acceptIncomingRequests(); + snapshotsService.start(); + snapshotShardsService.start(); + final CoordinationState.PersistedState persistedState = + new InMemoryPersistedState(initialState.term(), stateForNode(initialState, node)); + coordinator = new Coordinator(node.getName(), clusterService.getSettings(), + clusterService.getClusterSettings(), transportService, namedWriteableRegistry, + allocationService, masterService, () -> persistedState, + hostsResolver -> testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) + .map(n -> n.node.getAddress()).collect(Collectors.toList()), + clusterService.getClusterApplierService(), Collections.emptyList(), random()); + masterService.setClusterStatePublisher(coordinator); + coordinator.start(); + masterService.start(); + clusterService.getClusterApplierService().setNodeConnectionsService( + new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) { + @Override + public void connectToNodes(DiscoveryNodes discoveryNodes) { + // override this method as it does blocking calls + boolean callSuper = true; + for (final DiscoveryNode node : discoveryNodes) { + try { + transportService.connectToNode(node); + } catch (Exception e) { + callSuper = false; + } + } + if (callSuper) { + super.connectToNodes(discoveryNodes); + } + } + }); + clusterService.getClusterApplierService().start(); + indicesService.start(); + indicesClusterStateService.start(); + coordinator.startInitialJoin(); + } + } + + private final class DisconnectedNodes extends NetworkDisruption.DisruptedLinks { + + /** + * Node names that are disconnected from all other nodes. + */ + private final Set disconnected = new HashSet<>(); + + @Override + public boolean disrupt(String node1, String node2) { + if (node1.equals(node2)) { + return false; + } + // Check if both nodes are still part of the cluster + if (testClusterNodes.nodes.containsKey(node1) == false + || testClusterNodes.nodes.containsKey(node2) == false) { + return true; + } + return disconnected.contains(node1) || disconnected.contains(node2); + } + + public void disconnect(String node) { + disconnected.add(node); + } + + public void clear() { + disconnected.clear(); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java deleted file mode 100644 index 8b750939238cb..0000000000000 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.snapshots; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; -import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import org.elasticsearch.action.resync.TransportResyncReplicationAction; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.NodeConnectionsService; -import org.elasticsearch.cluster.SnapshotsInProgress; -import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.coordination.CoordinationState; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.CoordinatorTests; -import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; -import org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; -import org.elasticsearch.cluster.metadata.AliasValidator; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; -import org.elasticsearch.cluster.metadata.MetaDataMappingService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import 
org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterApplierService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.gateway.MetaStateService; -import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; -import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; -import org.elasticsearch.index.shard.PrimaryReplicaSyncer; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; -import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.indices.recovery.PeerRecoverySourceService; -import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.repositories.RepositoriesService; -import org.elasticsearch.repositories.Repository; -import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchService; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.disruption.DisruptableMockTransport; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.file.Path; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; -import static org.elasticsearch.node.Node.NODE_NAME_SETTING; -import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.hasSize; -import static org.mockito.Mockito.mock; - -public class SnapshotsServiceTests extends ESTestCase { - - private 
-
-    private DeterministicTaskQueue deterministicTaskQueue;
-
-    private TestClusterNodes testClusterNodes;
-
-    private Path tempDir;
-
-    @Before
-    public void createServices() {
-        tempDir = createTempDir();
-        deterministicTaskQueue =
-            new DeterministicTaskQueue(Settings.builder().put(NODE_NAME_SETTING.getKey(), "shared").build(), random());
-    }
-
-    @After
-    public void stopServices() {
-        testClusterNodes.nodes.values().forEach(
-            n -> {
-                n.indicesService.close();
-                n.clusterService.close();
-                n.indicesClusterStateService.close();
-                n.nodeEnv.close();
-                n.coordinator.close();
-            }
-        );
-    }
-
-    public void testSuccessfulSnapshot() {
-        setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
-
-        String repoName = "repo";
-        String snapshotName = "snapshot";
-        final String index = "test";
-
-        final int shards = randomIntBetween(1, 10);
-
-        TestClusterNode masterNode =
-            testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
-        final AtomicBoolean createdSnapshot = new AtomicBoolean();
-        masterNode.client.admin().cluster().preparePutRepository(repoName)
-            .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10)))
-            .execute(
-                assertNoFailureListener(
-                    () -> masterNode.client.admin().indices().create(
-                        new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL).settings(
-                            Settings.builder()
-                                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards)
-                                .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)),
-                        assertNoFailureListener(
-                            () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
-                                .execute(assertNoFailureListener(() -> createdSnapshot.set(true)))))));
-
-        deterministicTaskQueue.runAllRunnableTasks();
-
-        assertTrue(createdSnapshot.get());
-        SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
-        assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
-        final Repository repository = masterNode.repositoriesService.repository(repoName);
-        Collection<SnapshotId> snapshotIds = repository.getRepositoryData().getSnapshotIds();
-        assertThat(snapshotIds, hasSize(1));
-
-        final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
-        assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
-        assertThat(snapshotInfo.indices(), containsInAnyOrder(index));
-        assertEquals(shards, snapshotInfo.successfulShards());
-        assertEquals(0, snapshotInfo.failedShards());
-    }
-
-    public void testConcurrentSnapshotCreateAndDelete() {
-        setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
-
-        String repoName = "repo";
-        String snapshotName = "snapshot";
-        final String index = "test";
-
-        final int shards = randomIntBetween(1, 10);
-
-        TestClusterNode masterNode =
-            testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
-        final AtomicBoolean createdSnapshot = new AtomicBoolean();
-        masterNode.client.admin().cluster().preparePutRepository(repoName)
-            .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10)))
-            .execute(
-                assertNoFailureListener(
-                    () -> masterNode.client.admin().indices().create(
-                        new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL).settings(
-                            Settings.builder()
-                                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards)
-                                .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)),
-                        assertNoFailureListener(
-                            () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
-                                .execute(assertNoFailureListener(
-                                    () -> masterNode.client.admin().cluster().deleteSnapshot(
-                                        new DeleteSnapshotRequest(repoName, snapshotName),
-                                        assertNoFailureListener(() -> masterNode.client.admin().cluster()
-                                            .prepareCreateSnapshot(repoName, snapshotName).execute(
-                                                assertNoFailureListener(() -> createdSnapshot.set(true))
-                                            )))))))));
-
-        deterministicTaskQueue.runAllRunnableTasks();
-
-        assertTrue(createdSnapshot.get());
-        SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
-        assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
-        final Repository repository = masterNode.repositoriesService.repository(repoName);
-        Collection<SnapshotId> snapshotIds = repository.getRepositoryData().getSnapshotIds();
-        assertThat(snapshotIds, hasSize(1));
-
-        final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
-        assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
-        assertThat(snapshotInfo.indices(), containsInAnyOrder(index));
-        assertEquals(shards, snapshotInfo.successfulShards());
-        assertEquals(0, snapshotInfo.failedShards());
-    }
-
-    private void startCluster() {
-        final ClusterState initialClusterState =
-            new ClusterState.Builder(ClusterName.DEFAULT).nodes(testClusterNodes.randomDiscoveryNodes()).build();
-        testClusterNodes.nodes.values().forEach(testClusterNode -> testClusterNode.start(initialClusterState));
-
-        deterministicTaskQueue.advanceTime();
-        deterministicTaskQueue.runAllRunnableTasks();
-
-        final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node)
-            .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet()));
-        testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach(
-            testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration));
-
-        runUntil(
-            () -> {
-                List<String> masterNodeIds = testClusterNodes.nodes.values().stream()
-                    .map(node -> node.clusterService.state().nodes().getMasterNodeId())
-                    .distinct().collect(Collectors.toList());
-                return masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false;
-            },
-            TimeUnit.SECONDS.toMillis(30L)
-        );
-    }
-
-    private void runUntil(Supplier<Boolean> fulfilled, long timeout) {
-        final long start = deterministicTaskQueue.getCurrentTimeMillis();
-        while (timeout > deterministicTaskQueue.getCurrentTimeMillis() - start) {
-            deterministicTaskQueue.runAllRunnableTasks();
-            if (fulfilled.get()) {
-                return;
-            }
-            deterministicTaskQueue.advanceTime();
-        }
-        fail("Condition wasn't fulfilled.");
-    }
-
-    private void setupTestCluster(int masterNodes, int dataNodes) {
-        testClusterNodes = new TestClusterNodes(masterNodes, dataNodes);
-        startCluster();
-    }
-
-    private static <T> ActionListener<T> assertNoFailureListener(Runnable r) {
-        return new ActionListener<T>() {
-            @Override
-            public void onResponse(final T t) {
-                r.run();
-            }
-
-            @Override
-            public void onFailure(final Exception e) {
-                throw new AssertionError(e);
-            }
-        };
-    }
-
-    /**
-     * Create a {@link Environment} with random path.home and path.repo
-     **/
-    private Environment createEnvironment(String nodeName) {
-        return TestEnvironment.newEnvironment(Settings.builder()
-            .put(NODE_NAME_SETTING.getKey(), nodeName)
-            .put(PATH_HOME_SETTING.getKey(), tempDir.resolve(nodeName).toAbsolutePath())
-            .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo").toAbsolutePath())
-            .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(),
-                ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY))
-            .build());
-    }
-
-    private TestClusterNode newMasterNode(String nodeName) throws IOException {
-        return newNode(nodeName, DiscoveryNode.Role.MASTER);
-    }
-
-    private TestClusterNode newDataNode(String nodeName) throws IOException {
-        return newNode(nodeName, DiscoveryNode.Role.DATA);
-    }
-
-    private TestClusterNode newNode(String nodeName, DiscoveryNode.Role role) throws IOException {
-        return new TestClusterNode(
-            new DiscoveryNode(nodeName, randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(),
-                Collections.singleton(role), Version.CURRENT)
-        );
-    }
-
-    private static ClusterState stateForNode(ClusterState state, DiscoveryNode node) {
-        return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build();
-    }
-
-    private final class TestClusterNodes {
-
-        // LinkedHashMap so we have deterministic ordering when iterating over the map in tests
-        private final Map<String, TestClusterNode> nodes = new LinkedHashMap<>();
-
-        TestClusterNodes(int masterNodes, int dataNodes) {
-            for (int i = 0; i < masterNodes; ++i) {
-                nodes.computeIfAbsent("node" + i, nodeName -> {
-                    try {
-                        return SnapshotsServiceTests.this.newMasterNode(nodeName);
-                    } catch (IOException e) {
-                        throw new UncheckedIOException(e);
-                    }
-                });
-            }
-            for (int i = 0; i < dataNodes; ++i) {
-                nodes.computeIfAbsent("data-node" + i, nodeName -> {
-                    try {
-                        return SnapshotsServiceTests.this.newDataNode(nodeName);
-                    } catch (IOException e) {
-                        throw new UncheckedIOException(e);
-                    }
-                });
-            }
-        }
-
-        /**
-         * Builds a {@link DiscoveryNodes} instance that has one master eligible node set as its master
-         * by random.
-         * @return DiscoveryNodes with set master node
-         */
-        public DiscoveryNodes randomDiscoveryNodes() {
-            DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
-            nodes.values().forEach(node -> builder.add(node.node));
-            return builder.build();
-        }
-
-        /**
-         * Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}.
-         * @param state ClusterState
-         * @return Master Node
-         */
-        public TestClusterNode currentMaster(ClusterState state) {
-            TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName());
-            assertNotNull(master);
-            assertTrue(master.node.isMasterNode());
-            return master;
-        }
-    }
-
-    private final class TestClusterNode {
-
-        private final Logger logger = LogManager.getLogger(TestClusterNode.class);
-
-        private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
-
-        private final TransportService transportService;
-
-        private final ClusterService clusterService;
-
-        private final RepositoriesService repositoriesService;
-
-        private final SnapshotsService snapshotsService;
-
-        private final SnapshotShardsService snapshotShardsService;
-
-        private final IndicesService indicesService;
-
-        private final IndicesClusterStateService indicesClusterStateService;
-
-        private final DiscoveryNode node;
-
-        private final MasterService masterService;
-
-        private final AllocationService allocationService;
-
-        private final NodeClient client;
-
-        private final NodeEnvironment nodeEnv;
-
-        private final DisruptableMockTransport mockTransport;
-
-        private final ThreadPool threadPool;
-
-        private Coordinator coordinator;
-
-        TestClusterNode(DiscoveryNode node) throws IOException {
-            this.node = node;
-            final Environment environment = createEnvironment(node.getName());
-            masterService = new FakeThreadPoolMasterService(node.getName(), "test", deterministicTaskQueue::scheduleNow);
-            final Settings settings = environment.settings();
-            final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-            threadPool = deterministicTaskQueue.getThreadPool();
-            clusterService = new ClusterService(settings, clusterSettings, masterService,
-                new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) {
-                    @Override
-                    protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
-                        return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue);
-                    }
-                });
-            mockTransport = new DisruptableMockTransport(node, logger) {
-                @Override
-                protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) {
-                    return ConnectionStatus.CONNECTED;
-                }
-
-                @Override
-                protected Optional<DisruptableMockTransport> getDisruptableMockTransport(TransportAddress address) {
-                    return testClusterNodes.nodes.values().stream().map(cn -> cn.mockTransport)
-                        .filter(transport -> transport.getLocalNode().getAddress().equals(address))
-                        .findAny();
-                }
-
-                @Override
-                protected void execute(Runnable runnable) {
-                    deterministicTaskQueue.scheduleNow(CoordinatorTests.onNodeLog(getLocalNode(), runnable));
-                }
-            };
-            transportService = mockTransport.createTransportService(
-                settings, deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)),
-                NOOP_TRANSPORT_INTERCEPTOR,
-                a -> node, null, emptySet()
-            );
-            final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
-            repositoriesService = new RepositoriesService(
-                settings, clusterService, transportService,
-                Collections.singletonMap(FsRepository.TYPE, metaData -> {
-                        final Repository repository = new FsRepository(metaData, environment, xContentRegistry()) {
-                            @Override
-                            protected void assertSnapshotOrGenericThread() {
-                                // eliminate thread name check as we create repo in the test thread
-                            }
-                        };
-                        repository.start();
-                        return repository;
-                    }
-                ),
-                emptyMap(),
-                threadPool
-            );
-            snapshotsService =
-                new SnapshotsService(settings, clusterService, indexNameExpressionResolver, repositoriesService, threadPool);
-            nodeEnv = new NodeEnvironment(settings, environment);
-            final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList());
-            final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap());
-            client = new NodeClient(settings, threadPool);
-            allocationService = ESAllocationTestCase.createAllocationService(settings);
-            final IndexScopedSettings indexScopedSettings =
-                new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
-            indicesService = new IndicesService(
-                settings,
-                mock(PluginsService.class),
-                nodeEnv,
-                namedXContentRegistry,
-                new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(),
-                    emptyMap(), emptyMap(), emptyMap(), emptyMap()),
-                indexNameExpressionResolver,
-                new MapperRegistry(emptyMap(), emptyMap(), MapperPlugin.NOOP_FIELD_FILTER),
-                namedWriteableRegistry,
-                threadPool,
-                indexScopedSettings,
-                new NoneCircuitBreakerService(),
-                new BigArrays(new PageCacheRecycler(settings), null, "test"),
-                scriptService,
-                client,
-                new MetaStateService(nodeEnv, namedXContentRegistry),
-                Collections.emptyList(),
-                emptyMap()
-            );
-            final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
-            final ActionFilters actionFilters = new ActionFilters(emptySet());
-            snapshotShardsService = new SnapshotShardsService(
-                settings, clusterService, snapshotsService, threadPool,
-                transportService, indicesService, actionFilters, indexNameExpressionResolver);
-            final ShardStateAction shardStateAction = new ShardStateAction(
-                clusterService, transportService, allocationService,
-                new RoutingService(clusterService, allocationService),
-                deterministicTaskQueue.getThreadPool()
-            );
-            indicesClusterStateService = new IndicesClusterStateService(
-                settings,
-                indicesService,
-                clusterService,
-                threadPool,
-                new PeerRecoveryTargetService(
-                    deterministicTaskQueue.getThreadPool(), transportService, recoverySettings, clusterService),
-                shardStateAction,
-                new NodeMappingRefreshAction(transportService, new MetaDataMappingService(clusterService, indicesService)),
-                repositoriesService,
-                mock(SearchService.class),
-                new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver),
-                new PeerRecoverySourceService(transportService, indicesService, recoverySettings),
-                snapshotShardsService,
-                new PrimaryReplicaSyncer(
-                    transportService,
-                    new TransportResyncReplicationAction(
-                        settings,
-                        transportService,
-                        clusterService,
-                        indicesService,
-                        threadPool,
-                        shardStateAction,
-                        actionFilters,
-                        indexNameExpressionResolver)),
-                new GlobalCheckpointSyncAction(
-                    settings,
-                    transportService,
-                    clusterService,
-                    indicesService,
-                    threadPool,
-                    shardStateAction,
-                    actionFilters,
-                    indexNameExpressionResolver),
-                new RetentionLeaseSyncAction(
-                    settings,
-                    transportService,
-                    clusterService,
-                    indicesService,
-                    threadPool,
-                    shardStateAction,
-                    actionFilters,
-                    indexNameExpressionResolver));
-            Map<Action, TransportAction> actions = new HashMap<>();
-            actions.put(CreateIndexAction.INSTANCE,
-                new TransportCreateIndexAction(
-                    transportService, clusterService, threadPool,
-                    new MetaDataCreateIndexService(settings, clusterService, indicesService,
-                        allocationService, new AliasValidator(), environment, indexScopedSettings,
-                        threadPool, namedXContentRegistry, false),
-                    actionFilters, indexNameExpressionResolver
-                ));
-            actions.put(PutRepositoryAction.INSTANCE,
-                new TransportPutRepositoryAction(
-                    transportService, clusterService, repositoriesService, threadPool,
-                    actionFilters, indexNameExpressionResolver
-                ));
-            actions.put(CreateSnapshotAction.INSTANCE,
-                new TransportCreateSnapshotAction(
-                    transportService, clusterService, threadPool,
-                    snapshotsService, actionFilters, indexNameExpressionResolver
-                ));
-            actions.put(DeleteSnapshotAction.INSTANCE,
-                new TransportDeleteSnapshotAction(
-                    transportService, clusterService, threadPool,
-                    snapshotsService, actionFilters, indexNameExpressionResolver
-                ));
-            client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService());
-        }
-
-        public void start(ClusterState initialState) {
-            transportService.start();
-            transportService.acceptIncomingRequests();
-            snapshotsService.start();
-            snapshotShardsService.start();
-            final CoordinationState.PersistedState persistedState =
-                new InMemoryPersistedState(0L, stateForNode(initialState, node));
-            coordinator = new Coordinator(node.getName(), clusterService.getSettings(),
-                clusterService.getClusterSettings(), transportService, namedWriteableRegistry,
-                allocationService, masterService, () -> persistedState,
-                hostsResolver -> testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode())
-                    .map(n -> n.node.getAddress()).collect(Collectors.toList()),
-                clusterService.getClusterApplierService(), Collections.emptyList(), random());
-            masterService.setClusterStatePublisher(coordinator);
-            coordinator.start();
-            masterService.start();
-            clusterService.getClusterApplierService().setNodeConnectionsService(
-                new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) {
-                    @Override
-                    public void connectToNodes(DiscoveryNodes discoveryNodes) {
-                        // override this method as it does blocking calls
-                        for (final DiscoveryNode node : discoveryNodes) {
-                            transportService.connectToNode(node);
-                        }
-                        super.connectToNodes(discoveryNodes);
-                    }
-                });
-            clusterService.getClusterApplierService().start();
-            indicesService.start();
-            indicesClusterStateService.start();
-            coordinator.startInitialJoin();
-        }
-    }
-}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 45f8682dc5e61..0dfdd2505235a 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -1942,11 +1942,6 @@ public Settings nodeSettings(int nodeOrdinal) {
                     .put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build();
             }
 
-            @Override
-            public List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
-                return ESIntegTestCase.this.addExtraClusterBootstrapSettings(allNodesSettings);
-            }
-
             @Override
             public Path nodeConfigPath(int nodeOrdinal) {
                 return ESIntegTestCase.this.nodeConfigPath(nodeOrdinal);
@@ -1975,18 +1970,6 @@ public Collection<Class<? extends Plugin>> transportClientPlugins() {
         };
     }
 
-    /**
-     * This method is called before starting a collection of nodes.
-     * At this point the test has a holistic view on all nodes settings and might perform settings adjustments as needed.
-     * For instance, the test could retrieve master node names and fill in
-     * {@link org.elasticsearch.cluster.coordination.ClusterBootstrapService#INITIAL_MASTER_NODES_SETTING} setting.
-     *
-     * @param allNodesSettings list of node settings before update
-     * @return list of node settings after update
-     */
-    protected List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
-        return allNodesSettings;
-    }
 
     /**
      * Iff this returns true mock transport implementations are used for the test runs. Otherwise not mock transport impls are used.
@@ -2214,6 +2197,9 @@ public final void cleanUpCluster() throws Exception {
         // Deleting indices is going to clear search contexts implicitly so we
         // need to check that there are no more in-flight search contexts before
         // we remove indices
+        if (isInternalCluster()) {
+            internalCluster().setBootstrapMasterNodeIndex(-1);
+        }
         super.ensureAllSearchContextsReleased();
         if (runTestScopeLifecycle()) {
             printTestMessage("cleaning up after");
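With the `addExtraClusterBootstrapSettings` hook gone, tests drive cluster bootstrap through the `setBootstrapMasterNodeIndex` accessor this change adds to `InternalTestCluster` (see below), and `cleanUpCluster` now resets the index so one test cannot leak bootstrap state into the next. A minimal usage sketch under those assumptions; the test class name and node count are illustrative, not part of the change:

    // Illustrative test: pin cluster bootstrap to the first master-eligible node.
    // Only valid when the test cluster does not auto-manage master nodes.
    public class ManualBootstrapIT extends ESIntegTestCase {

        public void testManualBootstrap() {
            // Index 0 = first master-eligible node to be started; cleanUpCluster()
            // resets this to -1 after the test.
            internalCluster().setBootstrapMasterNodeIndex(0);
            internalCluster().startNodes(3);
            ensureStableCluster(3);
        }
    }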
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
index fd560af806066..f9d72e38044fb 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
@@ -51,6 +51,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters;
 import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
 import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
@@ -212,6 +214,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation
         map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c));
         map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c));
         map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c));
+        map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c));
         map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
         map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
         map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 9313d9389d49c..5e75a50bef4d9 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -43,6 +43,7 @@ import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNode.Role;
@@ -243,6 +244,8 @@ public final class InternalTestCluster extends TestCluster {
     // If set to true only the first node in the cluster will be made a unicast node
     private boolean hostsListContainsOnlyFirstNode;
 
+    private int bootstrapMasterNodeIndex = -1;
+
     public InternalTestCluster(
             final long clusterSeed,
             final Path baseDir,
@@ -400,6 +403,22 @@ public InternalTestCluster(
             EsExecutors.daemonThreadFactory("test_" + clusterName), new ThreadContext(Settings.EMPTY));
     }
 
+    public int getBootstrapMasterNodeIndex() {
+        return bootstrapMasterNodeIndex;
+    }
+
+    /**
+     * Sets {@link #bootstrapMasterNodeIndex} to the given value, see {@link #bootstrapMasterNodeWithSpecifiedIndex(List)}
+     * for the description of how this field is used.
+     * It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMinMasterNodes is false.
+     */
+    public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) {
+        if (autoManageMinMasterNodes && bootstrapMasterNodeIndex != -1) {
+            throw new AssertionError("bootstrapMasterNodeIndex should be -1 if autoManageMinMasterNodes is true");
+        }
+        this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex;
+    }
+
     @Override
     public String getClusterName() {
         return clusterName;
@@ -1146,7 +1165,7 @@ private synchronized void reset(boolean wipeData) throws IOException {
             settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes));
         }
 
-        int bootstrapNodeIndex = -1;
+        int autoBootstrapMasterNodeIndex = -1;
         final List<String> masterNodeNames = settings.stream()
             .filter(Node.NODE_MASTER_SETTING::get)
             .map(Node.NODE_NAME_SETTING::get)
@@ -1154,17 +1173,17 @@
 
         if (prevNodeCount == 0 && autoManageMinMasterNodes) {
             if (numSharedDedicatedMasterNodes > 0) {
-                bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1);
+                autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1);
             } else if (numSharedDataNodes > 0) {
-                bootstrapNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1);
+                autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDataNodes - 1);
            }
         }
 
-        final List<Settings> updatedSettings = nodeConfigurationSource.addExtraClusterBootstrapSettings(settings);
+        final List<Settings> updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings);
 
         for (int i = 0; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) {
             Settings nodeSettings = updatedSettings.get(i);
-            if (i == bootstrapNodeIndex) {
+            if (i == autoBootstrapMasterNodeIndex) {
                 nodeSettings = Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), masterNodeNames).put(nodeSettings).build();
             }
             final NodeAndClient nodeAndClient = buildNode(i, nodeSettings, true, onTransportServiceStarted);
@@ -1944,6 +1963,54 @@ public synchronized Set<String> nodesInclude(String index) {
         return Collections.emptySet();
     }
 
+    /**
+     * Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started
+     * with the names of all existing and new master-eligible nodes.
+     * Indexing starts from 0.
+     * If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing.
+     */
+    private List<Settings> bootstrapMasterNodeWithSpecifiedIndex(List<Settings> allNodesSettings) {
+        if (getBootstrapMasterNodeIndex() == -1) { // fast-path
+            return allNodesSettings;
+        }
+
+        int currentNodeId = numMasterNodes() - 1;
+        List<Settings> newSettings = new ArrayList<>();
+
+        for (Settings settings : allNodesSettings) {
+            if (Node.NODE_MASTER_SETTING.get(settings) == false) {
+                newSettings.add(settings);
+            } else {
+                currentNodeId++;
+                if (currentNodeId != bootstrapMasterNodeIndex) {
+                    newSettings.add(settings);
+                } else {
+                    List<String> nodeNames = new ArrayList<>();
+
+                    for (Settings nodeSettings : getDataOrMasterNodeInstances(Settings.class)) {
+                        if (Node.NODE_MASTER_SETTING.get(nodeSettings)) {
+                            nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings));
+                        }
+                    }
+
+                    for (Settings nodeSettings : allNodesSettings) {
+                        if (Node.NODE_MASTER_SETTING.get(nodeSettings)) {
+                            nodeNames.add(Node.NODE_NAME_SETTING.get(nodeSettings));
+                        }
+                    }
+
+                    newSettings.add(Settings.builder().put(settings)
+                        .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames)
+                        .build());
+
+                    setBootstrapMasterNodeIndex(-1);
+                }
+            }
+        }
+
+        return newSettings;
+    }
+
     /**
      * Starts a node with default settings and returns its name.
      */
@@ -1992,7 +2059,7 @@ public synchronized List<String> startNodes(Settings... extraSettings) {
         }
         final List<NodeAndClient> nodes = new ArrayList<>();
         final int prevMasterCount = getMasterNodesCount();
-        int bootstrapMasterNodeIndex =
+        int autoBootstrapMasterNodeIndex =
             prevMasterCount == 0 && autoManageMinMasterNodes && newMasterCount > 0 && Arrays.stream(extraSettings)
                 .allMatch(s -> Node.NODE_MASTER_SETTING.get(s) == false || TestZenDiscovery.USE_ZEN2.get(s) == true)
                 ? RandomNumbers.randomIntBetween(random, 0, newMasterCount - 1) : -1;
@@ -2010,16 +2077,16 @@ public synchronized List<String> startNodes(Settings... extraSettings) {
             .map(Node.NODE_NAME_SETTING::get)
             .collect(Collectors.toList());
 
-        final List<Settings> updatedSettings = nodeConfigurationSource.addExtraClusterBootstrapSettings(settings);
+        final List<Settings> updatedSettings = bootstrapMasterNodeWithSpecifiedIndex(settings);
 
         for (int i = 0; i < numOfNodes; i++) {
             final Settings nodeSettings = updatedSettings.get(i);
             final Builder builder = Settings.builder();
             if (Node.NODE_MASTER_SETTING.get(nodeSettings)) {
-                if (bootstrapMasterNodeIndex == 0) {
+                if (autoBootstrapMasterNodeIndex == 0) {
                     builder.putList(INITIAL_MASTER_NODES_SETTING.getKey(), initialMasterNodes);
                 }
-                bootstrapMasterNodeIndex -= 1;
+                autoBootstrapMasterNodeIndex -= 1;
             }
 
             final NodeAndClient nodeAndClient =
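The new `bootstrapMasterNodeWithSpecifiedIndex` counts only master-eligible nodes when matching `bootstrapMasterNodeIndex`, so data-only nodes never shift the index. A self-contained sketch of that selection rule (the demo class and node names are illustrative, not part of the change):

    import java.util.List;

    // Sketch of the selection rule: the bootstrap index counts only
    // master-eligible nodes, in start order.
    public final class BootstrapIndexDemo {

        static String selectBootstrapNode(List<String> nodeNames, List<Boolean> masterEligible, int bootstrapIndex) {
            int current = -1;
            for (int i = 0; i < nodeNames.size(); i++) {
                if (masterEligible.get(i)) {
                    current++;
                    if (current == bootstrapIndex) {
                        return nodeNames.get(i); // this node receives initial_master_nodes
                    }
                }
            }
            return null; // index out of range: no node is bootstrapped
        }

        public static void main(String[] args) {
            List<String> names = List.of("data-0", "master-0", "data-1", "master-1");
            List<Boolean> master = List.of(false, true, false, true);
            // Prints "master-1": data nodes are skipped when counting.
            System.out.println(selectBootstrapNode(names, master, 1));
        }
    }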
diff --git a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
index 5ed21d64c6890..60c69bbd6c652 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
@@ -24,7 +24,6 @@ import java.nio.file.Path;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 
 public abstract class NodeConfigurationSource {
 
@@ -52,10 +51,6 @@ public Settings transportClientSettings() {
 
     public abstract Path nodeConfigPath(int nodeOrdinal);
 
-    public List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
-        return allNodesSettings;
-    }
-
     /** Returns plugins that should be loaded on the node */
     public Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.emptyList();
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java
index 2a1101c6d7986..d750a8256b8bc 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java
@@ -21,10 +21,8 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
@@ -72,7 +70,6 @@ protected final void execute(String action, Runnable runnable) {
         if (action.equals(HANDSHAKE_ACTION_NAME)) {
             runnable.run();
         } else {
-
             execute(runnable);
         }
     }
@@ -254,10 +251,6 @@ public String toString() {
         }
     }
 
-    private NamedWriteableRegistry writeableRegistry() {
-        return new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
-    }
-
     public enum ConnectionStatus {
         CONNECTED,
         DISCONNECTED, // network requests to or from this node throw a ConnectTransportException
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java
index ddfcc29c750ce..a6dbd1561936e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java
@@ -20,6 +20,7 @@ package org.elasticsearch.test.transport;
 
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Randomness;
@@ -29,6 +30,8 @@ import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.component.LifecycleListener;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
@@ -96,7 +99,8 @@ public <Response extends TransportResponse> void handleResponse(final long reque
         final Response deliveredResponse;
         try (BytesStreamOutput output = new BytesStreamOutput()) {
             response.writeTo(output);
-            deliveredResponse = transportResponseHandler.read(output.bytes().streamInput());
+            deliveredResponse = transportResponseHandler.read(
+                new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writeableRegistry()));
         } catch (IOException | UnsupportedOperationException e) {
             throw new AssertionError("failed to serialize/deserialize response " + response, e);
         }
@@ -275,4 +279,8 @@ public boolean removeMessageListener(TransportMessageListener listener) {
         }
         return false;
     }
+
+    protected NamedWriteableRegistry writeableRegistry() {
+        return new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
+    }
 }
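Wrapping the raw stream in `NamedWriteableAwareStreamInput` is what lets handlers deserialize responses that carry named writeables (for example cluster-state customs); a bare `StreamInput` throws `UnsupportedOperationException` from `readNamedWriteable`, which is why the catch clause above lists it. A condensed sketch of the wrapping, assuming the same cluster-module registry the change uses:

    import org.elasticsearch.cluster.ClusterModule;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.io.stream.StreamInput;

    import java.io.IOException;

    // Sketch: turn serialized bytes back into a StreamInput that can resolve
    // named writeables against the cluster-module registry.
    final class NamedWriteableRoundTrip {

        static StreamInput namedWriteableAwareInput(BytesStreamOutput output) throws IOException {
            NamedWriteableRegistry registry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
            return new NamedWriteableAwareStreamInput(output.bytes().streamInput(), registry);
        }
    }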
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index ca2fe8c753e44..b48c9b9ddcf18 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -56,15 +56,11 @@ import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 
-import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.DATA;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.INGEST;
 import static org.elasticsearch.cluster.node.DiscoveryNode.Role.MASTER;
 import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING;
-import static org.elasticsearch.node.Node.NODE_MASTER_SETTING;
-import static org.elasticsearch.node.Node.NODE_NAME_SETTING;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists;
 import static org.hamcrest.Matchers.equalTo;
@@ -144,21 +140,23 @@ public void testBeforeTest() throws Exception {
         final boolean masterNodes;
         final int minNumDataNodes;
         final int maxNumDataNodes;
+        final int bootstrapMasterNodeIndex;
         if (autoManageMinMasterNodes) {
             masterNodes = randomBoolean();
             minNumDataNodes = randomIntBetween(0, 3);
             maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
+            bootstrapMasterNodeIndex = -1;
         } else {
             // if we manage min master nodes, we need to lock down the number of nodes
             minNumDataNodes = randomIntBetween(0, 4);
             maxNumDataNodes = minNumDataNodes;
             masterNodes = false;
+            bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
         }
         final int numClientNodes = randomIntBetween(0, 2);
         final String clusterName1 = "shared1";
         final String clusterName2 = "shared2";
         String transportClient = getTestTransportType();
-        final long bootstrapNodeSelectionSeed = randomLong();
         NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
@@ -176,14 +174,6 @@ public Settings nodeSettings(int nodeOrdinal) {
                 return settings.build();
             }
 
-            @Override
-            public List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
-                if (autoManageMinMasterNodes) {
-                    return allNodesSettings;
-                }
-                return addBootstrapConfiguration(new Random(bootstrapNodeSelectionSeed), allNodesSettings);
-            }
-
             @Override
             public Path nodeConfigPath(int nodeOrdinal) {
                 return null;
@@ -202,9 +192,12 @@ public Settings transportClientSettings() {
         InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
             nodePrefix, mockPlugins(), Function.identity());
+        cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
+
         InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
             nodePrefix, mockPlugins(), Function.identity());
+        cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
 
         assertClusters(cluster0, cluster1, false);
         long seed = randomLong();
@@ -231,19 +224,6 @@ public Settings transportClientSettings() {
         }
     }
 
-    private static List<Settings> addBootstrapConfiguration(Random random, List<Settings> allNodesSettings) {
-        final List<Settings> updatedSettings = new ArrayList<>(allNodesSettings);
-        final int bootstrapIndex = randomFrom(random, IntStream.range(0, updatedSettings.size())
-            .filter(i -> NODE_MASTER_SETTING.get(allNodesSettings.get(i))).boxed().collect(Collectors.toList()));
-        final Settings settings = updatedSettings.get(bootstrapIndex);
-        assertFalse(INITIAL_MASTER_NODES_SETTING.exists(settings));
-        assertTrue(NODE_MASTER_SETTING.get(settings));
-        updatedSettings.set(bootstrapIndex,
-            Settings.builder().put(settings).putList(INITIAL_MASTER_NODES_SETTING.getKey(), allNodesSettings.stream()
-                .filter(NODE_MASTER_SETTING::get).map(NODE_NAME_SETTING::get).collect(Collectors.toList())).build());
-        return updatedSettings;
-    }
-
     public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException {
         long clusterSeed = randomLong();
         boolean masterNodes = randomBoolean();
@@ -353,8 +333,6 @@ public void testDifferentRolesMaintainPathOnRestart() throws Exception {
         InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, false, 0, 0, "test",
             new NodeConfigurationSource() {
-                private boolean bootstrapConfigurationSet;
-
                 @Override
                 public Settings nodeSettings(int nodeOrdinal) {
                     return Settings.builder()
@@ -369,16 +347,6 @@ public Settings nodeSettings(int nodeOrdinal) {
                         .build();
                 }
 
-                @Override
-                public List<Settings> addExtraClusterBootstrapSettings(List<Settings> allNodesSettings) {
-                    if (bootstrapConfigurationSet || allNodesSettings.stream().noneMatch(NODE_MASTER_SETTING::get)) {
-                        return allNodesSettings;
-                    }
-
-                    bootstrapConfigurationSet = true;
-                    return addBootstrapConfiguration(random(), allNodesSettings);
-                }
-
                 @Override
                 public Path nodeConfigPath(int nodeOrdinal) {
                     return null;
@@ -399,6 +367,8 @@ public Settings transportClientSettings() {
             roles.add(role);
         }
 
+        cluster.setBootstrapMasterNodeIndex(randomIntBetween(0, (int) roles.stream().filter(role -> role.equals(MASTER)).count() - 1));
+
         try {
             Map<Role, Set<Path>> pathsPerRole = new HashMap<>();
             for (int i = 0; i < numNodes; i++) {
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java
index a72b2f21d71df..f093143112d3d 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java
@@ -235,8 +235,7 @@ private long readFileBytes(String fileName, BytesReference reference) throws IOE
         BytesRefIterator refIterator = reference.iterator();
         BytesRef ref;
         while ((ref = refIterator.next()) != null) {
-            byte[] refBytes = ref.bytes;
-            indexInput.readBytes(refBytes, 0, refBytes.length);
+            indexInput.readBytes(ref.bytes, ref.offset, ref.length);
         }
 
         long offsetAfterRead = indexInput.getFilePointer();
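The one-line fix above matters because a `BytesRef` produced by a `BytesRefIterator` is frequently a slice of a larger shared array: reading into `ref.bytes` at offset 0 for `ref.bytes.length` bytes can clobber neighbouring slices and read the wrong number of bytes. A small self-contained illustration of the offset/length contract (the demo class is illustrative):

    import org.apache.lucene.util.BytesRef;

    // Sketch: a BytesRef is often a window into a larger shared array, so writes
    // and reads must target [offset, offset + length), not [0, bytes.length).
    final class BytesRefWindowDemo {

        public static void main(String[] args) {
            byte[] backing = new byte[8];
            BytesRef slice = new BytesRef(backing, 4, 4); // last 4 bytes only
            fill(slice);
            // backing[0..3] stays untouched; only backing[4..7] is written.
        }

        static void fill(BytesRef ref) {
            for (int i = 0; i < ref.length; i++) {
                ref.bytes[ref.offset + i] = (byte) i;
            }
        }
    }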
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
index c4fdeb116ae86..2dccc0e96b7a2 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
@@ -70,7 +70,6 @@ import org.elasticsearch.transport.nio.MockNioTransportPlugin;
 import org.elasticsearch.xpack.ccr.CcrSettings;
 import org.elasticsearch.xpack.ccr.LocalStateCcr;
-import org.elasticsearch.xpack.ccr.index.engine.FollowingEngine;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
 import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;
@@ -551,27 +550,6 @@ protected void assertMaxSeqNoOfUpdatesIsTransferred(Index leaderIndex, Index fol
         });
     }
 
-    protected void assertTotalNumberOfOptimizedIndexing(Index followerIndex, int numberOfShards, long expectedTotal) throws Exception {
-        assertBusy(() -> {
-            long[] numOfOptimizedOps = new long[numberOfShards];
-            for (int shardId = 0; shardId < numberOfShards; shardId++) {
-                for (String node : getFollowerCluster().nodesInclude(followerIndex.getName())) {
-                    IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, node);
-                    IndexShard shard = indicesService.getShardOrNull(new ShardId(followerIndex, shardId));
-                    if (shard != null && shard.routingEntry().primary()) {
-                        try {
-                            FollowingEngine engine = ((FollowingEngine) IndexShardTestCase.getEngine(shard));
-                            numOfOptimizedOps[shardId] = engine.getNumberOfOptimizedIndexing();
-                        } catch (AlreadyClosedException e) {
-                            throw new AssertionError(e); // causes assertBusy to retry
-                        }
-                    }
-                }
-            }
-            assertThat(Arrays.stream(numOfOptimizedOps).sum(), equalTo(expectedTotal));
-        });
-    }
-
     static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception {
         CountDownLatch latch = new CountDownLatch(1);
         clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() {
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java
index 55fcb6ace89fd..74c44704e2e1c 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java
@@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequest;
@@ -101,9 +102,30 @@ public void testFollowIndex() throws Exception {
         assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
         ensureLeaderYellow("index1");
 
-        final int firstBatchNumDocs = randomIntBetween(2, 64);
+        final int firstBatchNumDocs;
+        // Sometimes we want to index a lot of documents to ensure that the recovery works with larger files
+        if (rarely()) {
+            firstBatchNumDocs = randomIntBetween(1800, 2000);
+        } else {
+            firstBatchNumDocs = randomIntBetween(10, 64);
+        }
+        final int flushPoint = (int) (firstBatchNumDocs * 0.75);
+
         logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs);
-        for (int i = 0; i < firstBatchNumDocs; i++) {
+        BulkRequestBuilder bulkRequestBuilder = leaderClient().prepareBulk();
+        for (int i = 0; i < flushPoint; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            IndexRequest indexRequest = new IndexRequest("index1", "doc", Integer.toString(i))
+                .source(source, XContentType.JSON)
+                .timeout(TimeValue.timeValueSeconds(1));
+            bulkRequestBuilder.add(indexRequest);
+        }
+        bulkRequestBuilder.get();
+
+        leaderClient().admin().indices().prepareFlush("index1").setWaitIfOngoing(true).get();
+
+        // Index some docs after the flush that might be recovered in the normal index following operations
+        for (int i = flushPoint; i < firstBatchNumDocs; i++) {
             final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
             leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
         }
@@ -147,7 +169,7 @@ public void testFollowIndex() throws Exception {
         for (int i = 0; i < firstBatchNumDocs; i++) {
             assertBusy(assertExpectedDocumentRunnable(i));
         }
-        assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards, firstBatchNumDocs);
+
         pauseFollow("index2");
         followerClient().execute(ResumeFollowAction.INSTANCE, followRequest.getFollowRequest()).get();
         final int secondBatchNumDocs = randomIntBetween(2, 64);
@@ -172,8 +194,6 @@ public void testFollowIndex() throws Exception {
         for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
             assertBusy(assertExpectedDocumentRunnable(i));
         }
-        assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards,
-            firstBatchNumDocs + secondBatchNumDocs);
         pauseFollow("index2");
         assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards);
     }
@@ -287,7 +307,6 @@ public void testFollowIndexWithoutWaitForComplete() throws Exception {
         for (int i = 0; i < firstBatchNumDocs; i++) {
             assertBusy(assertExpectedDocumentRunnable(i));
         }
-        assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards, firstBatchNumDocs);
         pauseFollow("index2");
     }
@@ -432,8 +451,6 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure)
         assertIndexFullyReplicatedToFollower("index1", "index2");
         pauseFollow("index2");
         leaderClient().admin().indices().prepareRefresh("index1").get();
-        assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfShards,
-            leaderClient().prepareSearch("index1").get().getHits().getTotalHits().value);
         assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfShards);
     }
@@ -475,7 +492,6 @@ public void testFollowIndexWithNestedField() throws Exception {
         }
         pauseFollow("index2");
         assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), 1);
-        assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), 1, numDocs);
     }
 
     public void testUnfollowNonExistingIndex() {
@@ -538,7 +554,6 @@ public void testFollowIndexMaxOperationSizeInBytes() throws Exception {
         }
         pauseFollow("index2");
         assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), 1);
-        assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), 1, numDocs);
     }
 
     public void testAttemptToChangeCcrFollowingIndexSetting() throws Exception {
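The reworked first batch above bulk-indexes roughly three quarters of the documents, flushes so those operations are baked into Lucene files, and then indexes the remainder, so the test exercises both file-based recovery and op-based following. A condensed sketch of that pattern, assuming a 6.x-style `Client` and the test's index/type names:

    import org.elasticsearch.action.bulk.BulkRequestBuilder;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.xcontent.XContentType;

    // Sketch: write part of the data through a flushed bulk so it lands in
    // Lucene files, then index the rest as individual requests.
    final class FlushPointIndexer {

        static void indexWithFlushPoint(Client client, int totalDocs) {
            final int flushPoint = (int) (totalDocs * 0.75);
            BulkRequestBuilder bulk = client.prepareBulk();
            for (int i = 0; i < flushPoint; i++) {
                bulk.add(client.prepareIndex("index1", "doc", Integer.toString(i))
                    .setSource("{\"f\":" + i + "}", XContentType.JSON));
            }
            bulk.get();
            // Flush so the bulk-indexed operations are captured in Lucene files.
            client.admin().indices().prepareFlush("index1").setWaitIfOngoing(true).get();
            for (int i = flushPoint; i < totalDocs; i++) {
                client.prepareIndex("index1", "doc", Integer.toString(i))
                    .setSource("{\"f\":" + i + "}", XContentType.JSON)
                    .get();
            }
        }
    }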
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
index 0b9640839202b..84dc4c9a5887b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.license;
 
-import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
@@ -271,17 +270,11 @@ private static class Status {
     private final boolean isSecurityExplicitlyEnabled;
 
     private Status status = new Status(OperationMode.TRIAL, true);
-    private boolean isSecurityEnabledByTrialVersion;
 
     public XPackLicenseState(Settings settings) {
         this.listeners = new CopyOnWriteArrayList<>();
         this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings);
-        // 6.0+ requires TLS for production licenses, so if TLS is enabled and security is enabled
-        // we can interpret this as an explicit enabling of security if the security enabled
-        // setting is not explicitly set
-        this.isSecurityExplicitlyEnabled = isSecurityEnabled &&
-            (settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) || XPackSettings.TRANSPORT_SSL_ENABLED.get(settings));
-        this.isSecurityEnabledByTrialVersion = false;
+        this.isSecurityExplicitlyEnabled = isSecurityEnabled && settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey());
     }
 
     private XPackLicenseState(XPackLicenseState xPackLicenseState) {
@@ -289,7 +282,6 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) {
         this.isSecurityEnabled = xPackLicenseState.isSecurityEnabled;
         this.isSecurityExplicitlyEnabled = xPackLicenseState.isSecurityExplicitlyEnabled;
         this.status = xPackLicenseState.status;
-        this.isSecurityEnabledByTrialVersion = xPackLicenseState.isSecurityEnabledByTrialVersion;
     }
 
     /**
@@ -304,16 +296,6 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) {
     void update(OperationMode mode, boolean active, @Nullable Version mostRecentTrialVersion) {
         synchronized (this) {
             status = new Status(mode, active);
-            if (isSecurityEnabled == true && isSecurityExplicitlyEnabled == false && mode == OperationMode.TRIAL
-                && isSecurityEnabledByTrialVersion == false) {
-                // Before 6.3, Trial licenses would default having security enabled.
-                // If this license was generated before that version, then treat it as if security is explicitly enabled
-                if (mostRecentTrialVersion == null || mostRecentTrialVersion.before(Version.V_6_3_0)) {
-                    LogManager.getLogger(getClass()).info("Automatically enabling security for older trial license ({})",
-                        mostRecentTrialVersion == null ? "[pre 6.1.0]" : mostRecentTrialVersion.toString());
-                    isSecurityEnabledByTrialVersion = true;
-                }
-            }
         }
         listeners.forEach(LicenseStateListener::licenseStateChanged);
     }
@@ -345,7 +327,7 @@ public synchronized boolean isActive() {
     public synchronized boolean isAuthAllowed() {
         OperationMode mode = status.mode;
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         return isSecurityCurrentlyEnabled && (mode == OperationMode.STANDARD || mode == OperationMode.GOLD
                 || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL);
     }
@@ -356,7 +338,7 @@ public synchronized boolean isAuthAllowed() {
     public synchronized boolean isIpFilteringAllowed() {
         OperationMode mode = status.mode;
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM
                 || mode == OperationMode.TRIAL);
     }
@@ -366,7 +348,7 @@ public synchronized boolean isIpFilteringAllowed() {
     public synchronized boolean isAuditingAllowed() {
         OperationMode mode = status.mode;
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM
                 || mode == OperationMode.TRIAL);
     }
@@ -395,7 +377,7 @@ public synchronized boolean isStatsAndHealthAllowed() {
     public synchronized boolean isDocumentAndFieldLevelSecurityAllowed() {
         OperationMode mode = status.mode;
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         return isSecurityCurrentlyEnabled && (mode == OperationMode.TRIAL || mode == OperationMode.PLATINUM);
     }
@@ -412,7 +394,7 @@ public enum AllowedRealmType {
      */
     public synchronized AllowedRealmType allowedRealmType() {
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         if (isSecurityCurrentlyEnabled) {
             switch (status.mode) {
                 case PLATINUM:
@@ -435,7 +417,7 @@ public synchronized AllowedRealmType allowedRealmType() {
      */
     public synchronized boolean isCustomRoleProvidersAllowed() {
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         return isSecurityCurrentlyEnabled && (status.mode == OperationMode.PLATINUM || status.mode == OperationMode.TRIAL)
                 && status.active;
     }
@@ -446,7 +428,7 @@ public synchronized boolean isCustomRoleProvidersAllowed() {
      */
     public synchronized boolean isAuthorizationRealmAllowed() {
         final boolean isSecurityCurrentlyEnabled =
-            isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabledByTrialVersion, isSecurityEnabled);
+            isSecurityEnabled(status.mode, isSecurityExplicitlyEnabled, isSecurityEnabled);
         return isSecurityCurrentlyEnabled && (status.mode == OperationMode.PLATINUM || status.mode == OperationMode.TRIAL)
                 && status.active;
     }
@@ -676,19 +658,17 @@ public synchronized boolean isSecurityAvailable() {
      * @return true if security has been disabled by a trial license which is the case of the
      *         default distribution post 6.3.0. The conditions necessary for this are:
      *         <ul>
-     *             <li>A trial license generated in 6.3.0+</li>
+     *             <li>A trial license</li>
      *             <li>xpack.security.enabled not specified as a setting</li>
      *         </ul>
      */
     public synchronized boolean isSecurityDisabledByTrialLicense() {
-        return status.mode == OperationMode.TRIAL && isSecurityEnabled
-            && isSecurityExplicitlyEnabled == false
-            && isSecurityEnabledByTrialVersion == false;
+        return status.mode == OperationMode.TRIAL && isSecurityEnabled && isSecurityExplicitlyEnabled == false;
     }
 
     private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled,
-                                             final boolean isSecurityEnabledByTrialVersion, final boolean isSecurityEnabled) {
-        return mode == OperationMode.TRIAL ? (isSecurityExplicitlyEnabled || isSecurityEnabledByTrialVersion) : isSecurityEnabled;
+                                             final boolean isSecurityEnabled) {
+        return mode == OperationMode.TRIAL ? isSecurityExplicitlyEnabled : isSecurityEnabled;
     }
 
     /**
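After this change the rule is easy to state: on a trial license, security is enabled only when `xpack.security.enabled` is set explicitly, and enabling transport TLS no longer switches it on implicitly. A condensed restatement of the new predicate for illustration (standalone, not the class itself):

    // Restatement of the simplified check, for illustration only.
    final class TrialSecurityRule {

        static boolean securityEnabled(boolean trialLicense, boolean explicitlyEnabled, boolean enabledSetting) {
            // TRIAL: require the explicit setting; other license types use the
            // effective xpack.security.enabled value as-is.
            return trialLicense ? explicitlyEnabled : enabledSetting;
        }

        public static void main(String[] args) {
            System.out.println(securityEnabled(true, false, true));  // false: TLS alone no longer enables security
            System.out.println(securityEnabled(true, true, true));   // true: explicitly enabled
        }
    }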
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java
index 76b735dc78a38..bbd5d950c8b9b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java
@@ -81,24 +81,15 @@ public void testSecurityDefaults() {
         assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL));
         assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true));
 
+        licenseState = new XPackLicenseState(Settings.EMPTY);
+        assertSecurityNotAllowed(licenseState);
+    }
+
+    public void testTransportSslDoesNotAutomaticallyEnableSecurityOnTrialLicense() {
+        final XPackLicenseState licenseState;
         licenseState = new XPackLicenseState(Settings.builder().put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), true).build());
-        assertThat(licenseState.isAuthAllowed(), is(true));
-        assertThat(licenseState.isIpFilteringAllowed(), is(true));
-        assertThat(licenseState.isAuditingAllowed(), is(true));
-        assertThat(licenseState.isStatsAndHealthAllowed(), is(true));
-        assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true));
-        assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL));
-        assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true));
-
-        licenseState = new XPackLicenseState(Settings.EMPTY);
-        assertThat(licenseState.isAuthAllowed(), is(false));
-        assertThat(licenseState.isIpFilteringAllowed(), is(false));
-        assertThat(licenseState.isAuditingAllowed(), is(false));
-        assertThat(licenseState.isStatsAndHealthAllowed(), is(true));
-        assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false));
-        assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE));
-        assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false));
+        assertSecurityNotAllowed(licenseState);
     }
 
     public void testSecurityBasic() {
@@ -106,13 +97,7 @@ public void testSecurityBasic() {
             Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build()));
         licenseState.update(BASIC, true, null);
 
-        assertThat(licenseState.isAuthAllowed(), is(false));
-        assertThat(licenseState.isIpFilteringAllowed(), is(false));
-        assertThat(licenseState.isAuditingAllowed(), is(false));
-        assertThat(licenseState.isStatsAndHealthAllowed(), is(true));
-        assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false));
-        assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE));
-        assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false));
+        assertSecurityNotAllowed(licenseState);
     }
 
     public void testSecurityBasicExpired() {
@@ -218,6 +203,10 @@ public void testNewTrialDefaultsSecurityOff() {
         licenseState.update(TRIAL, true, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT));
 
         assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(true));
+        assertSecurityNotAllowed(licenseState);
+    }
+
+    private void assertSecurityNotAllowed(XPackLicenseState licenseState) {
         assertThat(licenseState.isAuthAllowed(), is(false));
         assertThat(licenseState.isIpFilteringAllowed(), is(false));
         assertThat(licenseState.isAuditingAllowed(), is(false));
@@ -227,20 +216,6 @@ public void testNewTrialDefaultsSecurityOff() {
         assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false));
     }
 
-    public void testOldTrialDefaultsSecurityOn() {
-        XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY);
-        licenseState.update(TRIAL, true, rarely() ? null : VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_2_4));
-
-        assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(false));
-        assertThat(licenseState.isAuthAllowed(), is(true));
-        assertThat(licenseState.isIpFilteringAllowed(), is(true));
-        assertThat(licenseState.isAuditingAllowed(), is(true));
-        assertThat(licenseState.isStatsAndHealthAllowed(), is(true));
-        assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true));
-        assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL));
-        assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true));
-    }
-
     public void testSecurityAckBasicToNotGoldOrStandard() {
         OperationMode toMode = randomFrom(OperationMode.values(), mode -> mode != GOLD && mode != STANDARD);
         assertAckMesssages(XPackField.SECURITY, BASIC, toMode, 0);
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java
index f2914c3514d90..9fa34568a1e14 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java
@@ -89,6 +89,7 @@ public void testIndexFollowing() throws Exception {
         }
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37231")
     public void testAutoFollowing() throws Exception {
         assumeTrue("CCR became available in 6.5, but test relies on a fix that was shipped with 6.6.0",
             UPGRADE_FROM_VERSION.onOrAfter(Version.V_6_6_0));