diff --git a/output/schema/schema.json b/output/schema/schema.json index 76a6ba7c70..9054d904cb 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -185,7 +185,7 @@ }, "description": "Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted.", "docId": "async-search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html\r", "name": "async_search.delete", "request": { "name": "Request", @@ -224,7 +224,7 @@ }, "description": "Retrieves the results of a previously submitted async search request given its ID.", "docId": "async-search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html\r", "name": "async_search.get", "request": { "name": "Request", @@ -263,7 +263,7 @@ }, "description": "Retrieves the status of a previously submitted async search request given its ID.", "docId": "async-search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html\r", "name": "async_search.status", "request": { "name": "Request", @@ -302,7 +302,7 @@ }, "description": "Executes a search request asynchronously.", "docId": "async-search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/async-search.html\r", "name": "async_search.submit", "request": { "name": "Request", @@ -346,7 +346,7 @@ }, "description": "Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.", "docId": "autoscaling-delete-autoscaling-policy", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-delete-autoscaling-policy.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-delete-autoscaling-policy.html\r", "name": "autoscaling.delete_autoscaling_policy", "request": { "name": "Request", @@ -381,7 +381,7 @@ }, "description": "Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.", "docId": "autoscaling-get-autoscaling-capacity", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-get-autoscaling-capacity.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-get-autoscaling-capacity.html\r", "name": "autoscaling.get_autoscaling_capacity", "request": { "name": "Request", @@ -416,7 +416,7 @@ }, "description": "Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.", "docId": "autoscaling-get-autoscaling-capacity", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-get-autoscaling-capacity.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-get-autoscaling-capacity.html\r", "name": "autoscaling.get_autoscaling_policy", "request": { "name": "Request", @@ -451,7 +451,7 @@ }, "description": "Creates a new autoscaling policy. 
Designed for indirect use by ECE/ESS and ECK. Direct use is not supported.", "docId": "autoscaling-put-autoscaling-policy", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-put-autoscaling-policy.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/autoscaling-put-autoscaling-policy.html\r", "name": "autoscaling.put_autoscaling_policy", "request": { "name": "Request", @@ -493,7 +493,7 @@ }, "description": "Allows to perform multiple index/update/delete operations in a single request.", "docId": "docs-bulk", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-bulk.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-bulk.html\r", "name": "bulk", "request": { "name": "Request", @@ -543,7 +543,7 @@ }, "description": "Shows information about currently configured aliases to indices including filter and routing infos.", "docId": "cat-alias", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-alias.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-alias.html\r", "name": "cat.aliases", "privileges": { "index": [ @@ -594,7 +594,7 @@ }, "description": "Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.", "docId": "cat-allocation", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-allocation.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-allocation.html\r", "name": "cat.allocation", "privileges": { "cluster": [ @@ -695,7 +695,7 @@ }, "description": "Provides quick access to the document count of the entire cluster, or individual indices.", "docId": "cat-count", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-count.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-count.html\r", "name": "cat.count", "privileges": { "index": [ @@ -746,7 +746,7 @@ }, "description": "Shows how much heap memory is currently being used by fielddata on every data node in the cluster.", "docId": "cat-fielddata", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-fielddata.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-fielddata.html\r", "name": "cat.fielddata", "privileges": { "cluster": [ @@ -793,7 +793,7 @@ }, "description": "Returns a concise representation of the cluster health.", "docId": "cat-health", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-health.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-health.html\r", "name": "cat.health", "privileges": { "cluster": [ @@ -838,7 +838,7 @@ }, "description": "Returns help for the Cat APIs.", "docId": "cat", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat.html\r", "name": "cat.help", "request": { "name": "Request", @@ -877,7 +877,7 @@ }, "description": "Returns information about indices: number of primaries and replicas, document counts, disk size, ...", "docId": "cat-indices", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-indices.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-indices.html\r", "name": "cat.indices", 
"privileges": { "cluster": [ @@ -931,7 +931,7 @@ }, "description": "Returns information about the master node.", "docId": "cat-master", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-master.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-master.html\r", "name": "cat.master", "privileges": { "cluster": [ @@ -976,7 +976,7 @@ }, "description": "Gets configuration and usage information about data frame analytics jobs.", "docId": "cat-dfanalytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-dfanalytics.html\r", "name": "cat.ml_data_frame_analytics", "privileges": { "cluster": [ @@ -1027,7 +1027,7 @@ }, "description": "Gets configuration and usage information about datafeeds.", "docId": "cat-datafeeds", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-datafeeds.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-datafeeds.html\r", "name": "cat.ml_datafeeds", "privileges": { "cluster": [ @@ -1078,7 +1078,7 @@ }, "description": "Gets configuration and usage information about anomaly detection jobs.", "docId": "cat-anomaly-detectors", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-anomaly-detectors.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-anomaly-detectors.html\r", "name": "cat.ml_jobs", "privileges": { "cluster": [ @@ -1129,7 +1129,7 @@ }, "description": "Gets configuration and usage information about inference trained models.", "docId": "cat-trained-model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-trained-model.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-trained-model.html\r", "name": "cat.ml_trained_models", "privileges": { "cluster": [ @@ -1180,7 +1180,7 @@ }, "description": "Returns information about custom node attributes.", "docId": "cat-nodeattrs", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-nodeattrs.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-nodeattrs.html\r", "name": "cat.nodeattrs", "privileges": { "cluster": [ @@ -1225,7 +1225,7 @@ }, "description": "Returns basic statistics about performance of cluster nodes.", "docId": "cat-nodes", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-nodes.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-nodes.html\r", "name": "cat.nodes", "privileges": { "cluster": [ @@ -1270,7 +1270,7 @@ }, "description": "Returns a concise representation of the cluster pending tasks.", "docId": "cat-pending-tasks", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-pending-tasks.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-pending-tasks.html\r", "name": "cat.pending_tasks", "privileges": { "cluster": [ @@ -1315,7 +1315,7 @@ }, "description": "Returns information about installed plugins across nodes node.", "docId": "cat-plugins", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-plugins.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-plugins.html\r", "name": "cat.plugins", "privileges": { "cluster": [ @@ -1356,7 +1356,7 @@ 
}, "description": "Returns information about index shard recoveries, both on-going completed.", "docId": "cat-recovery", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-recovery.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-recovery.html\r", "name": "cat.recovery", "privileges": { "cluster": [ @@ -1406,7 +1406,7 @@ }, "description": "Returns information about snapshot repositories registered in the cluster.", "docId": "cat-repositories", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-repositories.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-repositories.html\r", "name": "cat.repositories", "privileges": { "cluster": [ @@ -1451,7 +1451,7 @@ }, "description": "Provides low-level information about the segments in the shards of an index.", "docId": "cat-segments", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-segments.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-segments.html\r", "name": "cat.segments", "privileges": { "cluster": [ @@ -1505,7 +1505,7 @@ }, "description": "Provides a detailed view of shard allocation on nodes.", "docId": "cat-shards", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-shards.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-shards.html\r", "name": "cat.shards", "privileges": { "cluster": [ @@ -1559,7 +1559,7 @@ }, "description": "Returns all snapshots in a specific repository.", "docId": "cat-snapshots", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-snapshots.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-snapshots.html\r", "name": "cat.snapshots", "privileges": { "cluster": [ @@ -1610,7 +1610,7 @@ }, "description": "Returns information about the tasks currently executing on one or more nodes in the cluster.", "docId": "tasks", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html\r", "name": "cat.tasks", "privileges": { "cluster": [ @@ -1655,7 +1655,7 @@ }, "description": "Returns information about existing templates.", "docId": "cat-templates", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-templates.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-templates.html\r", "name": "cat.templates", "privileges": { "cluster": [ @@ -1706,7 +1706,7 @@ }, "description": "Returns cluster-wide thread pool statistics per node.\nBy default the active, queue and rejected statistics are returned for all thread pools.", "docId": "cat-thread-pool", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-thread-pool.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-thread-pool.html\r", "name": "cat.thread_pool", "privileges": { "cluster": [ @@ -1757,7 +1757,7 @@ }, "description": "Gets configuration and usage information about transforms.", "docId": "cat-transforms", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-transforms.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-transforms.html\r", "name": "cat.transforms", "privileges": { "cluster": [ @@ -1804,7 +1804,7 @@ 
}, "description": "Deletes auto-follow patterns.", "docId": "ccr-delete-auto-follow-pattern", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-delete-auto-follow-pattern.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-delete-auto-follow-pattern.html\r", "name": "ccr.delete_auto_follow_pattern", "request": { "name": "Request", @@ -1839,7 +1839,7 @@ }, "description": "Creates a new follower index configured to follow the referenced leader index.", "docId": "ccr-put-follow", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-put-follow.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-put-follow.html\r", "name": "ccr.follow", "request": { "name": "Request", @@ -1877,7 +1877,7 @@ }, "description": "Retrieves information about all follower indices, including parameters and status for each follower index", "docId": "ccr-get-follow-info", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-follow-info.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-follow-info.html\r", "name": "ccr.follow_info", "request": { "name": "Request", @@ -1912,7 +1912,7 @@ }, "description": "Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices.", "docId": "ccr-get-follow-stats", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-follow-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-follow-stats.html\r", "name": "ccr.follow_stats", "request": { "name": "Request", @@ -1947,7 +1947,7 @@ }, "description": "Removes the follower retention leases from the leader.", "docId": "ccr-post-forget-follower", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-forget-follower.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-forget-follower.html\r", "name": "ccr.forget_follower", "request": { "name": "Request", @@ -1985,7 +1985,7 @@ }, "description": "Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection.", "docId": "ccr-get-auto-follow-pattern", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-auto-follow-pattern.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-auto-follow-pattern.html\r", "name": "ccr.get_auto_follow_pattern", "request": { "name": "Request", @@ -2026,7 +2026,7 @@ }, "description": "Pauses an auto-follow pattern", "docId": "ccr-pause-auto-follow-pattern", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-pause-auto-follow-pattern.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-pause-auto-follow-pattern.html\r", "name": "ccr.pause_auto_follow_pattern", "request": { "name": "Request", @@ -2061,7 +2061,7 @@ }, "description": "Pauses a follower index. 
The follower index will not fetch any additional operations from the leader index.", "docId": "ccr-post-pause-follow", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-pause-follow.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-pause-follow.html\r", "name": "ccr.pause_follow", "request": { "name": "Request", @@ -2096,7 +2096,7 @@ }, "description": "Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices.", "docId": "ccr-put-auto-follow-pattern", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-put-auto-follow-pattern.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-put-auto-follow-pattern.html\r", "name": "ccr.put_auto_follow_pattern", "request": { "name": "Request", @@ -2134,7 +2134,7 @@ }, "description": "Resumes an auto-follow pattern that has been paused", "docId": "ccr-resume-auto-follow-pattern", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-resume-auto-follow-pattern.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-resume-auto-follow-pattern.html\r", "name": "ccr.resume_auto_follow_pattern", "request": { "name": "Request", @@ -2169,7 +2169,7 @@ }, "description": "Resumes a follower index that has been paused", "docId": "ccr-post-resume-follow", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-resume-follow.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-resume-follow.html\r", "name": "ccr.resume_follow", "request": { "name": "Request", @@ -2207,7 +2207,7 @@ }, "description": "Gets all stats related to cross-cluster replication.", "docId": "ccr-get-stats", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-get-stats.html\r", "name": "ccr.stats", "request": { "name": "Request", @@ -2242,7 +2242,7 @@ }, "description": "Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.", "docId": "ccr-post-unfollow", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-unfollow.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ccr-post-unfollow.html\r", "name": "ccr.unfollow", "request": { "name": "Request", @@ -2281,7 +2281,7 @@ }, "description": "Explicitly clears the search context for a scroll.", "docId": "clear-scroll-api", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/clear-scroll-api.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/clear-scroll-api.html\r", "name": "clear_scroll", "request": { "name": "Request", @@ -2334,7 +2334,7 @@ }, "description": "Close a point in time", "docId": "point-in-time-api", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/point-in-time-api.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/point-in-time-api.html\r", "name": "close_point_in_time", "request": { "name": "Request", @@ -2376,7 +2376,7 @@ }, "description": "Provides explanations for shard allocations in the 
cluster.", "docId": "cluster-allocation-explain", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-allocation-explain.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-allocation-explain.html\r", "name": "cluster.allocation_explain", "request": { "name": "Request", @@ -2419,7 +2419,7 @@ }, "description": "Deletes a component template", "docId": "indices-component-template", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html\r", "name": "cluster.delete_component_template", "privileges": { "cluster": [ @@ -2459,7 +2459,7 @@ }, "description": "Clears cluster voting config exclusions.", "docId": "voting-config-exclusions", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/voting-config-exclusions.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/voting-config-exclusions.html\r", "name": "cluster.delete_voting_config_exclusions", "request": { "name": "Request", @@ -2498,7 +2498,7 @@ }, "description": "Returns information about whether a particular component template exist", "docId": "indices-component-template", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html\r", "name": "cluster.exists_component_template", "request": { "name": "Request", @@ -2537,7 +2537,7 @@ }, "description": "Returns one or more component templates", "docId": "indices-component-template", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html\r", "name": "cluster.get_component_template", "privileges": { "cluster": [ @@ -2587,7 +2587,7 @@ }, "description": "Returns cluster settings.", "docId": "cluster-get-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-get-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-get-settings.html\r", "name": "cluster.get_settings", "privileges": { "cluster": [ @@ -2631,7 +2631,7 @@ }, "description": "Returns basic information about the health of the cluster.", "docId": "cluster-health", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-health.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-health.html\r", "name": "cluster.health", "privileges": { "cluster": [ @@ -2682,7 +2682,7 @@ }, "description": "Returns different information about the cluster.", "docId": "cluster-info", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-info.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-info.html\r", "name": "cluster.info", "request": { "name": "Request", @@ -2721,7 +2721,7 @@ }, "description": "Returns a list of any cluster-level changes (e.g. 
create index, update mapping,\nallocate or fail shard) which have not yet been executed.", "docId": "cluster-pending", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-pending.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-pending.html\r", "name": "cluster.pending_tasks", "privileges": { "cluster": [ @@ -2761,7 +2761,7 @@ }, "description": "Updates the cluster voting config exclusions by node ids or node names.", "docId": "voting-config-exclusions", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/voting-config-exclusions.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/voting-config-exclusions.html\r", "name": "cluster.post_voting_config_exclusions", "request": { "name": "Request", @@ -2800,7 +2800,7 @@ }, "description": "Creates or updates a component template", "docId": "indices-component-template", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html\r", "name": "cluster.put_component_template", "privileges": { "cluster": [ @@ -2848,7 +2848,7 @@ }, "description": "Updates the cluster settings.", "docId": "cluster-update-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-update-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-update-settings.html\r", "name": "cluster.put_settings", "request": { "name": "Request", @@ -2886,7 +2886,7 @@ }, "description": "Returns the information about configured remote clusters.", "docId": "cluster-remote-info", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-remote-info.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-remote-info.html\r", "name": "cluster.remote_info", "request": { "name": "Request", @@ -2925,7 +2925,7 @@ }, "description": "Allows to manually change the allocation of individual shards in the cluster.", "docId": "cluster-reroute", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-reroute.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-reroute.html\r", "name": "cluster.reroute", "request": { "name": "Request", @@ -2967,7 +2967,7 @@ }, "description": "Returns a comprehensive information about the state of the cluster.", "docId": "cluster-state", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-state.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-state.html\r", "name": "cluster.state", "privileges": { "cluster": [ @@ -3024,7 +3024,7 @@ }, "description": "Returns high-level overview of cluster statistics.", "docId": "cluster-stats", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-stats.html\r", "name": "cluster.stats", "privileges": { "cluster": [ @@ -8244,7 +8244,7 @@ }, "description": "Closes one or more anomaly detection jobs. 
A job can be opened and closed multiple times throughout its lifecycle.", "docId": "ml-close-job", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-close-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-close-job.html\r", "name": "ml.close_job", "privileges": { "cluster": [ @@ -8291,7 +8291,7 @@ }, "description": "Deletes a calendar.", "docId": "ml-delete-calendar", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-calendar.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-calendar.html\r", "name": "ml.delete_calendar", "privileges": { "cluster": [ @@ -8335,7 +8335,7 @@ }, "description": "Deletes scheduled events from a calendar.", "docId": "ml-delete-calendar-event", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-calendar-event.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-calendar-event.html\r", "name": "ml.delete_calendar_event", "request": { "name": "Request", @@ -8374,7 +8374,7 @@ }, "description": "Deletes anomaly detection jobs from a calendar.", "docId": "ml-delete-calendar-job", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-calendar-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-calendar-job.html\r", "name": "ml.delete_calendar_job", "privileges": { "cluster": [ @@ -8418,7 +8418,7 @@ }, "description": "Deletes an existing data frame analytics job.", "docId": "ml-delete-dfanalytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-dfanalytics.html\r", "name": "ml.delete_data_frame_analytics", "privileges": { "cluster": [ @@ -8462,7 +8462,7 @@ }, "description": "Deletes an existing datafeed.", "docId": "ml-delete-datafeed", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-datafeed.html\r", "name": "ml.delete_datafeed", "privileges": { "cluster": [ @@ -10429,7 +10429,7 @@ }, "description": "Instantiates a data frame analytics job.", "docId": "put-dfanalytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-dfanalytics.html\r", "name": "ml.put_data_frame_analytics", "privileges": { "cluster": [ @@ -11906,7 +11906,7 @@ }, "description": "Returns information about hot threads on each node in the cluster.", "docId": "cluster-nodes-hot-threads", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-hot-threads.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-hot-threads.html\r", "name": "nodes.hot_threads", "privileges": { "cluster": [ @@ -11957,7 +11957,7 @@ }, "description": "Returns information about nodes in the cluster.", "docId": "cluster-nodes-info", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-info.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-info.html\r", "name": "nodes.info", "request": { "name": "Request", @@ -12057,7 +12057,7 @@ }, 
"description": "Returns statistical information about nodes in the cluster.", "docId": "cluster-nodes-stats", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-stats.html\r", "name": "nodes.stats", "request": { "name": "Request", @@ -12126,7 +12126,7 @@ }, "description": "Returns low-level information about REST actions usage on nodes.", "docId": "cluster-nodes-usage", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-usage.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-usage.html\r", "name": "nodes.usage", "request": { "name": "Request", @@ -12183,7 +12183,7 @@ }, "description": "Open a point in time that can be used in subsequent searches", "docId": "point-in-time-api", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/point-in-time-api.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/point-in-time-api.html\r", "name": "open_point_in_time", "privileges": { "index": [ @@ -17132,7 +17132,7 @@ }, "description": "Cancels a task, if it can be cancelled through an API.", "docId": "tasks", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html\r", "name": "tasks.cancel", "request": { "name": "Request", @@ -17173,7 +17173,7 @@ }, "description": "Returns information about a task.", "docId": "tasks", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html\r", "name": "tasks.get", "request": { "name": "Request", @@ -17208,7 +17208,7 @@ }, "description": "Returns a list of tasks.", "docId": "tasks", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html\r", "name": "tasks.list", "privileges": { "cluster": [ @@ -19233,7 +19233,7 @@ }, "properties": [ { - "description": "Set to false to disable setting 'result' in the response\nto 'noop' if no change to the document occurred.", + "description": "Set to false to disable setting 'result' in the response\r\nto 'noop' if no change to the document occurred.", "name": "detect_noop", "required": false, "serverDefault": true, @@ -19296,7 +19296,7 @@ } }, { - "description": "Set to false to disable source retrieval. You can also specify a comma-separated\nlist of the fields you want to retrieve.", + "description": "Set to false to disable source retrieval. You can also specify a comma-separated\r\nlist of the fields you want to retrieve.", "name": "_source", "required": false, "serverDefault": "true", @@ -19309,7 +19309,7 @@ } }, { - "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\nnew document. If the document exists, the 'script' is executed.", + "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\r\nnew document. 
If the document exists, the 'script' is executed.", "name": "upsert", "required": false, "type": { @@ -20242,7 +20242,7 @@ "kind": "union_of" } }, - "description": "\nIf the index does exist, but the document does not,\nthe response is the same as the successful case, but with a 404.", + "description": "\r\nIf the index does exist, but the document does not,\r\nthe response is the same as the successful case, but with a 404.", "statusCodes": [ 404 ] @@ -21868,7 +21868,7 @@ { "description": "Whether this field is registered as a metadata field.", "docId": "mapping-metadata", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-fields.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-fields.html\r", "name": "metadata_field", "required": false, "type": { @@ -21912,7 +21912,7 @@ "stability": "experimental" } }, - "description": "Contains metric type if this fields is used as a time series\nmetrics, absent if the field is not used as metric.", + "description": "Contains metric type if this fields is used as a time series\r\nmetrics, absent if the field is not used as metric.", "name": "time_series_metric", "required": false, "since": "8.0.0", @@ -21935,7 +21935,7 @@ "stability": "experimental" } }, - "description": "If this list is present in response then some indices have the\nfield marked as a dimension and other indices, the ones in this list, do not.", + "description": "If this list is present in response then some indices have the\r\nfield marked as a dimension and other indices, the ones in this list, do not.", "name": "non_dimension_indices", "required": false, "since": "8.0.0", @@ -21961,7 +21961,7 @@ "stability": "experimental" } }, - "description": "The list of indices where this field is present if these indices\ndon’t have the same `time_series_metric` value for this field.", + "description": "The list of indices where this field is present if these indices\r\ndon’t have the same `time_series_metric` value for this field.", "name": "metric_conflicts_indices", "required": false, "since": "8.0.0", @@ -22025,9 +22025,9 @@ "since": "7.12.0" } }, - "description": "Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.\nThese fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.", + "description": "Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.\r\nThese fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.", "docId": "runtime-search-request", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/runtime-search-request.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/runtime-search-request.html\r", "name": "runtime_mappings", "required": false, "since": "7.12.0", @@ -22041,7 +22041,7 @@ } ] }, - "description": "The field capabilities API returns the information about the capabilities of fields among multiple indices.\nThe field capabilities API returns runtime fields like any other field. For example, a runtime field with a type\nof keyword is returned as any other field that belongs to the `keyword` family.", + "description": "The field capabilities API returns the information about the capabilities of fields among multiple indices.\r\nThe field capabilities API returns runtime fields like any other field. 
For example, a runtime field with a type\r\nof keyword is returned as any other field that belongs to the `keyword` family.", "inherits": { "type": { "name": "RequestBase", @@ -22069,7 +22069,7 @@ ], "query": [ { - "description": "If false, the request returns an error if any wildcard expression, index alias,\nor `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request\ntargeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar.", + "description": "If false, the request returns an error if any wildcard expression, index alias,\r\nor `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request\r\ntargeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -22416,7 +22416,7 @@ { "description": "Boolean) If true, the request is real-time as opposed to near-real-time.", "docId": "realtime", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#realtime", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#realtime\r", "name": "realtime", "required": false, "serverDefault": true, @@ -22444,7 +22444,7 @@ { "description": "Target the specified primary shard.", "docId": "routing", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#get-routing", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#get-routing\r", "name": "routing", "required": false, "type": { @@ -23046,7 +23046,7 @@ { "description": "Target the specified primary shard.", "docId": "routing", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#get-routing", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#get-routing\r", "name": "routing", "required": false, "type": { @@ -24771,7 +24771,7 @@ "kind": "properties", "properties": [ { - "description": "Indicates which source fields are returned for matching documents. These\nfields are returned in the hits._source property of the search response.", + "description": "Indicates which source fields are returned for matching documents. These\r\nfields are returned in the hits._source property of the search response.", "name": "_source", "required": false, "type": { @@ -24783,7 +24783,7 @@ } }, { - "description": "The request returns doc values for field names matching these patterns\nin the hits.fields property of the response. Accepts wildcard (*) patterns.", + "description": "The request returns doc values for field names matching these patterns\r\nin the hits.fields property of the response. Accepts wildcard (*) patterns.", "name": "docvalue_fields", "required": false, "type": { @@ -24798,7 +24798,7 @@ } }, { - "description": "List of stored fields to return as part of a hit. If no fields are specified,\nno stored fields are included in the response. If this field is specified, the _source\nparameter defaults to false. You can pass _source: true to return both source fields\nand stored fields in the search response.", + "description": "List of stored fields to return as part of a hit. If no fields are specified,\r\nno stored fields are included in the response. If this field is specified, the _source\r\nparameter defaults to false. 
You can pass _source: true to return both source fields\r\nand stored fields in the search response.", "name": "stored_fields", "required": false, "type": { @@ -24810,7 +24810,7 @@ } }, { - "description": "The request returns values for field names matching these patterns\nin the hits.fields property of the response. Accepts wildcard (*) patterns.", + "description": "The request returns values for field names matching these patterns\r\nin the hits.fields property of the response. Accepts wildcard (*) patterns.", "name": "fields", "required": false, "type": { @@ -24828,7 +24828,7 @@ "since": "8.2.0" } }, - "description": "Query to filter the documents that can match. The kNN search will return the top\n`k` documents that also match this filter. The value can be a single query or a\nlist of queries. If `filter` isn't provided, all documents are allowed to match.", + "description": "Query to filter the documents that can match. The kNN search will return the top\r\n`k` documents that also match this filter. The value can be a single query or a\r\nlist of queries. If `filter` isn't provided, all documents are allowed to match.", "name": "filter", "required": false, "since": "8.2.0", @@ -24887,7 +24887,7 @@ }, "path": [ { - "description": "A comma-separated list of index names to search;\nuse `_all` or to perform the operation on all indices", + "description": "A comma-separated list of index names to search;\r\nuse `_all` or to perform the operation on all indices", "name": "index", "required": true, "type": { @@ -24932,7 +24932,7 @@ } }, { - "description": "If true, the request timed out before completion;\nreturned results may be partial or empty.", + "description": "If true, the request timed out before completion;\r\nreturned results may be partial or empty.", "name": "timed_out", "required": true, "type": { @@ -24977,7 +24977,7 @@ } }, { - "description": "Contains field values for the documents. These fields\nmust be specified in the request using the `fields` parameter.", + "description": "Contains field values for the documents. These fields\r\nmust be specified in the request using the `fields` parameter.", "name": "fields", "required": false, "type": { @@ -24996,7 +24996,7 @@ } }, { - "description": "Highest returned document score. This value is null for requests\nthat do not sort by score.", + "description": "Highest returned document score. 
This value is null for requests\r\nthat do not sort by score.", "name": "max_score", "required": false, "type": { @@ -25293,7 +25293,7 @@ { "description": "If `true`, the request is real-time as opposed to near-real-time.", "docId": "realtime", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#realtime", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-get.html#realtime\r", "name": "realtime", "required": false, "serverDefault": true, @@ -25343,9 +25343,9 @@ } }, { - "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.", + "description": "A comma-separated list of source fields to exclude from the response.\r\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.", "docId": "mapping-source-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-source-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-source-field.html\r", "name": "_source_excludes", "required": false, "type": { @@ -25357,9 +25357,9 @@ } }, { - "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", + "description": "A comma-separated list of source fields to include in the response.\r\nIf this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.\r\nIf the `_source` parameter is `false`, this parameter is ignored.", "docId": "mapping-source-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-source-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-source-field.html\r", "name": "_source_includes", "required": false, "type": { @@ -25654,7 +25654,7 @@ } }, { - "description": "List of stored fields to return as part of a hit. If no fields are specified,\nno stored fields are included in the response. If this field is specified, the _source\nparameter defaults to false. You can pass _source: true to return both source fields\nand stored fields in the search response.", + "description": "List of stored fields to return as part of a hit. If no fields are specified,\r\nno stored fields are included in the response. If this field is specified, the _source\r\nparameter defaults to false. You can pass _source: true to return both source fields\r\nand stored fields in the search response.", "name": "stored_fields", "required": false, "type": { @@ -25666,7 +25666,7 @@ } }, { - "description": "Array of wildcard (*) patterns. The request returns doc values for field\nnames matching these patterns in the hits.fields property of the response.", + "description": "Array of wildcard (*) patterns. The request returns doc values for field\r\nnames matching these patterns in the hits.fields property of the response.", "name": "docvalue_fields", "required": false, "type": { @@ -25715,7 +25715,7 @@ } }, { - "description": "Starting document offset. By default, you cannot page through more than 10,000\nhits using the from and size parameters. 
To page through more hits, use the\nsearch_after parameter.", + "description": "Starting document offset. By default, you cannot page through more than 10,000\r\nhits using the from and size parameters. To page through more hits, use the\r\nsearch_after parameter.", "name": "from", "required": false, "serverDefault": 0, @@ -25765,7 +25765,7 @@ } }, { - "description": "Minimum _score for matching documents. Documents with a lower _score are\nnot included in the search results.", + "description": "Minimum _score for matching documents. Documents with a lower _score are\r\nnot included in the search results.", "name": "min_score", "required": false, "type": { @@ -25859,7 +25859,7 @@ } }, { - "description": "The number of hits to return. By default, you cannot page through more\nthan 10,000 hits using the from and size parameters. To page through more\nhits, use the search_after parameter.", + "description": "The number of hits to return. By default, you cannot page through more\r\nthan 10,000 hits using the from and size parameters. To page through more\r\nhits, use the search_after parameter.", "name": "size", "required": false, "serverDefault": 10, @@ -25873,7 +25873,7 @@ }, { "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "name": "sort", "required": false, "type": { @@ -25885,7 +25885,7 @@ } }, { - "description": "Indicates which source fields are returned for matching documents. These\nfields are returned in the hits._source property of the search response.", + "description": "Indicates which source fields are returned for matching documents. These\r\nfields are returned in the hits._source property of the search response.", "name": "_source", "required": false, "type": { @@ -25897,7 +25897,7 @@ } }, { - "description": "Array of wildcard (*) patterns. The request returns values for field names\nmatching these patterns in the hits.fields property of the response.", + "description": "Array of wildcard (*) patterns. The request returns values for field names\r\nmatching these patterns in the hits.fields property of the response.", "name": "fields", "required": false, "type": { @@ -25912,7 +25912,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard. If a query reaches this\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\nbefore sorting. Defaults to 0, which does not terminate query execution early.", + "description": "Maximum number of documents to collect for each shard. If a query reaches this\r\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\r\nbefore sorting. Defaults to 0, which does not terminate query execution early.", "name": "terminate_after", "required": false, "serverDefault": 0, @@ -25925,7 +25925,7 @@ } }, { - "description": "Stats groups to associate with the search. Each group maintains a statistics\naggregation for its associated searches. You can retrieve these stats using\nthe indices stats API.", + "description": "Stats groups to associate with the search. Each group maintains a statistics\r\naggregation for its associated searches. You can retrieve these stats using\r\nthe indices stats API.", "name": "stats", "required": false, "type": { @@ -25940,7 +25940,7 @@ } }, { - "description": "Specifies the period of time to wait for a response from each shard. 
If no response\nis received before the timeout expires, the request fails and returns an error.\nDefaults to no timeout.", + "description": "Specifies the period of time to wait for a response from each shard. If no response\r\nis received before the timeout expires, the request fails and returns an error.\r\nDefaults to no timeout.", "name": "timeout", "required": false, "type": { @@ -25965,7 +25965,7 @@ } }, { - "description": "Number of hits matching the query to count accurately. If true, the exact\nnumber of hits is returned at the cost of some performance. If false, the\nresponse does not include the total number of hits matching the query.\nDefaults to 10,000 hits.", + "description": "Number of hits matching the query to count accurately. If true, the exact\r\nnumber of hits is returned at the cost of some performance. If false, the\r\nresponse does not include the total number of hits matching the query.\r\nDefaults to 10,000 hits.", "name": "track_total_hits", "required": false, "type": { @@ -25990,7 +25990,7 @@ } }, { - "description": "Defines one or more runtime fields in the search request. These fields take\nprecedence over mapped fields with the same name.", + "description": "Defines one or more runtime fields in the search request. These fields take\r\nprecedence over mapped fields with the same name.", "name": "runtime_mappings", "required": false, "type": { @@ -26002,7 +26002,7 @@ } }, { - "description": "If true, returns sequence number and primary term of the last modification\nof each hit. See Optimistic concurrency control.", + "description": "If true, returns sequence number and primary term of the last modification\r\nof each hit. See Optimistic concurrency control.", "name": "seq_no_primary_term", "required": false, "type": { @@ -26014,7 +26014,7 @@ } }, { - "description": "Limits the search to a point in time (PIT). If you provide a PIT, you\ncannot specify an in the request path.", + "description": "Limits the search to a point in time (PIT). If you provide a PIT, you\r\ncannot specify an in the request path.", "name": "pit", "required": false, "type": { @@ -26231,7 +26231,7 @@ { "description": "If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.", "docId": "ccs-network-delays", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cross-cluster-search.html#ccs-network-delays", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cross-cluster-search.html#ccs-network-delays\r", "name": "ccs_minimize_roundtrips", "required": false, "serverDefault": true, @@ -26672,7 +26672,7 @@ } }, { - "description": "ID of the search template to use. If no source is specified,\nthis parameter is required.", + "description": "ID of the search template to use. If no source is specified,\r\nthis parameter is required.", "name": "id", "required": false, "type": { @@ -26714,7 +26714,7 @@ } }, { - "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. Also supports Mustache variables. If no id is specified, this\nparameter is required.", + "description": "An inline search template. Supports the same parameters as the search API's\r\nrequest body. Also supports Mustache variables. 
If no id is specified, this\r\nparameter is required.", "name": "source", "required": false, "type": { @@ -27223,7 +27223,7 @@ "body": { "kind": "no_body" }, - "description": "A search request by default executes against the most recent visible data of the target indices,\nwhich is called point in time. Elasticsearch pit (point in time) is a lightweight view into the\nstate of the data as it existed when initiated. In some cases, it’s preferred to perform multiple\nsearch requests using the same point in time. For example, if refreshes happen between\n`search_after` requests, then the results of those requests might not be consistent as changes happening\nbetween searches are only visible to the more recent point in time.", + "description": "A search request by default executes against the most recent visible data of the target indices,\r\nwhich is called point in time. Elasticsearch pit (point in time) is a lightweight view into the\r\nstate of the data as it existed when initiated. In some cases, it’s preferred to perform multiple\r\nsearch requests using the same point in time. For example, if refreshes happen between\r\n`search_after` requests, then the results of those requests might not be consistent as changes happening\r\nbetween searches are only visible to the more recent point in time.", "inherits": { "type": { "name": "RequestBase", @@ -27781,7 +27781,7 @@ { "description": "Discounted cumulative gain (DCG)", "docId": "dcg", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#_discounted_cumulative_gain_dcg", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#_discounted_cumulative_gain_dcg\r", "inherits": { "type": { "name": "RankEvalMetricBase", @@ -27814,7 +27814,7 @@ { "description": "Expected Reciprocal Rank (ERR)", "docId": "expected-reciprocal", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#_expected_reciprocal_rank_err", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#_expected_reciprocal_rank_err\r", "inherits": { "type": { "name": "RankEvalMetricBase", @@ -27845,7 +27845,7 @@ { "description": "Mean Reciprocal Rank", "docId": "mean-reciprocal", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#_mean_reciprocal_rank", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#_mean_reciprocal_rank\r", "inherits": { "type": { "name": "RankEvalMetricRatingTreshold", @@ -27863,7 +27863,7 @@ { "description": "Precision at K (P@k)", "docId": "k-precision", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#k-precision", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#k-precision\r", "inherits": { "type": { "name": "RankEvalMetricRatingTreshold", @@ -27924,7 +27924,7 @@ { "description": "Recall at K (R@k)", "docId": "k-recall", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#k-recall", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html#k-recall\r", "inherits": { "type": { "name": "RankEvalMetricRatingTreshold", @@ -28101,7 +28101,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and index aliases used to limit the request. 
Wildcard (`*`) expressions are supported.\nTo target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.", + "description": "Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported.\r\nTo target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.", "name": "index", "required": false, "type": { @@ -29599,7 +29599,7 @@ { "description": "Period to retain the search context for scrolling.", "docId": "scroll-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results\r", "name": "scroll", "required": false, "serverDefault": "1d", @@ -29659,7 +29659,7 @@ { "description": "Period to retain the search context for scrolling.", "docId": "scroll-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results\r", "name": "scroll", "required": false, "serverDefault": "1d", @@ -29814,7 +29814,7 @@ } }, { - "description": "Starting document offset.\nNeeds to be non-negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "description": "Starting document offset.\r\nNeeds to be non-negative.\r\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\r\nTo page through more hits, use the `search_after` parameter.", "name": "from", "required": false, "serverDefault": 0, @@ -29839,7 +29839,7 @@ } }, { - "description": "Number of hits matching the query to count accurately.\nIf `true`, the exact number of hits is returned at the cost of some performance.\nIf `false`, the response does not include the total number of hits matching the query.", + "description": "Number of hits matching the query to count accurately.\r\nIf `true`, the exact number of hits is returned at the cost of some performance.\r\nIf `false`, the response does not include the total number of hits matching the query.", "name": "track_total_hits", "required": false, "serverDefault": "10000", @@ -29878,7 +29878,7 @@ } }, { - "description": "Array of wildcard (`*`) patterns.\nThe request returns doc values for field names matching these patterns in the `hits.fields` property of the response.", + "description": "Array of wildcard (`*`) patterns.\r\nThe request returns doc values for field names matching these patterns in the `hits.fields` property of the response.", "name": "docvalue_fields", "required": false, "type": { @@ -29946,7 +29946,7 @@ } }, { - "description": "Minimum `_score` for matching documents.\nDocuments with a lower `_score` are not included in the search results.", + "description": "Minimum `_score` for matching documents.\r\nDocuments with a lower `_score` are not included in the search results.", "name": "min_score", "required": false, "type": { @@ -29958,7 +29958,7 @@ } }, { - "description": "Use the `post_filter` parameter to filter search results.\nThe search hits are filtered after the aggregations are calculated.\nA post filter has no impact on the aggregation results.", + "description": "Use the 
`post_filter` parameter to filter search results.\r\nThe search hits are filtered after the aggregations are calculated.\r\nA post filter has no impact on the aggregation results.", "name": "post_filter", "required": false, "type": { @@ -29970,7 +29970,7 @@ } }, { - "description": "Set to `true` to return detailed timing information about the execution of individual components in a search request.\nNOTE: This is a debugging tool and adds significant overhead to search execution.", + "description": "Set to `true` to return detailed timing information about the execution of individual components in a search request.\r\nNOTE: This is a debugging tool and adds significant overhead to search execution.", "name": "profile", "required": false, "serverDefault": false, @@ -30057,7 +30057,7 @@ } }, { - "description": "The number of hits to return.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "description": "The number of hits to return.\r\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\r\nTo page through more hits, use the `search_after` parameter.", "name": "size", "required": false, "serverDefault": 10, @@ -30084,7 +30084,7 @@ { "description": "A comma-separated list of : pairs.", "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "name": "sort", "required": false, "type": { @@ -30096,7 +30096,7 @@ } }, { - "description": "Indicates which source fields are returned for matching documents.\nThese fields are returned in the hits._source property of the search response.", + "description": "Indicates which source fields are returned for matching documents.\r\nThese fields are returned in the hits._source property of the search response.", "name": "_source", "required": false, "type": { @@ -30108,7 +30108,7 @@ } }, { - "description": "Array of wildcard (`*`) patterns.\nThe request returns values for field names matching these patterns in the `hits.fields` property of the response.", + "description": "Array of wildcard (`*`) patterns.\r\nThe request returns values for field names matching these patterns in the `hits.fields` property of the response.", "name": "fields", "required": false, "type": { @@ -30135,7 +30135,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\nUse with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.\nIf set to `0` (default), the query does not terminate early.", + "description": "Maximum number of documents to collect for each shard.\r\nIf a query reaches this limit, Elasticsearch terminates the query early.\r\nElasticsearch collects documents before sorting.\r\nUse with caution.\r\nElasticsearch applies this parameter to each shard handling the request.\r\nWhen possible, let Elasticsearch perform early termination automatically.\r\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data 
tiers.\r\nIf set to `0` (default), the query does not terminate early.", "name": "terminate_after", "required": false, "serverDefault": 0, @@ -30148,7 +30148,7 @@ } }, { - "description": "Specifies the period of time to wait for a response from each shard.\nIf no response is received before the timeout expires, the request fails and returns an error.\nDefaults to no timeout.", + "description": "Specifies the period of time to wait for a response from each shard.\r\nIf no response is received before the timeout expires, the request fails and returns an error.\r\nDefaults to no timeout.", "name": "timeout", "required": false, "type": { @@ -30198,7 +30198,7 @@ } }, { - "description": "List of stored fields to return as part of a hit.\nIf no fields are specified, no stored fields are included in the response.\nIf this field is specified, the `_source` parameter defaults to `false`.\nYou can pass `_source: true` to return both source fields and stored fields in the search response.", + "description": "List of stored fields to return as part of a hit.\r\nIf no fields are specified, no stored fields are included in the response.\r\nIf this field is specified, the `_source` parameter defaults to `false`.\r\nYou can pass `_source: true` to return both source fields and stored fields in the search response.", "name": "stored_fields", "required": false, "type": { @@ -30210,7 +30210,7 @@ } }, { - "description": "Limits the search to a point in time (PIT).\nIf you provide a PIT, you cannot specify an `` in the request path.", + "description": "Limits the search to a point in time (PIT).\r\nIf you provide a PIT, you cannot specify an `` in the request path.", "name": "pit", "required": false, "type": { @@ -30222,7 +30222,7 @@ } }, { - "description": "Defines one or more runtime fields in the search request.\nThese fields take precedence over mapped fields with the same name.", + "description": "Defines one or more runtime fields in the search request.\r\nThese fields take precedence over mapped fields with the same name.", "name": "runtime_mappings", "required": false, "type": { @@ -30234,7 +30234,7 @@ } }, { - "description": "Stats groups to associate with the search.\nEach group maintains a statistics aggregation for its associated searches.\nYou can retrieve these stats using the indices stats API.", + "description": "Stats groups to associate with the search.\r\nEach group maintains a statistics aggregation for its associated searches.\r\nYou can retrieve these stats using the indices stats API.", "name": "stats", "required": false, "type": { @@ -30250,7 +30250,7 @@ } ] }, - "description": "Returns search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.", + "description": "Returns search hits that match the query defined in the request.\r\nYou can provide search queries using the `q` query string parameter or the request body.\r\nIf both are specified, only the query parameter is used.", "inherits": { "type": { "name": "RequestBase", @@ -30264,7 +30264,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "Comma-separated list of data streams, indices, and aliases to search.\r\nSupports wildcards (`*`).\r\nTo search all data streams and indices, omit this parameter or use 
`*` or `_all`.", "name": "index", "required": false, "type": { @@ -30278,7 +30278,7 @@ ], "query": [ { - "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", + "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\r\nThis behavior applies even if the request targets other open indices.\r\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -30304,7 +30304,7 @@ } }, { - "description": "Analyzer to use for the query string.\nThis parameter can only be used when the q query string parameter is specified.", + "description": "Analyzer to use for the query string.\r\nThis parameter can only be used when the q query string parameter is specified.", "name": "analyzer", "required": false, "type": { @@ -30316,7 +30316,7 @@ } }, { - "description": "If true, wildcard and prefix queries are analyzed.\nThis parameter can only be used when the q query string parameter is specified.", + "description": "If true, wildcard and prefix queries are analyzed.\r\nThis parameter can only be used when the q query string parameter is specified.", "name": "analyze_wildcard", "required": false, "serverDefault": false, @@ -30329,7 +30329,7 @@ } }, { - "description": "The number of shard results that should be reduced at once on the coordinating node.\nThis value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.", + "description": "The number of shard results that should be reduced at once on the coordinating node.\r\nThis value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.", "name": "batched_reduce_size", "required": false, "serverDefault": 512, @@ -30344,7 +30344,7 @@ { "description": "If true, network round-trips between the coordinating node and the remote clusters are minimized when executing cross-cluster search (CCS) requests.", "docId": "ccs-network-delays", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cross-cluster-search.html#ccs-network-delays", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cross-cluster-search.html#ccs-network-delays\r", "name": "ccs_minimize_roundtrips", "required": false, "serverDefault": true, @@ -30357,7 +30357,7 @@ } }, { - "description": "The default operator for query string query: AND or OR.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The default operator for query string query: AND or OR.\r\nThis parameter can only be used when the `q` query string parameter is specified.", "name": "default_operator", "required": false, "serverDefault": "OR", @@ -30370,7 +30370,7 @@ } }, { - "description": "Field to use as default where no field prefix is given in the query string.\nThis parameter can only be used when the q query string parameter is specified.", + "description": "Field to use as default where no field 
prefix is given in the query string.\r\nThis parameter can only be used when the q query string parameter is specified.", "name": "df", "required": false, "type": { @@ -30394,7 +30394,7 @@ } }, { - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of index that wildcard patterns can match.\r\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\r\nSupports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "type": { @@ -30445,7 +30445,7 @@ } }, { - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\r\nThis parameter can only be used when the `q` query string parameter is specified.", "name": "lenient", "required": false, "serverDefault": false, @@ -30458,7 +30458,7 @@ } }, { - "description": "Defines the number of concurrent shard requests per node this search executes concurrently.\nThis value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.", + "description": "Defines the number of concurrent shard requests per node this search executes concurrently.\r\nThis value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests.", "name": "max_concurrent_shard_requests", "required": false, "serverDefault": 5, @@ -30471,7 +30471,7 @@ } }, { - "description": "The minimum version of the node that can handle the request\nAny handling node with a lower version will fail the request.", + "description": "The minimum version of the node that can handle the request\r\nAny handling node with a lower version will fail the request.", "name": "min_compatible_shard_node", "required": false, "type": { @@ -30483,7 +30483,7 @@ } }, { - "description": "Nodes and shards used for the search.\nBy default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are:\n`_only_local` to run the search only on shards on the local node;\n`_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method;\n`_only_nodes:,` to run the search on only the specified nodes IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method;\n`_prefer_nodes:,` to if possible, run the search on the specified nodes IDs, or if not, select shards using the default method;\n`_shards:,` to run the search only on the specified shards;\n`` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order.", + "description": "Nodes and shards used for the search.\r\nBy default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. 
Valid values are:\r\n`_only_local` to run the search only on shards on the local node;\r\n`_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method;\r\n`_only_nodes:,` to run the search on only the specified nodes IDs, where, if suitable shards exist on more than one selected node, use shards on those nodes using the default method, or if none of the specified nodes are available, select shards from any available node using the default method;\r\n`_prefer_nodes:,` to if possible, run the search on the specified nodes IDs, or if not, select shards using the default method;\r\n`_shards:,` to run the search only on the specified shards;\r\n`` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order.", "name": "preference", "required": false, "type": { @@ -30495,7 +30495,7 @@ } }, { - "description": "Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.\nThis filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).\nWhen unspecified, the pre-filter phase is executed if any of these conditions is met:\nthe request targets more than 128 shards;\nthe request targets one or more read-only index;\nthe primary sort of the query targets an indexed field.", + "description": "Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.\r\nThis filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).\r\nWhen unspecified, the pre-filter phase is executed if any of these conditions is met:\r\nthe request targets more than 128 shards;\r\nthe request targets one or more read-only index;\r\nthe primary sort of the query targets an indexed field.", "name": "pre_filter_shard_size", "required": false, "type": { @@ -30507,7 +30507,7 @@ } }, { - "description": "If `true`, the caching of search results is enabled for requests where `size` is `0`.\nDefaults to index level settings.", + "description": "If `true`, the caching of search results is enabled for requests where `size` is `0`.\r\nDefaults to index level settings.", "name": "request_cache", "required": false, "type": { @@ -30531,9 +30531,9 @@ } }, { - "description": "Period to retain the search context for scrolling. See Scroll search results.\nBy default, this value cannot exceed `1d` (24 hours).\nYou can change this limit using the `search.max_keep_alive` cluster-level setting.", + "description": "Period to retain the search context for scrolling. 
See Scroll search results.\r\nBy default, this value cannot exceed `1d` (24 hours).\r\nYou can change this limit using the `search.max_keep_alive` cluster-level setting.", "docId": "scroll-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results\r", "name": "scroll", "required": false, "type": { @@ -30572,7 +30572,7 @@ } }, { - "description": "A comma-separated list of stored fields to return as part of a hit.\nIf no fields are specified, no stored fields are included in the response.\nIf this field is specified, the `_source` parameter defaults to `false`.\nYou can pass `_source: true` to return both source fields and stored fields in the search response.", + "description": "A comma-separated list of stored fields to return as part of a hit.\r\nIf no fields are specified, no stored fields are included in the response.\r\nIf this field is specified, the `_source` parameter defaults to `false`.\r\nYou can pass `_source: true` to return both source fields and stored fields in the search response.", "name": "stored_fields", "required": false, "type": { @@ -30596,7 +30596,7 @@ } }, { - "description": "Specifies the suggest mode.\nThis parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.", + "description": "Specifies the suggest mode.\r\nThis parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.", "name": "suggest_mode", "required": false, "serverDefault": "missing", @@ -30609,7 +30609,7 @@ } }, { - "description": "Number of suggestions to return.\nThis parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.", + "description": "Number of suggestions to return.\r\nThis parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.", "name": "suggest_size", "required": false, "type": { @@ -30621,7 +30621,7 @@ } }, { - "description": "The source text for which the suggestions should be returned.\nThis parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.", + "description": "The source text for which the suggestions should be returned.\r\nThis parameter can only be used when the `suggest_field` and `suggest_text` query string parameters are specified.", "name": "suggest_text", "required": false, "type": { @@ -30633,7 +30633,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\nUse with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.\nIf set to `0` (default), the query does not terminate early.", + "description": "Maximum number of documents to collect for each shard.\r\nIf a query reaches this limit, Elasticsearch terminates the query early.\r\nElasticsearch collects documents before sorting.\r\nUse with caution.\r\nElasticsearch applies this parameter to each shard handling the request.\r\nWhen possible, let Elasticsearch perform early 
termination automatically.\r\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.\r\nIf set to `0` (default), the query does not terminate early.", "name": "terminate_after", "required": false, "serverDefault": 0, @@ -30646,7 +30646,7 @@ } }, { - "description": "Specifies the period of time to wait for a response from each shard.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Specifies the period of time to wait for a response from each shard.\r\nIf no response is received before the timeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "type": { @@ -30658,7 +30658,7 @@ } }, { - "description": "Number of hits matching the query to count accurately.\nIf `true`, the exact number of hits is returned at the cost of some performance.\nIf `false`, the response does not include the total number of hits matching the query.", + "description": "Number of hits matching the query to count accurately.\r\nIf `true`, the exact number of hits is returned at the cost of some performance.\r\nIf `false`, the response does not include the total number of hits matching the query.", "name": "track_total_hits", "required": false, "serverDefault": "10000", @@ -30723,7 +30723,7 @@ } }, { - "description": "Indicates which source fields are returned for matching documents.\nThese fields are returned in the `hits._source` property of the search response.\nValid values are:\n`true` to return the entire document source;\n`false` to not return the document source;\n`` to return the source fields that are specified as a comma-separated list (supports wildcard (`*`) patterns).", + "description": "Indicates which source fields are returned for matching documents.\r\nThese fields are returned in the `hits._source` property of the search response.\r\nValid values are:\r\n`true` to return the entire document source;\r\n`false` to not return the document source;\r\n`` to return the source fields that are specified as a comma-separated list (supports wildcard (`*`) patterns).", "name": "_source", "required": false, "serverDefault": "true", @@ -30736,7 +30736,7 @@ } }, { - "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", + "description": "A comma-separated list of source fields to exclude from the response.\r\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.\r\nIf the `_source` parameter is `false`, this parameter is ignored.", "name": "_source_excludes", "required": false, "type": { @@ -30748,7 +30748,7 @@ } }, { - "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned.\nYou can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", + "description": "A comma-separated list of source fields to include in the response.\r\nIf this parameter is specified, only these source fields are returned.\r\nYou can exclude fields from this subset using the `_source_excludes` query parameter.\r\nIf the `_source` parameter is `false`, this parameter is ignored.", "name": "_source_includes", 
"required": false, "type": { @@ -30772,7 +30772,7 @@ } }, { - "description": "Query in the Lucene query string syntax using query parameter search.\nQuery parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.", + "description": "Query in the Lucene query string syntax using query parameter search.\r\nQuery parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.", "name": "q", "required": false, "type": { @@ -30784,7 +30784,7 @@ } }, { - "description": "Defines the number of hits to return.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "description": "Defines the number of hits to return.\r\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\r\nTo page through more hits, use the `search_after` parameter.", "name": "size", "required": false, "serverDefault": 10, @@ -30797,7 +30797,7 @@ } }, { - "description": "Starting document offset.\nNeeds to be non-negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "description": "Starting document offset.\r\nNeeds to be non-negative.\r\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\r\nTo page through more hits, use the `search_after` parameter.", "name": "from", "required": false, "serverDefault": 0, @@ -30812,7 +30812,7 @@ { "description": "A comma-separated list of : pairs.", "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "name": "sort", "required": false, "type": { @@ -31706,15 +31706,15 @@ "kind": "enum", "members": [ { - "description": "Use the characters specified by `boundary_chars` as highlighting boundaries.\nThe `boundary_max_scan` setting controls how far to scan for boundary characters.\nOnly valid for the `fvh` highlighter.", + "description": "Use the characters specified by `boundary_chars` as highlighting boundaries.\r\nThe `boundary_max_scan` setting controls how far to scan for boundary characters.\r\nOnly valid for the `fvh` highlighter.", "name": "chars" }, { - "description": "Break highlighted fragments at the next sentence boundary, as determined by Java’s `BreakIterator`.\nYou can specify the locale to use with `boundary_scanner_locale`.\nWhen used with the `unified` highlighter, the `sentence` scanner splits sentences bigger than `fragment_size` at the first word boundary next to fragment_size.\nYou can set `fragment_size` to `0` to never split any sentence.", + "description": "Break highlighted fragments at the next sentence boundary, as determined by Java’s `BreakIterator`.\r\nYou can specify the locale to use with `boundary_scanner_locale`.\r\nWhen used with the `unified` highlighter, the `sentence` scanner splits sentences bigger than `fragment_size` at the first word boundary next to fragment_size.\r\nYou can set `fragment_size` to `0` to never split any sentence.", "name": "sentence" }, { - "description": "Break highlighted fragments at the next word boundary, as determined by Java’s `BreakIterator`.\nYou can specify the locale to use with `boundary_scanner_locale`.", + "description": "Break highlighted fragments at the 
next word boundary, as determined by Java’s `BreakIterator`.\r\nYou can specify the locale to use with `boundary_scanner_locale`.", "name": "word" } ], @@ -32166,7 +32166,7 @@ ], "description": "Text or location that we want similar documents for or a lookup to a document's field for the text.", "docId": "document-input-parameters", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-mlt-query.html#_document_input_parameters", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-mlt-query.html#_document_input_parameters\r", "kind": "type_alias", "name": { "name": "Context", @@ -32794,7 +32794,7 @@ } }, { - "description": "Specifies how to break the highlighted fragments: chars, sentence, or word.\nOnly valid for the unified and fvh highlighters.\nDefaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter.", + "description": "Specifies how to break the highlighted fragments: chars, sentence, or word.\r\nOnly valid for the unified and fvh highlighters.\r\nDefaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter.", "name": "boundary_scanner", "required": false, "type": { @@ -32806,7 +32806,7 @@ } }, { - "description": "Controls which locale is used to search for sentence and word boundaries.\nThis parameter takes a form of a language tag, for example: `\"en-US\"`, `\"fr-FR\"`, `\"ja-JP\"`.", + "description": "Controls which locale is used to search for sentence and word boundaries.\r\nThis parameter takes a form of a language tag, for example: `\"en-US\"`, `\"fr-FR\"`, `\"ja-JP\"`.", "name": "boundary_scanner_locale", "required": false, "serverDefault": "Locale.ROOT", @@ -32834,7 +32834,7 @@ } }, { - "description": "Specifies how text should be broken up in highlight snippets: `simple` or `span`.\nOnly valid for the `plain` highlighter.", + "description": "Specifies how text should be broken up in highlight snippets: `simple` or `span`.\r\nOnly valid for the `plain` highlighter.", "name": "fragmenter", "required": false, "serverDefault": "span", @@ -32871,7 +32871,7 @@ } }, { - "description": "Highlight matches for a query other than the search query.\nThis is especially useful if you use a rescore query because those are not taken into account by highlighting by default.", + "description": "Highlight matches for a query other than the search query.\r\nThis is especially useful if you use a rescore query because those are not taken into account by highlighting by default.", "name": "highlight_query", "required": false, "type": { @@ -32894,7 +32894,7 @@ } }, { - "description": "If set to a non-negative value, highlighting stops at this defined maximum limit.\nThe rest of the text is not processed, thus not highlighted and no error is returned\nThe `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to lower value than the query setting.", + "description": "If set to a non-negative value, highlighting stops at this defined maximum limit.\r\nThe rest of the text is not processed, thus not highlighted and no error is returned\r\nThe `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to lower value than the query setting.", "name": "max_analyzed_offset", "required": false, "type": { @@ -32919,7 +32919,7 @@ } }, { - "description": "The maximum number of fragments to return.\nIf the 
number of fragments is set to `0`, no fragments are returned.\nInstead, the entire field contents are highlighted and returned.\nThis can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required.\nIf `number_of_fragments` is `0`, `fragment_size` is ignored.", + "description": "The maximum number of fragments to return.\r\nIf the number of fragments is set to `0`, no fragments are returned.\r\nInstead, the entire field contents are highlighted and returned.\r\nThis can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required.\r\nIf `number_of_fragments` is `0`, `fragment_size` is ignored.", "name": "number_of_fragments", "required": false, "serverDefault": 5, @@ -32950,7 +32950,7 @@ } }, { - "description": "Sorts highlighted fragments by score when set to `score`.\nBy default, fragments will be output in the order they appear in the field (order: `none`).\nSetting this option to `score` will output the most relevant fragments first.\nEach highlighter applies its own logic to compute relevancy scores.", + "description": "Sorts highlighted fragments by score when set to `score`.\r\nBy default, fragments will be output in the order they appear in the field (order: `none`).\r\nSetting this option to `score` will output the most relevant fragments first.\r\nEach highlighter applies its own logic to compute relevancy scores.", "name": "order", "required": false, "serverDefault": "none", @@ -32963,7 +32963,7 @@ } }, { - "description": "Controls the number of matching phrases in a document that are considered.\nPrevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory.\nWhen using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory.\nOnly supported by the `fvh` highlighter.", + "description": "Controls the number of matching phrases in a document that are considered.\r\nPrevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory.\r\nWhen using `matched_fields`, `phrase_limit` phrases per matched field are considered. 
Raising the limit increases query time and consumes more memory.\r\nOnly supported by the `fvh` highlighter.", "name": "phrase_limit", "required": false, "serverDefault": 256, @@ -32976,7 +32976,7 @@ } }, { - "description": "Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text.\nBy default, highlighted text is wrapped in `` and `` tags.", + "description": "Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text.\r\nBy default, highlighted text is wrapped in `` and `` tags.", "name": "post_tags", "required": false, "type": { @@ -32991,7 +32991,7 @@ } }, { - "description": "Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text.\nBy default, highlighted text is wrapped in `` and `` tags.", + "description": "Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text.\r\nBy default, highlighted text is wrapped in `` and `` tags.", "name": "pre_tags", "required": false, "type": { @@ -33006,7 +33006,7 @@ } }, { - "description": "By default, only fields that contains a query match are highlighted.\nSet to `false` to highlight all fields.", + "description": "By default, only fields that contains a query match are highlighted.\r\nSet to `false` to highlight all fields.", "name": "require_field_match", "required": false, "serverDefault": true, @@ -33189,7 +33189,7 @@ } }, { - "esQuirk": "'_id' is not available when using 'stored_fields: _none_'\non a search request. Otherwise the field is always present on hits.", + "esQuirk": "'_id' is not available when using 'stored_fields: _none_'\r\non a search request. Otherwise the field is always present on hits.", "name": "_id", "required": true, "type": { @@ -34673,7 +34673,7 @@ "properties": [ { "codegenName": "Query", - "description": "The query to use for rescoring.\nThis query is only run on the Top-K results returned by the `query` and `post_filter` phases.", + "description": "The query to use for rescoring.\r\nThis query is only run on the Top-K results returned by the `query` and `post_filter` phases.", "name": "rescore_query", "required": true, "type": { @@ -34742,7 +34742,7 @@ "name": "min" }, { - "description": "Multiply the original score by the rescore query score.\nUseful for `function` query rescores.", + "description": "Multiply the original score by the rescore query score.\r\nUseful for `function` query rescores.", "name": "multiply" }, { @@ -34948,7 +34948,7 @@ "fetch", "fields" ], - "description": "Defines how to fetch a source. Fetching can be disabled entirely, or the source can be filtered.\nUsed as a query parameter along with the `_source_includes` and `_source_excludes` parameters.", + "description": "Defines how to fetch a source. Fetching can be disabled entirely, or the source can be filtered.\r\nUsed as a query parameter along with the `_source_includes` and `_source_excludes` parameters.", "kind": "type_alias", "name": { "name": "SourceConfigParam", @@ -35639,7 +35639,7 @@ "enabled", "count" ], - "description": "Number of hits matching the query to count accurately. If true, the exact\nnumber of hits is returned at the cost of some performance. If false, the\nresponse does not include the total number of hits matching the query.\nDefaults to 10,000 hits.", + "description": "Number of hits matching the query to count accurately. If true, the exact\r\nnumber of hits is returned at the cost of some performance. 
If false, the\r\nresponse does not include the total number of hits matching the query.\r\nDefaults to 10,000 hits.", "kind": "type_alias", "name": { "name": "TrackHits", @@ -35674,7 +35674,7 @@ "kind": "properties", "properties": [ { - "description": "Sub-aggregations for the geotile_grid.\n\nSupports the following aggregation types:\n- avg\n- cardinality\n- max\n- min\n- sum", + "description": "Sub-aggregations for the geotile_grid.\r\n\r\nSupports the following aggregation types:\r\n- avg\r\n- cardinality\r\n- max\r\n- min\r\n- sum", "name": "aggs", "required": false, "type": { @@ -35697,7 +35697,7 @@ } }, { - "description": "Size, in pixels, of a clipping buffer outside the tile. This allows renderers\nto avoid outline artifacts from geometries that extend past the extent of the tile.", + "description": "Size, in pixels, of a clipping buffer outside the tile. This allows renderers\r\nto avoid outline artifacts from geometries that extend past the extent of the tile.", "name": "buffer", "required": false, "serverDefault": 5, @@ -35710,7 +35710,7 @@ } }, { - "description": "If false, the meta layer’s feature is the bounding box of the tile.\nIf true, the meta layer’s feature is a bounding box resulting from a\ngeo_bounds aggregation. The aggregation runs on values that intersect\nthe // tile with wrap_longitude set to false. The resulting\nbounding box may be larger than the vector tile.", + "description": "If false, the meta layer’s feature is the bounding box of the tile.\r\nIf true, the meta layer’s feature is a bounding box resulting from a\r\ngeo_bounds aggregation. The aggregation runs on values that intersect\r\nthe // tile with wrap_longitude set to false. The resulting\r\nbounding box may be larger than the vector tile.", "name": "exact_bounds", "required": false, "serverDefault": false, @@ -35736,7 +35736,7 @@ } }, { - "description": "Fields to return in the `hits` layer. Supports wildcards (`*`).\nThis parameter does not support fields with array values. Fields with array\nvalues may return inconsistent results.", + "description": "Fields to return in the `hits` layer. Supports wildcards (`*`).\r\nThis parameter does not support fields with array values. Fields with array\r\nvalues may return inconsistent results.", "name": "fields", "required": false, "type": { @@ -35760,7 +35760,7 @@ } }, { - "description": "Additional zoom levels available through the aggs layer. For example, if is 7\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon’t include the aggs layer.", + "description": "Additional zoom levels available through the aggs layer. For example, if is 7\r\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\r\ndon’t include the aggs layer.", "name": "grid_precision", "required": false, "serverDefault": 8, @@ -35773,7 +35773,7 @@ } }, { - "description": "Determines the geometry type for features in the aggs layer. In the aggs layer,\neach feature represents a geotile_grid cell. If 'grid' each feature is a Polygon\nof the cells bounding box. If 'point' each feature is a Point that is the centroid\nof the cell.", + "description": "Determines the geometry type for features in the aggs layer. In the aggs layer,\r\neach feature represents a geotile_grid cell. If 'grid' each feature is a Polygon\r\nof the cells bounding box. 
If 'point' each feature is a Point that is the centroid\r\nof the cell.", "name": "grid_type", "required": false, "serverDefault": "grid", @@ -35798,7 +35798,7 @@ } }, { - "description": "Defines one or more runtime fields in the search request. These fields take\nprecedence over mapped fields with the same name.", + "description": "Defines one or more runtime fields in the search request. These fields take\r\nprecedence over mapped fields with the same name.", "name": "runtime_mappings", "required": false, "type": { @@ -35810,7 +35810,7 @@ } }, { - "description": "Maximum number of features to return in the hits layer. Accepts 0-10000.\nIf 0, results don’t include the hits layer.", + "description": "Maximum number of features to return in the hits layer. Accepts 0-10000.\r\nIf 0, results don’t include the hits layer.", "name": "size", "required": false, "serverDefault": 10000, @@ -35823,7 +35823,7 @@ } }, { - "description": "Sorts features in the hits layer. By default, the API calculates a bounding\nbox for each feature. It sorts features based on this box’s diagonal length,\nfrom longest to shortest.", + "description": "Sorts features in the hits layer. By default, the API calculates a bounding\r\nbox for each feature. It sorts features based on this box’s diagonal length,\r\nfrom longest to shortest.", "name": "sort", "required": false, "type": { @@ -35835,7 +35835,7 @@ } }, { - "description": "Number of hits matching the query to count accurately. If `true`, the exact number\nof hits is returned at the cost of some performance. If `false`, the response does\nnot include the total number of hits matching the query.", + "description": "Number of hits matching the query to count accurately. If `true`, the exact number\r\nof hits is returned at the cost of some performance. If `false`, the response does\r\nnot include the total number of hits matching the query.", "name": "track_total_hits", "required": false, "serverDefault": "10000", @@ -35848,7 +35848,7 @@ } }, { - "description": "If `true`, the hits and aggs layers will contain additional point features representing\nsuggested label positions for the original features.", + "description": "If `true`, the hits and aggs layers will contain additional point features representing\r\nsuggested label positions for the original features.", "name": "with_labels", "required": false, "type": { @@ -35937,7 +35937,7 @@ ], "query": [ { - "description": "If false, the meta layer’s feature is the bounding box of the tile.\nIf true, the meta layer’s feature is a bounding box resulting from a\ngeo_bounds aggregation. The aggregation runs on values that intersect\nthe // tile with wrap_longitude set to false. The resulting\nbounding box may be larger than the vector tile.", + "description": "If false, the meta layer’s feature is the bounding box of the tile.\r\nIf true, the meta layer’s feature is a bounding box resulting from a\r\ngeo_bounds aggregation. The aggregation runs on values that intersect\r\nthe // tile with wrap_longitude set to false. The resulting\r\nbounding box may be larger than the vector tile.", "name": "exact_bounds", "required": false, "serverDefault": false, @@ -35975,7 +35975,7 @@ } }, { - "description": "Additional zoom levels available through the aggs layer. For example, if is 7\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon’t include the aggs layer.", + "description": "Additional zoom levels available through the aggs layer. 
For example, if is 7\r\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\r\ndon’t include the aggs layer.", "name": "grid_precision", "required": false, "serverDefault": 8, @@ -35988,7 +35988,7 @@ } }, { - "description": "Determines the geometry type for features in the aggs layer. In the aggs layer,\neach feature represents a geotile_grid cell. If 'grid' each feature is a Polygon\nof the cells bounding box. If 'point' each feature is a Point that is the centroid\nof the cell.", + "description": "Determines the geometry type for features in the aggs layer. In the aggs layer,\r\neach feature represents a geotile_grid cell. If 'grid' each feature is a Polygon\r\nof the cells bounding box. If 'point' each feature is a Point that is the centroid\r\nof the cell.", "name": "grid_type", "required": false, "serverDefault": "grid", @@ -36001,7 +36001,7 @@ } }, { - "description": "Maximum number of features to return in the hits layer. Accepts 0-10000.\nIf 0, results don’t include the hits layer.", + "description": "Maximum number of features to return in the hits layer. Accepts 0-10000.\r\nIf 0, results don’t include the hits layer.", "name": "size", "required": false, "serverDefault": 10000, @@ -36014,7 +36014,7 @@ } }, { - "description": "If `true`, the hits and aggs layers will contain additional point features representing\nsuggested label positions for the original features.", + "description": "If `true`, the hits and aggs layers will contain additional point features representing\r\nsuggested label positions for the original features.", "name": "with_labels", "required": false, "type": { @@ -36356,7 +36356,7 @@ } }, { - "description": "ID of the search template to use. If no source is specified,\nthis parameter is required.", + "description": "ID of the search template to use. If no source is specified,\r\nthis parameter is required.", "name": "id", "required": false, "type": { @@ -36398,7 +36398,7 @@ } }, { - "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. Also supports Mustache variables. If no id is specified, this\nparameter is required.", + "description": "An inline search template. Supports the same parameters as the search API's\r\nrequest body. Also supports Mustache variables. If no id is specified, this\r\nparameter is required.", "name": "source", "required": false, "type": { @@ -36425,7 +36425,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices,\nand aliases to search. Supports wildcards (*).", + "description": "Comma-separated list of data streams, indices,\r\nand aliases to search. 
Supports wildcards (*).", "name": "index", "required": false, "type": { @@ -36553,7 +36553,7 @@ } }, { - "description": "Specifies how long a consistent view of the index\nshould be maintained for scrolled search.", + "description": "Specifies how long a consistent view of the index\r\nshould be maintained for scrolled search.", "name": "scroll", "required": false, "type": { @@ -36896,7 +36896,7 @@ { "description": "Allows to filter an index shard if the provided query rewrites to match_none.", "docId": "query-dsl", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html\r", "name": "index_filter", "required": false, "type": { @@ -37636,7 +37636,7 @@ "kind": "properties", "properties": [ { - "description": "Set to false to disable setting 'result' in the response\nto 'noop' if no change to the document occurred.", + "description": "Set to false to disable setting 'result' in the response\r\nto 'noop' if no change to the document occurred.", "name": "detect_noop", "required": false, "serverDefault": true, @@ -37699,7 +37699,7 @@ } }, { - "description": "Set to false to disable source retrieval. You can also specify a comma-separated\nlist of the fields you want to retrieve.", + "description": "Set to false to disable source retrieval. You can also specify a comma-separated\r\nlist of the fields you want to retrieve.", "name": "_source", "required": false, "serverDefault": "true", @@ -37712,7 +37712,7 @@ } }, { - "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\nnew document. If the document exists, the 'script' is executed.", + "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\r\nnew document. If the document exists, the 'script' is executed.", "name": "upsert", "required": false, "type": { @@ -37812,7 +37812,7 @@ } }, { - "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\nvisible to search, if 'false' do nothing with refreshes.", + "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\r\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\r\nvisible to search, if 'false' do nothing with refreshes.", "name": "refresh", "required": false, "serverDefault": "false", @@ -37863,7 +37863,7 @@ } }, { - "description": "Period to wait for dynamic mapping updates and active shards.\nThis guarantees Elasticsearch waits for at least the timeout before failing.\nThe actual wait time could be longer, particularly when multiple waits occur.", + "description": "Period to wait for dynamic mapping updates and active shards.\r\nThis guarantees Elasticsearch waits for at least the timeout before failing.\r\nThe actual wait time could be longer, particularly when multiple waits occur.", "name": "timeout", "required": false, "serverDefault": "1m", @@ -37876,7 +37876,7 @@ } }, { - "description": "The number of shard copies that must be active before proceeding with the operations.\nSet to 'all' or any positive integer up to the total number of shards in the index\n(number_of_replicas+1). 
Defaults to 1 meaning the primary shard.", + "description": "The number of shard copies that must be active before proceeding with the operations.\r\nSet to 'all' or any positive integer up to the total number of shards in the index\r\n(number_of_replicas+1). Defaults to 1 meaning the primary shard.", "name": "wait_for_active_shards", "required": false, "serverDefault": "1", @@ -37889,7 +37889,7 @@ } }, { - "description": "Set to false to disable source retrieval. You can also specify a comma-separated\nlist of the fields you want to retrieve.", + "description": "Set to false to disable source retrieval. You can also specify a comma-separated\r\nlist of the fields you want to retrieve.", "name": "_source", "required": false, "serverDefault": "true", @@ -38892,7 +38892,7 @@ "specLocation": "_spec_utils/BaseNode.ts#L25-L32" }, { - "description": "Some APIs will return values such as numbers also as a string (notably epoch timestamps). This behavior\nis used to capture this behavior while keeping the semantics of the field type.\n\nDepending on the target language, code generators can keep the union or remove it and leniently parse\nstrings to the target type.", + "description": "Some APIs will return values such as numbers also as a string (notably epoch timestamps). This behavior\r\nis used to capture this behavior while keeping the semantics of the field type.\r\n\r\nDepending on the target language, code generators can keep the union or remove it and leniently parse\r\nstrings to the target type.", "generics": [ { "name": "T", @@ -38926,7 +38926,7 @@ } }, { - "description": "The absence of any type. This is commonly used in APIs that don't return a body.\n\nAlthough \"void\" is generally used for the unit type that has only one value, this is to be interpreted as\nthe bottom type that has no value at all. Most languages have a unit type, but few have a bottom type.\n\nSee https://en.m.wikipedia.org/wiki/Unit_type and https://en.m.wikipedia.org/wiki/Bottom_type", + "description": "The absence of any type. This is commonly used in APIs that don't return a body.\r\n\r\nAlthough \"void\" is generally used for the unit type that has only one value, this is to be interpreted as\r\nthe bottom type that has no value at all. Most languages have a unit type, but few have a bottom type.\r\n\r\nSee https://en.m.wikipedia.org/wiki/Unit_type and https://en.m.wikipedia.org/wiki/Bottom_type", "kind": "type_alias", "name": { "name": "Void", @@ -38964,7 +38964,7 @@ "specLocation": "_types/Base.ts#L47-L50" }, { - "description": "The aggregation name as returned from the server. Depending whether typed_keys is specified this could come back\nin the form of `name#type` instead of simply `name`", + "description": "The aggregation name as returned from the server. 
Depending whether typed_keys is specified this could come back\r\nin the form of `name#type` instead of simply `name`", "kind": "type_alias", "name": { "name": "AggregateName", @@ -39173,7 +39173,7 @@ }, { "docId": "byte-units", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/api-conventions.html#byte-units", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/api-conventions.html#byte-units\r", "kind": "type_alias", "name": { "name": "ByteSize", @@ -39544,7 +39544,7 @@ }, { "docId": "data-stream-path-param", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-data-stream.html#indices-create-data-stream-api-path-params", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-data-stream.html#indices-create-data-stream-api-path-params\r", "kind": "type_alias", "name": { "name": "DataStreamName", @@ -39591,7 +39591,7 @@ }, { "docId": "mapping-date-format", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-date-format.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-date-format.html\r", "kind": "type_alias", "name": { "name": "DateFormat", @@ -39622,7 +39622,7 @@ } }, { - "description": "A date and time, either as a string whose format can depend on the context (defaulting to ISO 8601), or a\nnumber of milliseconds since the Epoch. Elasticsearch accepts both as input, but will generally output a string\nrepresentation.", + "description": "A date and time, either as a string whose format can depend on the context (defaulting to ISO 8601), or a\r\nnumber of milliseconds since the Epoch. Elasticsearch accepts both as input, but will generally output a string\r\nrepresentation.", "kind": "type_alias", "name": { "name": "DateTime", @@ -39727,7 +39727,7 @@ }, "properties": [ { - "description": "Total number of non-deleted documents across all primary shards assigned to selected nodes.\nThis number is based on documents in Lucene segments and may include documents from nested fields.", + "description": "Total number of non-deleted documents across all primary shards assigned to selected nodes.\r\nThis number is based on documents in Lucene segments and may include documents from nested fields.", "name": "count", "required": true, "type": { @@ -39739,7 +39739,7 @@ } }, { - "description": "Total number of deleted documents across all primary shards assigned to selected nodes.\nThis number is based on documents in Lucene segments.\nElasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged.", + "description": "Total number of deleted documents across all primary shards assigned to selected nodes.\r\nThis number is based on documents in Lucene segments.\r\nElasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged.", "name": "deleted", "required": false, "type": { @@ -39754,9 +39754,9 @@ "specLocation": "_types/Stats.ts#L76-L88" }, { - "description": "A duration. Units can be `nanos`, `micros`, `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours) and\n`d` (days). Also accepts \"0\" without a unit and \"-1\" to indicate an unspecified value.", + "description": "A duration. Units can be `nanos`, `micros`, `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours) and\r\n`d` (days). 
Also accepts \"0\" without a unit and \"-1\" to indicate an unspecified value.", "docId": "time-value", - "docUrl": "https://github.com/elastic/elasticsearch/blob/{branch}/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java", + "docUrl": "https://github.com/elastic/elasticsearch/blob/{branch}/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java\r", "kind": "type_alias", "name": { "name": "Duration", @@ -39785,7 +39785,7 @@ } }, { - "description": "A date histogram interval. Similar to `Duration` with additional units: `w` (week), `M` (month), `q` (quarter) and\n`y` (year)", + "description": "A date histogram interval. Similar to `Duration` with additional units: `w` (week), `M` (month), `q` (quarter) and\r\n`y` (year)", "kind": "type_alias", "name": { "name": "DurationLarge", @@ -39985,7 +39985,7 @@ } } ], - "description": "Cause and details about a request failure. This class defines the properties common to all error types.\nAdditional details are also provided, that depend on the error type.", + "description": "Cause and details about a request failure. This class defines the properties common to all error types.\r\nAdditional details are also provided, that depend on the error type.", "kind": "interface", "name": { "name": "ErrorCause", @@ -40605,7 +40605,7 @@ "trbl", "wkt" ], - "description": "A geo bounding box. It can be represented in various ways:\n- as 4 top/bottom/left/right coordinates\n- as 2 top_left / bottom_right points\n- as 2 top_right / bottom_left points\n- as a WKT bounding box", + "description": "A geo bounding box. It can be represented in various ways:\r\n- as 4 top/bottom/left/right coordinates\r\n- as 2 top_left / bottom_right points\r\n- as 2 top_right / bottom_left points\r\n- as a WKT bounding box", "kind": "type_alias", "name": { "name": "GeoBounds", @@ -40901,7 +40901,7 @@ "coords", "text" ], - "description": "A latitude/longitude as a 2 dimensional point. It can be represented in various ways:\n- as a `{lat, long}` object\n- as a geo hash value\n- as a `[lon, lat]` array\n- as a string in `\", \"` or WKT point formats", + "description": "A latitude/longitude as a 2 dimensional point. It can be represented in various ways:\r\n- as a `{lat, long}` object\r\n- as a geo hash value\r\n- as a `[lon, lat]` array\r\n- as a string in `\", \"` or WKT point formats", "kind": "type_alias", "name": { "name": "GeoLocation", @@ -41628,7 +41628,7 @@ } }, { - "description": "Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded\nto actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices.", + "description": "Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded\r\nto actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices.", "kind": "interface", "name": { "name": "IndicesOptions", @@ -41636,7 +41636,7 @@ }, "properties": [ { - "description": "If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only\nmissing or closed indices. This behavior applies even if the request targets other open indices. 
For example,\na request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", + "description": "If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only\r\nmissing or closed indices. This behavior applies even if the request targets other open indices. For example,\r\na request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", "name": "allow_no_indices", "required": false, "type": { @@ -41648,7 +41648,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as `open,hidden`.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\r\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\r\nsuch as `open,hidden`.", "name": "expand_wildcards", "required": false, "type": { @@ -42365,7 +42365,7 @@ { "description": "The minimum number of terms that should match as integer, percentage or range", "docId": "query-dsl-minimum-should-match", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-minimum-should-match.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-minimum-should-match.html\r", "kind": "type_alias", "name": { "name": "MinimumShouldMatch", @@ -42394,7 +42394,7 @@ }, { "docId": "query-dsl-multi-term-rewrite", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-multi-term-rewrite.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-multi-term-rewrite.html\r", "kind": "type_alias", "name": { "name": "MultiTermQueryRewrite", @@ -42680,7 +42680,7 @@ }, { "docId": "modules-node", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-node.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-node.html\r", "kind": "type_alias", "name": { "name": "NodeName", @@ -43214,7 +43214,7 @@ }, "properties": [ { - "description": "Total number of entries added to the query cache across all shards assigned to selected nodes.\nThis number includes current and evicted entries.", + "description": "Total number of entries added to the query cache across all shards assigned to selected nodes.\r\nThis number includes current and evicted entries.", "name": "cache_count", "required": true, "type": { @@ -44474,7 +44474,7 @@ } }, { - "description": "This object is not populated by the cluster stats API.\nTo get information on segment files, use the node stats API.", + "description": "This object is not populated by the cluster stats API.\r\nTo get information on segment files, use the node stats API.", "name": "file_sizes", "required": true, "type": { @@ -44497,7 +44497,7 @@ } }, { - "description": "Total amount of memory used by fixed bit sets across all shards assigned to selected nodes.\nFixed bit sets are used for nested object field types and type filters for join fields.", + "description": "Total amount of memory used by fixed bit sets across all shards assigned to selected nodes.\r\nFixed bit sets are used for nested object field types and type filters for join fields.", "name": "fixed_bit_set", "required": false, "type": { @@ -45127,7 +45127,7 
@@ } ], "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "kind": "interface", "name": { "name": "SortOptions", @@ -45274,7 +45274,7 @@ } }, { - "description": "Total data set size of all shards assigned to selected nodes.\nThis includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices.", + "description": "Total data set size of all shards assigned to selected nodes.\r\nThis includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices.", "name": "total_data_set_size", "required": false, "type": { @@ -45286,7 +45286,7 @@ } }, { - "description": "Total data set size, in bytes, of all shards assigned to selected nodes.\nThis includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices.", + "description": "Total data set size, in bytes, of all shards assigned to selected nodes.\r\nThis includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices.", "name": "total_data_set_size_in_bytes", "required": false, "type": { @@ -45401,7 +45401,7 @@ "specLocation": "_types/common.ts#L256-L260" }, { - "description": "The suggestion name as returned from the server. Depending whether typed_keys is specified this could come back\nin the form of `name#type` instead of simply `name`", + "description": "The suggestion name as returned from the server. Depending whether typed_keys is specified this could come back\r\nin the form of `name#type` instead of simply `name`", "kind": "type_alias", "name": { "name": "SuggestionName", @@ -47203,7 +47203,7 @@ } }, "docId": "search-aggregations-bucket-count-ks-test-aggregation", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-count-ks-test-aggregation.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-count-ks-test-aggregation.html\r", "name": "bucket_count_ks_test", "required": false, "stability": "experimental", @@ -47225,7 +47225,7 @@ } }, "docId": "search-aggregations-bucket-correlation-aggregation", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-correlation-aggregation.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-correlation-aggregation.html\r", "name": "bucket_correlation", "required": false, "stability": "experimental", @@ -47258,7 +47258,7 @@ } }, "docId": "search-aggregations-bucket-categorize-text-aggregation", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-categorize-text-aggregation.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-bucket-categorize-text-aggregation.html\r", "name": "categorize_text", "required": false, "stability": "experimental", @@ -48617,7 +48617,7 @@ }, "properties": [ { - "description": "The total number of documents that initially created the expectations. 
It’s required to be greater\nthan or equal to the sum of all values in the buckets_path as this is the originating superset of data\nto which the term values are correlated.", + "description": "The total number of documents that initially created the expectations. It’s required to be greater\r\nthan or equal to the sum of all values in the buckets_path as this is the originating superset of data\r\nto which the term values are correlated.", "name": "doc_count", "required": true, "type": { @@ -48629,7 +48629,7 @@ } }, { - "description": "An array of numbers with which to correlate the configured `bucket_path` values.\nThe length of this value must always equal the number of buckets returned by the `bucket_path`.", + "description": "An array of numbers with which to correlate the configured `bucket_path` values.\r\nThe length of this value must always equal the number of buckets returned by the `bucket_path`.", "name": "expectations", "required": true, "type": { @@ -48644,7 +48644,7 @@ } }, { - "description": "An array of fractions to use when averaging and calculating variance. This should be used if\nthe pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided,\nmust equal expectations.", + "description": "An array of fractions to use when averaging and calculating variance. This should be used if\r\nthe pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided,\r\nmust equal expectations.", "name": "fractions", "required": false, "type": { @@ -48662,7 +48662,7 @@ "specLocation": "_types/aggregations/pipeline.ts#L134-L152" }, { - "description": "A sibling pipeline aggregation which executes a two sample Kolmogorov–Smirnov test (referred\nto as a \"K-S test\" from now on) against a provided distribution, and the distribution implied\nby the documents counts in the configured sibling aggregation. Specifically, for some metric,\nassuming that the percentile intervals of the metric are known beforehand or have been computed\nby an aggregation, then one would use range aggregation for the sibling to compute the p-value\nof the distribution difference between the metric and the restriction of that metric to a subset\nof the documents. A natural use case is if the sibling aggregation range aggregation nested in a\nterms aggregation, in which case one compares the overall distribution of metric to its restriction\nto each term.", + "description": "A sibling pipeline aggregation which executes a two sample Kolmogorov–Smirnov test (referred\r\nto as a \"K-S test\" from now on) against a provided distribution, and the distribution implied\r\nby the documents counts in the configured sibling aggregation. Specifically, for some metric,\r\nassuming that the percentile intervals of the metric are known beforehand or have been computed\r\nby an aggregation, then one would use range aggregation for the sibling to compute the p-value\r\nof the distribution difference between the metric and the restriction of that metric to a subset\r\nof the documents. A natural use case is if the sibling aggregation range aggregation nested in a\r\nterms aggregation, in which case one compares the overall distribution of metric to its restriction\r\nto each term.", "inherits": { "type": { "name": "BucketPathAggregation", @@ -48676,7 +48676,7 @@ }, "properties": [ { - "description": "A list of string values indicating which K-S test alternative to calculate. The valid values\nare: \"greater\", \"less\", \"two_sided\". 
This parameter is key for determining the K-S statistic used\nwhen calculating the K-S test. Default value is all possible alternative hypotheses.", + "description": "A list of string values indicating which K-S test alternative to calculate. The valid values\r\nare: \"greater\", \"less\", \"two_sided\". This parameter is key for determining the K-S statistic used\r\nwhen calculating the K-S test. Default value is all possible alternative hypotheses.", "name": "alternative", "required": false, "type": { @@ -48691,7 +48691,7 @@ } }, { - "description": "A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results.\nIn typical usage this is the overall proportion of documents in each bucket, which is compared with the actual\ndocument proportions in each bucket from the sibling aggregation counts. The default is to assume that overall\ndocuments are uniformly distributed on these buckets, which they would be if one used equal percentiles of a\nmetric to define the bucket end points.", + "description": "A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results.\r\nIn typical usage this is the overall proportion of documents in each bucket, which is compared with the actual\r\ndocument proportions in each bucket from the sibling aggregation counts. The default is to assume that overall\r\ndocuments are uniformly distributed on these buckets, which they would be if one used equal percentiles of a\r\nmetric to define the bucket end points.", "name": "fractions", "required": false, "type": { @@ -48706,7 +48706,7 @@ } }, { - "description": "Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values.\nThis determines the cumulative distribution function (CDF) points used comparing the two samples. Default is\n`upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`,\nand `lower_tail`.", + "description": "Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values.\r\nThis determines the cumulative distribution function (CDF) points used comparing the two samples. Default is\r\n`upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`,\r\nand `lower_tail`.", "name": "sampling_method", "required": false, "type": { @@ -48767,7 +48767,7 @@ { "description": "Path to the buckets that contain one set of values to correlate.", "docId": "search-aggregations-pipeline-bucket-path", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-pipeline.html#buckets-path-syntax", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-aggregations-pipeline.html#buckets-path-syntax\r", "name": "buckets_path", "required": false, "type": { @@ -48900,7 +48900,7 @@ "keyed", "array" ], - "description": "Aggregation buckets. By default they are returned as an array, but if the aggregation has keys configured for\nthe different buckets, the result is a dictionary.", + "description": "Aggregation buckets. 
By default they are returned as an array, but if the aggregation has keys configured for\r\nthe different buckets, the result is a dictionary.", "generics": [ { "name": "TBucket", @@ -48953,7 +48953,7 @@ "array", "dict" ], - "description": "Buckets path can be expressed in different ways, and an aggregation may accept some or all of these\nforms depending on its type. Please refer to each aggregation's documentation to know what buckets\npath forms they accept.", + "description": "Buckets path can be expressed in different ways, and an aggregation may accept some or all of these\r\nforms depending on its type. Please refer to each aggregation's documentation to know what buckets\r\npath forms they accept.", "kind": "type_alias", "name": { "name": "BucketsPath", @@ -49162,7 +49162,7 @@ "specLocation": "_types/aggregations/metric.ts#L54-L60" }, { - "description": "A multi-bucket aggregation that groups semi-structured text into buckets. Each text\nfield is re-analyzed using a custom analyzer. The resulting tokens are then categorized\ncreating buckets of similarly formatted text values. This aggregation works best with machine\ngenerated text like system logs. Only the first 100 analyzed tokens are used to categorize the text.", + "description": "A multi-bucket aggregation that groups semi-structured text into buckets. Each text\r\nfield is re-analyzed using a custom analyzer. The resulting tokens are then categorized\r\ncreating buckets of similarly formatted text values. This aggregation works best with machine\r\ngenerated text like system logs. Only the first 100 analyzed tokens are used to categorize the text.", "inherits": { "type": { "name": "Aggregation", @@ -49188,7 +49188,7 @@ } }, { - "description": "The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1.\nSmaller values use less memory and create fewer categories. Larger values will use more memory and\ncreate narrower categories. Max allowed value is 100.", + "description": "The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1.\r\nSmaller values use less memory and create fewer categories. Larger values will use more memory and\r\ncreate narrower categories. Max allowed value is 100.", "name": "max_unique_tokens", "required": false, "serverDefault": 50, @@ -49201,7 +49201,7 @@ } }, { - "description": "The maximum number of token positions to match on before attempting to merge categories. Larger\nvalues will use more memory and create narrower categories. Max allowed value is 100.", + "description": "The maximum number of token positions to match on before attempting to merge categories. Larger\r\nvalues will use more memory and create narrower categories. Max allowed value is 100.", "name": "max_matched_tokens", "required": false, "serverDefault": 5, @@ -49214,7 +49214,7 @@ } }, { - "description": "The minimum percentage of tokens that must match for text to be added to the category bucket. Must\nbe between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory\nusage and create narrower categories.", + "description": "The minimum percentage of tokens that must match for text to be added to the category bucket. Must\r\nbe between 1 and 100. The larger the value the narrower the categories. 
Larger values will increase memory\r\nusage and create narrower categories.", "name": "similarity_threshold", "required": false, "serverDefault": 50, @@ -49227,7 +49227,7 @@ } }, { - "description": "This property expects an array of regular expressions. The expressions are used to filter out matching\nsequences from the categorization field values. You can use this functionality to fine tune the categorization\nby excluding sequences from consideration when categories are defined. For example, you can exclude SQL\nstatements that appear in your log files. This property cannot be used at the same time as categorization_analyzer.\nIf you only want to define simple regular expression filters that are applied prior to tokenization, setting\nthis property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering,\nuse the categorization_analyzer property instead and include the filters as pattern_replace character filters.", + "description": "This property expects an array of regular expressions. The expressions are used to filter out matching\r\nsequences from the categorization field values. You can use this functionality to fine tune the categorization\r\nby excluding sequences from consideration when categories are defined. For example, you can exclude SQL\r\nstatements that appear in your log files. This property cannot be used at the same time as categorization_analyzer.\r\nIf you only want to define simple regular expression filters that are applied prior to tokenization, setting\r\nthis property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering,\r\nuse the categorization_analyzer property instead and include the filters as pattern_replace character filters.", "name": "categorization_filters", "required": false, "type": { @@ -49242,7 +49242,7 @@ } }, { - "description": "The categorization analyzer specifies how the text is analyzed and tokenized before being categorized.\nThe syntax is very similar to that used to define the analyzer in the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). This property\ncannot be used at the same time as categorization_filters.", + "description": "The categorization analyzer specifies how the text is analyzed and tokenized before being categorized.\r\nThe syntax is very similar to that used to define the analyzer in the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsearch/reference/8.0/indices-analyze.html). This property\r\ncannot be used at the same time as categorization_filters.", "name": "categorization_analyzer", "required": false, "type": { @@ -50029,7 +50029,7 @@ "specLocation": "_types/aggregations/Aggregate.ts#L351-L354" }, { - "description": "Result of a `date_range` aggregation. Same format as a for a `range` aggregation: `from` and `to`\nin `buckets` are milliseconds since the Epoch, represented as a floating point number.", + "description": "Result of a `date_range` aggregation. 
Same format as a for a `range` aggregation: `from` and `to`\r\nin `buckets` are milliseconds since the Epoch, represented as a floating point number.", "inherits": { "type": { "name": "RangeAggregate", @@ -50789,7 +50789,7 @@ "expr", "value" ], - "description": "A date range limit, represented either as a DateMath expression or a number expressed\naccording to the target field's precision.", + "description": "A date range limit, represented either as a DateMath expression or a number expressed\r\naccording to the target field's precision.", "kind": "type_alias", "name": { "name": "FieldDateMath", @@ -51234,7 +51234,7 @@ "kind": "enum", "members": [ { - "description": "Treats missing data as if the bucket does not exist. It will skip the bucket and\ncontinue calculating using the next available value.", + "description": "Treats missing data as if the bucket does not exist. It will skip the bucket and\r\ncontinue calculating using the next available value.", "name": "skip" }, { @@ -51242,7 +51242,7 @@ "name": "insert_zeros" }, { - "description": "Similar to skip, except if the metric provides a non-null, non-NaN value this value is used,\notherwise the empty bucket is skipped.", + "description": "Similar to skip, except if the metric provides a non-null, non-NaN value this value is used,\r\notherwise the empty bucket is skipped.", "name": "keep_values" } ], @@ -51949,7 +51949,7 @@ }, "properties": [ { - "description": "Field containing indexed geo-point values. Must be explicitly\nmapped as a `geo_point` field. If the field contains an array\n`geohex_grid` aggregates all array values.", + "description": "Field containing indexed geo-point values. Must be explicitly\r\nmapped as a `geo_point` field. If the field contains an array\r\n`geohex_grid` aggregates all array values.", "name": "field", "required": true, "type": { @@ -51961,7 +51961,7 @@ } }, { - "description": "Integer zoom of the key used to defined cells or buckets\nin the results. Value should be between 0-15.", + "description": "Integer zoom of the key used to defined cells or buckets\r\nin the results. Value should be between 0-15.", "name": "precision", "required": false, "serverDefault": 6, @@ -52875,7 +52875,7 @@ } }, { - "description": "Length of the network prefix. For IPv4 addresses the accepted range is [0, 32].\nFor IPv6 addresses the accepted range is [0, 128].", + "description": "Length of the network prefix. For IPv4 addresses the accepted range is [0, 32].\r\nFor IPv6 addresses the accepted range is [0, 128].", "name": "prefix_length", "required": true, "type": { @@ -56310,7 +56310,7 @@ }, "properties": [ { - "description": "The metric value. A missing value generally means that there was no data to aggregate,\nunless specified otherwise.", + "description": "The metric value. A missing value generally means that there was no data to aggregate,\r\nunless specified otherwise.", "name": "value", "required": true, "type": { @@ -56572,7 +56572,7 @@ "specLocation": "_types/aggregations/Aggregate.ts#L269-L276" }, { - "description": "Statistics aggregation result. `min`, `max` and `avg` are missing if there were no values to process\n(`count` is zero).", + "description": "Statistics aggregation result. 
`min`, `max` and `avg` are missing if there were no values to process\r\n(`count` is zero).", "inherits": { "type": { "name": "AggregateBase", @@ -58752,10 +58752,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } } @@ -59449,10 +59458,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } } @@ -61432,10 +61450,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } } @@ -61489,10 +61516,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } } @@ -61765,7 +61801,7 @@ }, { "docId": "analysis-normalizers", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/analysis-normalizers.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/analysis-normalizers.html\r", "kind": "type_alias", "name": { "name": "Normalizer", @@ -61987,10 +62023,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } } @@ -63093,7 +63138,7 @@ "specLocation": "_types/analysis/token_filters.ts#L97-L103" }, { - "description": "Language value, such as _arabic_ or _thai_. Defaults to _english_.\nEach language value corresponds to a predefined list of stop words in Lucene. See Stop words by language for supported language values and their stop words.\nAlso accepts an array of stop words.", + "description": "Language value, such as _arabic_ or _thai_. Defaults to _english_.\r\nEach language value corresponds to a predefined list of stop words in Lucene. 
See Stop words by language for supported language values and their stop words.\r\nAlso accepts an array of stop words.", "kind": "type_alias", "name": { "name": "StopWords", @@ -64268,10 +64313,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } }, @@ -64441,10 +64495,19 @@ "name": "preserve_original", "required": false, "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } }, @@ -65470,7 +65533,7 @@ "specLocation": "_types/mapping/range.ts#L34-L36" }, { - "esQuirk": "This is a boolean that evolved into an enum. Boolean values should be accepted on reading, and\ntrue and false must be serialized as JSON booleans, or it may break Kibana (see elasticsearch-java#139)", + "esQuirk": "This is a boolean that evolved into an enum. Boolean values should be accepted on reading, and\r\ntrue and false must be serialized as JSON booleans, or it may break Kibana (see elasticsearch-java#139)", "kind": "enum", "members": [ { @@ -66346,9 +66409,9 @@ "specLocation": "_types/mapping/geo.ts#L23-L28" }, { - "description": "The `geo_shape` data type facilitates the indexing of and searching with arbitrary geo shapes such as rectangles\nand polygons.", + "description": "The `geo_shape` data type facilitates the indexing of and searching with arbitrary geo shapes such as rectangles\r\nand polygons.", "docId": "geo-shape", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/geo-shape.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/geo-shape.html\r", "inherits": { "type": { "name": "DocValuesPropertyBase", @@ -67015,7 +67078,7 @@ "specLocation": "_types/mapping/range.ts#L50-L52" }, { - "description": "A variant of text that trades scoring and efficiency of positional queries for space efficiency. This field\neffectively stores data the same way as a text field that only indexes documents (index_options: docs) and\ndisables norms (norms: false). Term queries perform as fast if not faster as on text fields, however queries\nthat need positions such as the match_phrase query perform slower as they need to look at the _source document\nto verify whether a phrase matches. All queries return constant scores that are equal to 1.0.", + "description": "A variant of text that trades scoring and efficiency of positional queries for space efficiency. This field\r\neffectively stores data the same way as a text field that only indexes documents (index_options: docs) and\r\ndisables norms (norms: false). Term queries perform as fast if not faster as on text fields, however queries\r\nthat need positions such as the match_phrase query perform slower as they need to look at the _source document\r\nto verify whether a phrase matches. 
All queries return constant scores that are equal to 1.0.", "kind": "interface", "name": { "name": "MatchOnlyTextProperty", @@ -67031,7 +67094,7 @@ } }, { - "description": "Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one\nfield for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers.", + "description": "Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one\r\nfield for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers.", "docId": "multi-fields", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/multi-fields.html", "name": "fields", @@ -67058,7 +67121,7 @@ { "description": "Metadata about the field.", "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "meta", "required": false, "type": { @@ -67081,7 +67144,7 @@ } }, { - "description": "Allows you to copy the values of multiple fields into a group\nfield, which can then be queried as a single field.", + "description": "Allows you to copy the values of multiple fields into a group\r\nfield, which can then be queried as a single field.", "name": "copy_to", "required": false, "type": { @@ -67800,7 +67863,7 @@ { "description": "Metadata about the field.", "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "meta", "required": false, "type": { @@ -68045,7 +68108,7 @@ { "description": "A custom format for `date` type runtime fields.", "docId": "mapping-date-format", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-date-format.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-date-format.html\r", "name": "format", "required": false, "type": { @@ -68371,9 +68434,9 @@ "specLocation": "_types/mapping/core.ts#L193-L203" }, { - "description": "The `shape` data type facilitates the indexing of and searching with arbitrary `x, y` cartesian shapes such as\nrectangles and polygons.", + "description": "The `shape` data type facilitates the indexing of and searching with arbitrary `x, y` cartesian shapes such as\r\nrectangles and polygons.", "docId": "shape", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/shape.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/shape.html\r", "inherits": { "type": { "name": "DocValuesPropertyBase", @@ -68589,7 +68652,7 @@ "name": "stored" }, { - "description": "Instead of storing source documents on disk exactly as you send them,\n Elasticsearch can reconstruct source content on the fly upon retrieval.", + "description": "Instead of storing source documents on disk exactly as you send them,\r\n Elasticsearch can reconstruct source content on the fly upon retrieval.", "name": "synthetic" } ], @@ -69113,7 +69176,7 @@ }, { "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -70504,7 +70567,7 @@ "specLocation": "_types/query_dsl/compound.ts#L138-L145" }, { - "esQuirk": "this container is valid without a variant. Despite being documented as a function, 'weight'\nis actually a container property that can be combined with a function. Comment in the ES code\n(SearchModule#registerScoreFunctions) says: Weight doesn't have its own parser, so every function\nsupports it out of the box. Can be a single function too when not associated to any other function,\nwhich is why it needs to be registered manually here.", + "esQuirk": "this container is valid without a variant. Despite being documented as a function, 'weight'\r\nis actually a container property that can be combined with a function. Comment in the ES code\r\n(SearchModule#registerScoreFunctions) says: Weight doesn't have its own parser, so every function\r\nsupports it out of the box. Can be a single function too when not associated to any other function,\r\nwhich is why it needs to be registered manually here.", "kind": "interface", "name": { "name": "FunctionScoreContainer", @@ -72110,7 +72173,7 @@ ], "description": "Text that we want similar documents for or a lookup to a document's field for the text.", "docId": "document-input-parameters", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-mlt-query.html#_document_input_parameters", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-mlt-query.html#_document_input_parameters\r", "kind": "type_alias", "name": { "name": "Like", @@ -73831,7 +73894,7 @@ }, { "docId": "query-dsl", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html\r", "kind": "interface", "name": { "name": "QueryContainer", @@ -75569,7 +75632,7 @@ ], "description": "Query flags can be either a single flag or a combination of flags, e.g. 
`OR|AND|PREFIX`", "docId": "supported-flags", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-simple-query-string-query.html#supported-flags", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-simple-query-string-query.html#supported-flags\r", "kind": "type_alias", "name": { "name": "SimpleQueryStringFlags", @@ -76835,7 +76898,7 @@ } }, { - "description": "Indicates how many reductions of the results have been performed.\nIf this number increases compared to the last retrieved results for a get asynch search request, you can expect additional results included in the search response.", + "description": "Indicates how many reductions of the results have been performed.\r\nIf this number increases compared to the last retrieved results for a get asynch search request, you can expect additional results included in the search response.", "name": "num_reduce_phases", "required": false, "type": { @@ -76880,7 +76943,7 @@ } }, { - "description": "Indicates how many shards have run the query.\nNote that in order for shard results to be included in the search response, they need to be reduced first.", + "description": "Indicates how many shards have run the query.\r\nNote that in order for shard results to be included in the search response, they need to be reduced first.", "name": "_shards", "required": true, "type": { @@ -77022,7 +77085,7 @@ } }, { - "description": "When the query is no longer running, this property indicates whether the search failed or was successfully completed on all shards.\nWhile the query is running, `is_partial` is always set to `true`.", + "description": "When the query is no longer running, this property indicates whether the search failed or was successfully completed on all shards.\r\nWhile the query is running, `is_partial` is always set to `true`.", "name": "is_partial", "required": true, "type": { @@ -77034,7 +77097,7 @@ } }, { - "description": "Indicates whether the search is still running or has completed.\nNOTE: If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`.", + "description": "Indicates whether the search is still running or has completed.\r\nNOTE: If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`.", "name": "is_running", "required": true, "type": { @@ -77118,7 +77181,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes an async search by identifier.\nIf the search is still running, the search request will be cancelled.\nOtherwise, the saved search results are deleted.\nIf the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.", + "description": "Deletes an async search by identifier.\r\nIf the search is still running, the search request will be cancelled.\r\nOtherwise, the saved search results are deleted.\r\nIf the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege.", "inherits": { "type": { "name": "RequestBase", @@ -77172,7 +77235,7 @@ "body": { "kind": "no_body" }, - "description": 
"Retrieves the results of a previously submitted async search request given its identifier.\nIf the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.", + "description": "Retrieves the results of a previously submitted async search request given its identifier.\r\nIf the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.", "inherits": { "type": { "name": "RequestBase", @@ -77200,7 +77263,7 @@ ], "query": [ { - "description": "Specifies how long the async search should be available in the cluster.\nWhen not specified, the `keep_alive` set with the corresponding submit async request will be used.\nOtherwise, it is possible to override the value and extend the validity of the request.\nWhen this period expires, the search, if still running, is cancelled.\nIf the search is completed, its saved results are deleted.", + "description": "Specifies how long the async search should be available in the cluster.\r\nWhen not specified, the `keep_alive` set with the corresponding submit async request will be used.\r\nOtherwise, it is possible to override the value and extend the validity of the request.\r\nWhen this period expires, the search, if still running, is cancelled.\r\nIf the search is completed, its saved results are deleted.", "name": "keep_alive", "required": false, "type": { @@ -77224,7 +77287,7 @@ } }, { - "description": "Specifies to wait for the search to be completed up until the provided timeout.\nFinal results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.\nBy default no timeout is set meaning that the currently available results will be returned without any additional wait.", + "description": "Specifies to wait for the search to be completed up until the provided timeout.\r\nFinal results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.\r\nBy default no timeout is set meaning that the currently available results will be returned without any additional wait.", "name": "wait_for_completion_timeout", "required": false, "type": { @@ -77278,7 +77341,7 @@ "body": { "kind": "no_body" }, - "description": "Retreives the status of a previously submitted async search request given its identifier, without retrieving search results.\nIf the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.", + "description": "Retreives the status of a previously submitted async search request given its identifier, without retrieving search results.\r\nIf the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role.", "inherits": { "type": { "name": "RequestBase", @@ -77351,7 +77414,7 @@ } }, { - "description": "If the async search completed, this field shows the status code of the search.\nFor example, 200 indicates that the async search was successfully completed.\n503 indicates that the async search was completed with an error.", + "description": "If the async search completed, this field shows the status code of the search.\r\nFor example, 200 indicates that the async search was successfully completed.\r\n503 indicates that the async search was completed with an error.", "name": "completion_status", "required": false, "type": { 
@@ -77441,7 +77504,7 @@ } }, { - "description": "Starting document offset. By default, you cannot page through more than 10,000\nhits using the from and size parameters. To page through more hits, use the\nsearch_after parameter.", + "description": "Starting document offset. By default, you cannot page through more than 10,000\r\nhits using the from and size parameters. To page through more hits, use the\r\nsearch_after parameter.", "name": "from", "required": false, "serverDefault": 0, @@ -77465,7 +77528,7 @@ } }, { - "description": "Number of hits matching the query to count accurately. If true, the exact\nnumber of hits is returned at the cost of some performance. If false, the\nresponse does not include the total number of hits matching the query.\nDefaults to 10,000 hits.", + "description": "Number of hits matching the query to count accurately. If true, the exact\r\nnumber of hits is returned at the cost of some performance. If false, the\r\nresponse does not include the total number of hits matching the query.\r\nDefaults to 10,000 hits.", "name": "track_total_hits", "required": false, "type": { @@ -77503,7 +77566,7 @@ } }, { - "description": "Array of wildcard (*) patterns. The request returns doc values for field\nnames matching these patterns in the hits.fields property of the response.", + "description": "Array of wildcard (*) patterns. The request returns doc values for field\r\nnames matching these patterns in the hits.fields property of the response.", "name": "docvalue_fields", "required": false, "type": { @@ -77552,7 +77615,7 @@ } }, { - "description": "Minimum _score for matching documents. Documents with a lower _score are\nnot included in the search results.", + "description": "Minimum _score for matching documents. Documents with a lower _score are\r\nnot included in the search results.", "name": "min_score", "required": false, "type": { @@ -77658,7 +77721,7 @@ } }, { - "description": "The number of hits to return. By default, you cannot page through more\nthan 10,000 hits using the from and size parameters. To page through more\nhits, use the search_after parameter.", + "description": "The number of hits to return. By default, you cannot page through more\r\nthan 10,000 hits using the from and size parameters. To page through more\r\nhits, use the search_after parameter.", "name": "size", "required": false, "serverDefault": 10, @@ -77683,7 +77746,7 @@ }, { "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "name": "sort", "required": false, "type": { @@ -77695,7 +77758,7 @@ } }, { - "description": "Indicates which source fields are returned for matching documents. These\nfields are returned in the hits._source property of the search response.", + "description": "Indicates which source fields are returned for matching documents. These\r\nfields are returned in the hits._source property of the search response.", "name": "_source", "required": false, "type": { @@ -77707,7 +77770,7 @@ } }, { - "description": "Array of wildcard (*) patterns. The request returns values for field names\nmatching these patterns in the hits.fields property of the response.", + "description": "Array of wildcard (*) patterns. 
The request returns values for field names\r\nmatching these patterns in the hits.fields property of the response.", "name": "fields", "required": false, "type": { @@ -77733,7 +77796,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard. If a query reaches this\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\nbefore sorting. Defaults to 0, which does not terminate query execution early.", + "description": "Maximum number of documents to collect for each shard. If a query reaches this\r\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\r\nbefore sorting. Defaults to 0, which does not terminate query execution early.", "name": "terminate_after", "required": false, "serverDefault": 0, @@ -77746,7 +77809,7 @@ } }, { - "description": "Specifies the period of time to wait for a response from each shard. If no response\nis received before the timeout expires, the request fails and returns an error.\nDefaults to no timeout.", + "description": "Specifies the period of time to wait for a response from each shard. If no response\r\nis received before the timeout expires, the request fails and returns an error.\r\nDefaults to no timeout.", "name": "timeout", "required": false, "type": { @@ -77784,7 +77847,7 @@ } }, { - "description": "If true, returns sequence number and primary term of the last modification\nof each hit. See Optimistic concurrency control.", + "description": "If true, returns sequence number and primary term of the last modification\r\nof each hit. See Optimistic concurrency control.", "name": "seq_no_primary_term", "required": false, "type": { @@ -77796,7 +77859,7 @@ } }, { - "description": "List of stored fields to return as part of a hit. If no fields are specified,\nno stored fields are included in the response. If this field is specified, the _source\nparameter defaults to false. You can pass _source: true to return both source fields\nand stored fields in the search response.", + "description": "List of stored fields to return as part of a hit. If no fields are specified,\r\nno stored fields are included in the response. If this field is specified, the _source\r\nparameter defaults to false. You can pass _source: true to return both source fields\r\nand stored fields in the search response.", "name": "stored_fields", "required": false, "type": { @@ -77808,7 +77871,7 @@ } }, { - "description": "Limits the search to a point in time (PIT). If you provide a PIT, you\ncannot specify an in the request path.", + "description": "Limits the search to a point in time (PIT). If you provide a PIT, you\r\ncannot specify an in the request path.", "name": "pit", "required": false, "type": { @@ -77820,7 +77883,7 @@ } }, { - "description": "Defines one or more runtime fields in the search request. These fields take\nprecedence over mapped fields with the same name.", + "description": "Defines one or more runtime fields in the search request. These fields take\r\nprecedence over mapped fields with the same name.", "name": "runtime_mappings", "required": false, "type": { @@ -77832,7 +77895,7 @@ } }, { - "description": "Stats groups to associate with the search. Each group maintains a statistics\naggregation for its associated searches. You can retrieve these stats using\nthe indices stats API.", + "description": "Stats groups to associate with the search. Each group maintains a statistics\r\naggregation for its associated searches. 
You can retrieve these stats using\r\nthe indices stats API.", "name": "stats", "required": false, "type": { @@ -77848,7 +77911,7 @@ } ] }, - "description": "Runs a search request asynchronously.\nWhen the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field, hence partial results become available following the sort criteria that was requested.\nWarning: Async search does not support scroll nor search requests that only include the suggest section.\nBy default, Elasticsearch doesn’t allow you to store an async search response larger than 10Mb and an attempt to do this results in an error.\nThe maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting.", + "description": "Runs a search request asynchronously.\r\nWhen the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field, hence partial results become available following the sort criteria that was requested.\r\nWarning: Async search does not support scroll nor search requests that only include the suggest section.\r\nBy default, Elasticsearch doesn’t allow you to store an async search response larger than 10Mb and an attempt to do this results in an error.\r\nThe maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting.", "inherits": { "type": { "name": "RequestBase", @@ -77876,7 +77939,7 @@ ], "query": [ { - "description": "Blocks and waits until the search is completed up to a certain timeout.\nWhen the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.", + "description": "Blocks and waits until the search is completed up to a certain timeout.\r\nWhen the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.", "name": "wait_for_completion_timeout", "required": false, "serverDefault": "1s", @@ -77902,7 +77965,7 @@ } }, { - "description": "Specifies how long the async search needs to be available.\nOngoing async searches and any saved search results are deleted after this period.", + "description": "Specifies how long the async search needs to be available.\r\nOngoing async searches and any saved search results are deleted after this period.", "name": "keep_alive", "required": false, "serverDefault": "5d", @@ -77963,7 +78026,7 @@ } }, { - "description": "Affects how often partial results become available, which happens whenever shard results are reduced.\nA partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).", + "description": "Affects how often partial results become available, which happens whenever shard results are reduced.\r\nA partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).", "name": "batched_reduce_size", "required": false, "serverDefault": 5, @@ -78955,7 +79018,7 @@ "aliases": [ "ae" ], - "description": "For open anomaly detection jobs only, contains messages relating to the\nselection of a node to run the job.", + "description": "For open anomaly detection jobs only, contains messages relating to the\r\nselection of a node to run the job.", "name": "assignment_explanation" }, { @@ -78979,7 +79042,7 @@ 
"bteah", "bucketsTimeExpAvgHour" ], - "description": "Exponentially-weighted moving average of bucket processing times calculated\nin a 1 hour time window, in milliseconds.", + "description": "Exponentially-weighted moving average of bucket processing times calculated\r\nin a 1 hour time window, in milliseconds.", "name": "buckets.time.exp_avg_hour" }, { @@ -79043,7 +79106,7 @@ "dif", "dataInputFields" ], - "description": "The total number of fields in input documents posted to the anomaly\ndetection job. This count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the\nrequired fields from the documents it retrieves before posting them to the job.", + "description": "The total number of fields in input documents posted to the anomaly\r\ndetection job. This count includes fields that are not used in the analysis.\r\nHowever, be aware that if you are using a datafeed, it extracts only the\r\nrequired fields from the documents it retrieves before posting them to the job.", "name": "data.input_fields" }, { @@ -79059,7 +79122,7 @@ "did", "dataInvalidDates" ], - "description": "The number of input documents with either a missing date field or a date\nthat could not be parsed.", + "description": "The number of input documents with either a missing date field or a date\r\nthat could not be parsed.", "name": "data.invalid_dates" }, { @@ -79099,7 +79162,7 @@ "dmf", "dataMissingFields" ], - "description": "The number of input documents that are missing a field that the anomaly\ndetection job is configured to analyze. Input documents with missing fields\nare still processed because it is possible that not all fields are missing.", + "description": "The number of input documents that are missing a field that the anomaly\r\ndetection job is configured to analyze. Input documents with missing fields\r\nare still processed because it is possible that not all fields are missing.", "name": "data.missing_fields" }, { @@ -79107,7 +79170,7 @@ "doot", "dataOutOfOrderTimestamps" ], - "description": "The number of input documents that have a timestamp chronologically\npreceding the start of the current anomaly detection bucket offset by the\nlatency window. This information is applicable only when you provide data\nto the anomaly detection job by using the post data API. These out of order\ndocuments are discarded, since jobs require time series data to be in\nascending chronological order.", + "description": "The number of input documents that have a timestamp chronologically\r\npreceding the start of the current anomaly detection bucket offset by the\r\nlatency window. This information is applicable only when you provide data\r\nto the anomaly detection job by using the post data API. These out of order\r\ndocuments are discarded, since jobs require time series data to be in\r\nascending chronological order.", "name": "data.out_of_order_timestamps" }, { @@ -79115,7 +79178,7 @@ "dpf", "dataProcessedFields" ], - "description": "The total number of fields in all the documents that have been processed by\nthe anomaly detection job. Only fields that are specified in the detector\nconfiguration object contribute to this count. The timestamp is not\nincluded in this count.", + "description": "The total number of fields in all the documents that have been processed by\r\nthe anomaly detection job. Only fields that are specified in the detector\r\nconfiguration object contribute to this count. 
The timestamp is not\r\nincluded in this count.", "name": "data.processed_fields" }, { @@ -79123,7 +79186,7 @@ "dpr", "dataProcessedRecords" ], - "description": "The number of input documents that have been processed by the anomaly\ndetection job. This value includes documents with missing fields, since\nthey are nonetheless analyzed. If you use datafeeds and have aggregations\nin your search query, the processed record count is the number of\naggregation results processed, not the number of Elasticsearch documents.", + "description": "The number of input documents that have been processed by the anomaly\r\ndetection job. This value includes documents with missing fields, since\r\nthey are nonetheless analyzed. If you use datafeeds and have aggregations\r\nin your search query, the processed record count is the number of\r\naggregation results processed, not the number of Elasticsearch documents.", "name": "data.processed_records" }, { @@ -79131,7 +79194,7 @@ "dsb", "dataSparseBuckets" ], - "description": "The number of buckets that contained few data points compared to the\nexpected number of data points.", + "description": "The number of buckets that contained few data points compared to the\r\nexpected number of data points.", "name": "data.sparse_buckets" }, { @@ -79139,7 +79202,7 @@ "fmavg", "forecastsMemoryAvg" ], - "description": "The average memory usage in bytes for forecasts related to the anomaly\ndetection job.", + "description": "The average memory usage in bytes for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.memory.avg" }, { @@ -79147,7 +79210,7 @@ "fmmax", "forecastsMemoryMax" ], - "description": "The maximum memory usage in bytes for forecasts related to the anomaly\ndetection job.", + "description": "The maximum memory usage in bytes for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.memory.max" }, { @@ -79155,7 +79218,7 @@ "fmmin", "forecastsMemoryMin" ], - "description": "The minimum memory usage in bytes for forecasts related to the anomaly\ndetection job.", + "description": "The minimum memory usage in bytes for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.memory.min" }, { @@ -79163,7 +79226,7 @@ "fmt", "forecastsMemoryTotal" ], - "description": "The total memory usage in bytes for forecasts related to the anomaly\ndetection job.", + "description": "The total memory usage in bytes for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.memory.total" }, { @@ -79171,7 +79234,7 @@ "fravg", "forecastsRecordsAvg" ], - "description": "The average number of `m`odel_forecast` documents written for forecasts\nrelated to the anomaly detection job.", + "description": "The average number of `m`odel_forecast` documents written for forecasts\r\nrelated to the anomaly detection job.", "name": "forecasts.records.avg" }, { @@ -79179,7 +79242,7 @@ "frmax", "forecastsRecordsMax" ], - "description": "The maximum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.", + "description": "The maximum number of `model_forecast` documents written for forecasts\r\nrelated to the anomaly detection job.", "name": "forecasts.records.max" }, { @@ -79187,7 +79250,7 @@ "frmin", "forecastsRecordsMin" ], - "description": "The minimum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.", + "description": "The minimum number of `model_forecast` documents written for forecasts\r\nrelated to the anomaly detection job.", 
"name": "forecasts.records.min" }, { @@ -79195,7 +79258,7 @@ "frt", "forecastsRecordsTotal" ], - "description": "The total number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.", + "description": "The total number of `model_forecast` documents written for forecasts\r\nrelated to the anomaly detection job.", "name": "forecasts.records.total" }, { @@ -79203,7 +79266,7 @@ "ftavg", "forecastsTimeAvg" ], - "description": "The average runtime in milliseconds for forecasts related to the anomaly\ndetection job.", + "description": "The average runtime in milliseconds for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.time.avg" }, { @@ -79211,7 +79274,7 @@ "ftmax", "forecastsTimeMax" ], - "description": "The maximum runtime in milliseconds for forecasts related to the anomaly\ndetection job.", + "description": "The maximum runtime in milliseconds for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.time.max" }, { @@ -79219,7 +79282,7 @@ "ftmin", "forecastsTimeMin" ], - "description": "The minimum runtime in milliseconds for forecasts related to the anomaly\ndetection job.", + "description": "The minimum runtime in milliseconds for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.time.min" }, { @@ -79227,7 +79290,7 @@ "ftt", "forecastsTimeTotal" ], - "description": "The total runtime in milliseconds for forecasts related to the anomaly\ndetection job.", + "description": "The total runtime in milliseconds for forecasts related to the anomaly\r\ndetection job.", "name": "forecasts.time.total" }, { @@ -79247,7 +79310,7 @@ "mbaf", "modelBucketAllocationFailures" ], - "description": "The number of buckets for which new entities in incoming data were not\nprocessed due to insufficient model memory.", + "description": "The number of buckets for which new entities in incoming data were not\r\nprocessed due to insufficient model memory.", "name": "model.bucket_allocation_failures" }, { @@ -79255,7 +79318,7 @@ "mbf", "modelByFields" ], - "description": "The number of by field values that were analyzed by the models. This value\nis cumulative for all detectors in the job.", + "description": "The number of by field values that were analyzed by the models. This value\r\nis cumulative for all detectors in the job.", "name": "model.by_fields" }, { @@ -79263,7 +79326,7 @@ "mb", "modelBytes" ], - "description": "The number of bytes of memory used by the models. This is the maximum value\nsince the last time the model was persisted. If the job is closed, this\nvalue indicates the latest size.", + "description": "The number of bytes of memory used by the models. This is the maximum value\r\nsince the last time the model was persisted. If the job is closed, this\r\nvalue indicates the latest size.", "name": "model.bytes" }, { @@ -79271,7 +79334,7 @@ "mbe", "modelBytesExceeded" ], - "description": "The number of bytes over the high limit for memory usage at the last\nallocation failure.", + "description": "The number of bytes over the high limit for memory usage at the last\r\nallocation failure.", "name": "model.bytes_exceeded" }, { @@ -79279,7 +79342,7 @@ "mcs", "modelCategorizationStatus" ], - "description": "The status of categorization for the job: `ok` or `warn`. If `ok`,\ncategorization is performing acceptably well (or not being used at all). If\n`warn`, categorization is detecting a distribution of categories that\nsuggests the input data is inappropriate for categorization. 
Problems could\nbe that there is only one category, more than 90% of categories are rare,\nthe number of categories is greater than 50% of the number of categorized\ndocuments, there are no frequently matched categories, or more than 50% of\ncategories are dead.", + "description": "The status of categorization for the job: `ok` or `warn`. If `ok`,\r\ncategorization is performing acceptably well (or not being used at all). If\r\n`warn`, categorization is detecting a distribution of categories that\r\nsuggests the input data is inappropriate for categorization. Problems could\r\nbe that there is only one category, more than 90% of categories are rare,\r\nthe number of categories is greater than 50% of the number of categorized\r\ndocuments, there are no frequently matched categories, or more than 50% of\r\ncategories are dead.", "name": "model.categorization_status" }, { @@ -79295,7 +79358,7 @@ "mdcc", "modelDeadCategoryCount" ], - "description": "The number of categories created by categorization that will never be\nassigned again because another category’s definition makes it a superset of\nthe dead category. Dead categories are a side effect of the way\ncategorization has no prior training.", + "description": "The number of categories created by categorization that will never be\r\nassigned again because another category’s definition makes it a superset of\r\nthe dead category. Dead categories are a side effect of the way\r\ncategorization has no prior training.", "name": "model.dead_category_count" }, { @@ -79303,7 +79366,7 @@ "mdcc", "modelFailedCategoryCount" ], - "description": "The number of times that categorization wanted to create a new category but\ncouldn’t because the job had hit its model memory limit. This count does\nnot track which specific categories failed to be created. Therefore, you\ncannot use this value to determine the number of unique categories that\nwere missed.", + "description": "The number of times that categorization wanted to create a new category but\r\ncouldn’t because the job had hit its model memory limit. This count does\r\nnot track which specific categories failed to be created. Therefore, you\r\ncannot use this value to determine the number of unique categories that\r\nwere missed.", "name": "model.failed_category_count" }, { @@ -79335,7 +79398,7 @@ "mms", "modelMemoryStatus" ], - "description": "The status of the mathematical models: `ok`, `soft_limit`, or `hard_limit`.\nIf `ok`, the models stayed below the configured value. If `soft_limit`, the\nmodels used more than 60% of the configured memory limit and older unused\nmodels will be pruned to free up space. Additionally, in categorization jobs\nno further category examples will be stored. If `hard_limit`, the models\nused more space than the configured memory limit. As a result, not all\nincoming data was processed.", + "description": "The status of the mathematical models: `ok`, `soft_limit`, or `hard_limit`.\r\nIf `ok`, the models stayed below the configured value. If `soft_limit`, the\r\nmodels used more than 60% of the configured memory limit and older unused\r\nmodels will be pruned to free up space. Additionally, in categorization jobs\r\nno further category examples will be stored. If `hard_limit`, the models\r\nused more space than the configured memory limit. As a result, not all\r\nincoming data was processed.", "name": "model.memory_status" }, { @@ -79343,7 +79406,7 @@ "mof", "modelOverFields" ], - "description": "The number of over field values that were analyzed by the models. 
This\nvalue is cumulative for all detectors in the job.", + "description": "The number of over field values that were analyzed by the models. This\r\nvalue is cumulative for all detectors in the job.", "name": "model.over_fields" }, { @@ -79351,7 +79414,7 @@ "mpf", "modelPartitionFields" ], - "description": "The number of partition field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.", + "description": "The number of partition field values that were analyzed by the models. This\r\nvalue is cumulative for all detectors in the job.", "name": "model.partition_fields" }, { @@ -79383,7 +79446,7 @@ "na", "nodeAddress" ], - "description": "The network address of the node that runs the job. This information is\navailable only for open jobs.", + "description": "The network address of the node that runs the job. This information is\r\navailable only for open jobs.", "name": "node.address" }, { @@ -79391,7 +79454,7 @@ "ne", "nodeEphemeralId" ], - "description": "The ephemeral ID of the node that runs the job. This information is\navailable only for open jobs.", + "description": "The ephemeral ID of the node that runs the job. This information is\r\navailable only for open jobs.", "name": "node.ephemeral_id" }, { @@ -79399,7 +79462,7 @@ "ni", "nodeId" ], - "description": "The unique identifier of the node that runs the job. This information is\navailable only for open jobs.", + "description": "The unique identifier of the node that runs the job. This information is\r\navailable only for open jobs.", "name": "node.id" }, { @@ -79407,7 +79470,7 @@ "nn", "nodeName" ], - "description": "The name of the node that runs the job. This information is available only\nfor open jobs.", + "description": "The name of the node that runs the job. This information is available only\r\nfor open jobs.", "name": "node.name" }, { @@ -79421,7 +79484,7 @@ "aliases": [ "s" ], - "description": "The status of the anomaly detection job: `closed`, `closing`, `failed`,\n`opened`, or `opening`. If `closed`, the job finished successfully with its\nmodel state persisted. The job must be opened before it can accept further\ndata. If `closing`, the job close action is in progress and has not yet\ncompleted. A closing job cannot accept further data. If `failed`, the job\ndid not finish successfully due to an error. This situation can occur due\nto invalid input data, a fatal error occurring during the analysis, or an\nexternal interaction such as the process being killed by the Linux out of\nmemory (OOM) killer. If the job had irrevocably failed, it must be force\nclosed and then deleted. If the datafeed can be corrected, the job can be\nclosed and then re-opened. If `opened`, the job is available to receive and\nprocess data. If `opening`, the job open action is in progress and has not\nyet completed.", + "description": "The status of the anomaly detection job: `closed`, `closing`, `failed`,\r\n`opened`, or `opening`. If `closed`, the job finished successfully with its\r\nmodel state persisted. The job must be opened before it can accept further\r\ndata. If `closing`, the job close action is in progress and has not yet\r\ncompleted. A closing job cannot accept further data. If `failed`, the job\r\ndid not finish successfully due to an error. This situation can occur due\r\nto invalid input data, a fatal error occurring during the analysis, or an\r\nexternal interaction such as the process being killed by the Linux out of\r\nmemory (OOM) killer. 
If the job had irrevocably failed, it must be force\r\nclosed and then deleted. If the datafeed can be corrected, the job can be\r\nclosed and then re-opened. If `opened`, the job is available to receive and\r\nprocess data. If `opening`, the job open action is in progress and has not\r\nyet completed.", "name": "state" } ], @@ -79468,7 +79531,7 @@ "aliases": [ "assignment_explanation" ], - "description": "For started datafeeds only, contains messages relating to the selection of\na node.", + "description": "For started datafeeds only, contains messages relating to the selection of\r\na node.", "name": "ae" }, { @@ -79488,7 +79551,7 @@ "node.address", "nodeAddress" ], - "description": "For started datafeeds only, the network address of the node where the\ndatafeed is started.", + "description": "For started datafeeds only, the network address of the node where the\r\ndatafeed is started.", "name": "na" }, { @@ -79496,7 +79559,7 @@ "node.ephemeral_id", "nodeEphemeralId" ], - "description": "For started datafeeds only, the ephemeral ID of the node where the\ndatafeed is started.", + "description": "For started datafeeds only, the ephemeral ID of the node where the\r\ndatafeed is started.", "name": "ne" }, { @@ -79504,7 +79567,7 @@ "node.id", "nodeId" ], - "description": "For started datafeeds only, the unique identifier of the node where the\ndatafeed is started.", + "description": "For started datafeeds only, the unique identifier of the node where the\r\ndatafeed is started.", "name": "ni" }, { @@ -79512,7 +79575,7 @@ "node.name", "nodeName" ], - "description": "For started datafeeds only, the name of the node where the datafeed is\nstarted.", + "description": "For started datafeeds only, the name of the node where the datafeed is\r\nstarted.", "name": "nn" }, { @@ -79551,7 +79614,7 @@ "aliases": [ "state" ], - "description": "The status of the datafeed: `starting`, `started`, `stopping`, or `stopped`.\nIf `starting`, the datafeed has been requested to start but has not yet\nstarted. If `started`, the datafeed is actively receiving data. If\n`stopping`, the datafeed has been requested to stop gracefully and is\ncompleting its final action. If `stopped`, the datafeed is stopped and will\nnot receive data until it is re-started.", + "description": "The status of the datafeed: `starting`, `started`, `stopping`, or `stopped`.\r\nIf `starting`, the datafeed has been requested to start but has not yet\r\nstarted. If `started`, the datafeed is actively receiving data. If\r\n`stopping`, the datafeed has been requested to stop gracefully and is\r\ncompleting its final action. 
If `stopped`, the datafeed is stopped and will\r\nnot receive data until it is re-started.", "name": "s" } ], @@ -79641,7 +79704,7 @@ "mml", "modelMemoryLimit" ], - "description": "The approximate maximum amount of memory resources that are permitted for\nthe data frame analytics job.", + "description": "The approximate maximum amount of memory resources that are permitted for\r\nthe data frame analytics job.", "name": "model_memory_limit" }, { @@ -79649,7 +79712,7 @@ "na", "nodeAddress" ], - "description": "The network address of the node that the data frame analytics job is\nassigned to.", + "description": "The network address of the node that the data frame analytics job is\r\nassigned to.", "name": "node.address" }, { @@ -79657,7 +79720,7 @@ "ne", "nodeEphemeralId" ], - "description": "The ephemeral ID of the node that the data frame analytics job is assigned\nto.", + "description": "The ephemeral ID of the node that the data frame analytics job is assigned\r\nto.", "name": "node.ephemeral_id" }, { @@ -79665,7 +79728,7 @@ "ni", "nodeId" ], - "description": "The unique identifier of the node that the data frame analytics job is\nassigned to.", + "description": "The unique identifier of the node that the data frame analytics job is\r\nassigned to.", "name": "node.id" }, { @@ -79709,7 +79772,7 @@ "aliases": [ "v" ], - "description": "The Elasticsearch version number in which the data frame analytics job was\ncreated.", + "description": "The Elasticsearch version number in which the data frame analytics job was\r\ncreated.", "name": "version" } ], @@ -79800,7 +79863,7 @@ "dataFrameAnalytics", "dfid" ], - "description": "Identifier for the data frame analytics job that created the model. Only\ndisplayed if it is still available.", + "description": "Identifier for the data frame analytics job that created the model. Only\r\ndisplayed if it is still available.", "name": "data_frame_analytics_id" }, { @@ -79835,7 +79898,7 @@ "icurr", "ingestCurrent" ], - "description": "The total number of document that are currently being handled by the\ntrained model.", + "description": "The total number of document that are currently being handled by the\r\ntrained model.", "name": "ingest.current" }, { @@ -79851,7 +79914,7 @@ "ip", "ingestPipelines" ], - "description": "The total number of ingest pipelines that are referencing the trained\nmodel.", + "description": "The total number of ingest pipelines that are referencing the trained\r\nmodel.", "name": "ingest.pipelines" }, { @@ -79874,7 +79937,7 @@ "o", "modelOperations" ], - "description": "The estimated number of operations to use the trained model. This number\nhelps measuring the computational complexity of the model.", + "description": "The estimated number of operations to use the trained model. This number\r\nhelps measuring the computational complexity of the model.", "name": "operations" }, { @@ -79943,7 +80006,7 @@ "cdtea", "checkpointTimeExpAvg" ], - "description": "Exponential moving average of the duration of the checkpoint, in\nmilliseconds.", + "description": "Exponential moving average of the duration of the checkpoint, in\r\nmilliseconds.", "name": "checkpoint_duration_time_exp_avg" }, { @@ -79981,42 +80044,42 @@ "di", "destIndex" ], - "description": "The destination index for the transform. The mappings of the destination\nindex are deduced based on the source fields when possible. If alternate\nmappings are required, use the Create index API prior to starting the\ntransform.", + "description": "The destination index for the transform. 
The mappings of the destination\r\nindex are deduced based on the source fields when possible. If alternate\r\nmappings are required, use the Create index API prior to starting the\r\ntransform.", "name": "dest_index" }, { "aliases": [ "docd" ], - "description": "The number of documents that have been deleted from the destination index\ndue to the retention policy for this transform.", + "description": "The number of documents that have been deleted from the destination index\r\ndue to the retention policy for this transform.", "name": "documents_deleted" }, { "aliases": [ "doci" ], - "description": "The number of documents that have been indexed into the destination index\nfor the transform.", + "description": "The number of documents that have been indexed into the destination index\r\nfor the transform.", "name": "documents_indexed" }, { "aliases": [ "dps" ], - "description": "Specifies a limit on the number of input documents per second. This setting\nthrottles the transform by adding a wait time between search requests. The\ndefault value is `null`, which disables throttling.", + "description": "Specifies a limit on the number of input documents per second. This setting\r\nthrottles the transform by adding a wait time between search requests. The\r\ndefault value is `null`, which disables throttling.", "name": "docs_per_second" }, { "aliases": [ "docp" ], - "description": "The number of documents that have been processed from the source index of\nthe transform.", + "description": "The number of documents that have been processed from the source index of\r\nthe transform.", "name": "documents_processed" }, { "aliases": [ "f" ], - "description": "The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is `1s` and the maximum is `1h`. The default\nvalue is `1m`.", + "description": "The interval between checks for changes in the source indices when the\r\ntransform is running continuously. Also determines the retry interval in\r\nthe event of transient failures while the transform is searching or\r\nindexing. The minimum value is `1s` and the maximum is `1h`. The default\r\nvalue is `1m`.", "name": "frequency" }, { @@ -80048,7 +80111,7 @@ "aliases": [ "idea" ], - "description": "Exponential moving average of the number of new documents that have been\nindexed.", + "description": "Exponential moving average of the number of new documents that have been\r\nindexed.", "name": "indexed_documents_exp_avg" }, { @@ -80056,21 +80119,21 @@ "lst", "lastSearchTime" ], - "description": "The timestamp of the last search in the source indices. This field is only\nshown if the transform is running.", + "description": "The timestamp of the last search in the source indices. This field is only\r\nshown if the transform is running.", "name": "last_search_time" }, { "aliases": [ "mpsz" ], - "description": "Defines the initial page size to use for the composite aggregation for each\ncheckpoint. If circuit breaker exceptions occur, the page size is\ndynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`. The default value is `500`.", + "description": "Defines the initial page size to use for the composite aggregation for each\r\ncheckpoint. If circuit breaker exceptions occur, the page size is\r\ndynamically adjusted to a lower value. 
The minimum value is `10` and the\r\nmaximum is `65,536`. The default value is `500`.", "name": "max_page_search_size" }, { "aliases": [ "pp" ], - "description": "The number of search or bulk index operations processed. Documents are\nprocessed in batches instead of individually.", + "description": "The number of search or bulk index operations processed. Documents are\r\nprocessed in batches instead of individually.", "name": "pages_processed" }, { @@ -80084,7 +80147,7 @@ "aliases": [ "pdea" ], - "description": "Exponential moving average of the number of documents that have been\nprocessed.", + "description": "Exponential moving average of the number of documents that have been\r\nprocessed.", "name": "processed_documents_exp_avg" }, { @@ -80098,7 +80161,7 @@ "aliases": [ "r" ], - "description": "If a transform has a `failed` state, this property provides details about\nthe reason for the failure.", + "description": "If a transform has a `failed` state, this property provides details about\r\nthe reason for the failure.", "name": "reason" }, { @@ -80127,14 +80190,14 @@ "si", "sourceIndex" ], - "description": "The source indices for the transform. It can be a single index, an index\npattern (for example, `\"my-index-*\"`), an array of indices (for example,\n`[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns\n(for example, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use\nthe syntax `\"remote_name:index_name\"`. If any indices are in remote\nclusters then the master node and at least one transform node must have the\n`remote_cluster_client` node role.", + "description": "The source indices for the transform. It can be a single index, an index\r\npattern (for example, `\"my-index-*\"`), an array of indices (for example,\r\n`[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns\r\n(for example, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use\r\nthe syntax `\"remote_name:index_name\"`. If any indices are in remote\r\nclusters then the master node and at least one transform node must have the\r\n`remote_cluster_client` node role.", "name": "source_index" }, { "aliases": [ "s" ], - "description": "The status of the transform, which can be one of the following values:\n\n* `aborting`: The transform is aborting.\n* `failed`: The transform failed. For more information about the failure,\ncheck the reason field.\n* `indexing`: The transform is actively processing data and creating new\ndocuments.\n* `started`: The transform is running but not actively indexing data.\n* `stopped`: The transform is stopped.\n* `stopping`: The transform is stopping.", + "description": "The status of the transform, which can be one of the following values:\r\n\r\n* `aborting`: The transform is aborting.\r\n* `failed`: The transform failed. For more information about the failure,\r\ncheck the reason field.\r\n* `indexing`: The transform is actively processing data and creating new\r\ndocuments.\r\n* `started`: The transform is running but not actively indexing data.\r\n* `stopped`: The transform is stopped.\r\n* `stopping`: The transform is stopping.", "name": "state" }, { @@ -80148,14 +80211,14 @@ "aliases": [ "tc" ], - "description": "The number of times the transform has been triggered by the scheduler. 
For\nexample, the scheduler triggers the transform indexer to check for updates\nor ingest new data at an interval specified in the `frequency` property.", + "description": "The number of times the transform has been triggered by the scheduler. For\r\nexample, the scheduler triggers the transform indexer to check for updates\r\nor ingest new data at an interval specified in the `frequency` property.", "name": "trigger_count" }, { "aliases": [ "v" ], - "description": "The version of Elasticsearch that existed on the node when the transform\nwas created.", + "description": "The version of Elasticsearch that existed on the node when the transform\r\nwas created.", "name": "version" } ], @@ -80308,7 +80371,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Retrieves the cluster’s index aliases, including filter and routing information.\r\nThe API does not return data stream aliases.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", "inherits": { "type": { "name": "CatRequestBase", @@ -80398,7 +80461,7 @@ "di", "diskIndices" ], - "description": "Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards.\nIMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index.", + "description": "Disk space used by the node’s shards. 
Does not include disk space for the translog or unassigned shards.\r\nIMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index.", "name": "disk.indices", "required": false, "type": { @@ -80426,7 +80489,7 @@ "du", "diskUsed" ], - "description": "Total disk space in use.\nElasticsearch retrieves this metric from the node’s operating system (OS).\nThe metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node.\nUnlike `disk.indices`, this metric does not double-count disk space for hard-linked files.", + "description": "Total disk space in use.\r\nElasticsearch retrieves this metric from the node’s operating system (OS).\r\nThe metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node.\r\nUnlike `disk.indices`, this metric does not double-count disk space for hard-linked files.", "name": "disk.used", "required": false, "type": { @@ -80454,7 +80517,7 @@ "da", "diskAvail" ], - "description": "Free disk space available to Elasticsearch.\nElasticsearch retrieves this metric from the node’s operating system.\nDisk-based shard allocation uses this metric to assign shards to nodes based on available disk space.", + "description": "Free disk space available to Elasticsearch.\r\nElasticsearch retrieves this metric from the node’s operating system.\r\nDisk-based shard allocation uses this metric to assign shards to nodes based on available disk space.", "name": "disk.avail", "required": false, "type": { @@ -80610,7 +80673,7 @@ "body": { "kind": "no_body" }, - "description": "Provides a snapshot of the number of shards allocated to each data node and their disk space.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Provides a snapshot of the number of shards allocated to each data node and their disk space.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "inherits": { "type": { "name": "CatRequestBase", @@ -80768,7 +80831,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Returns information about component templates in a cluster.\r\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\r\nThey are not intended for use by applications. 
For application consumption, use the get component template API.", "inherits": { "type": { "name": "CatRequestBase", @@ -80904,7 +80967,7 @@ "body": { "kind": "no_body" }, - "description": "Provides quick access to a document count for a data stream, an index, or an entire cluster.\nNOTE: The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Provides quick access to a document count for a data stream, an index, or an entire cluster.\r\nNOTE: The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\r\nThey are not intended for use by applications. For application consumption, use the count API.", "inherits": { "type": { "name": "CatRequestBase", @@ -80918,7 +80981,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\r\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -81053,7 +81116,7 @@ "body": { "kind": "no_body" }, - "description": "Returns the amount of heap memory currently used by the field data cache on every data node in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", + "description": "Returns the amount of heap memory currently used by the field data cache on every data node in the cluster.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\r\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", "inherits": { "type": { "name": "CatRequestBase", @@ -81067,7 +81130,7 @@ }, "path": [ { - "description": "Comma-separated list of fields used to limit returned information.\nTo retrieve all fields, omit this parameter.", + "description": "Comma-separated list of fields used to limit returned information.\r\nTo retrieve all fields, omit this parameter.", "name": "fields", "required": false, "type": { @@ -81392,7 +81455,7 @@ "body": { "kind": "no_body" }, - "description": "Returns the health status of a cluster, similar to the cluster health API.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", + "description": "Returns the health status of a cluster, similar to the cluster health API.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\r\nThey are not intended for use by applications. For application consumption, use the cluster health API.\r\nThis API is often used to check malfunctioning clusters.\r\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\r\n`HH:MM:SS`, which is human-readable but includes no date information;\r\n`Unix epoch time`, which is machine-sortable and includes date information.\r\nThe latter format is useful for cluster recoveries that take multiple days.\r\nYou can use the cat health API to verify cluster health across multiple nodes.\r\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", "inherits": { "type": { "name": "CatRequestBase", @@ -83577,7 +83640,7 @@ "body": { "kind": "no_body" }, - "description": "Returns high-level information about indices in a cluster, including backing indices for data streams.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get index API.\nUse the cat indices API to get the following information for each index in a cluster: shard count; document count; deleted document count; primary store size; total store size of all shards, including shard replicas.\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.", + "description": "Returns high-level information about indices in a cluster, including backing indices for data streams.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\r\nThey are not intended for use by applications. For application consumption, use the get index API.\r\nUse the cat indices API to get the following information for each index in a cluster: shard count; document count; deleted document count; primary store size; total store size of all shards, including shard replicas.\r\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\r\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.", "inherits": { "type": { "name": "CatRequestBase", @@ -83591,7 +83654,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`). 
To target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\r\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -83774,7 +83837,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the master node, including the ID, bound IP address, and name.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Returns information about the master node, including the ID, bound IP address, and name.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -84076,7 +84139,7 @@ "body": { "kind": "no_body" }, - "description": "Returns configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Returns configuration and usage information about data frame analytics jobs.\r\n\r\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\r\nconsole or command line. They are not intended for use by applications. For\r\napplication consumption, use the get data frame analytics jobs statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -84141,7 +84204,7 @@ } }, { - "description": "Comma-separated list of column names or column aliases used to sort the\nresponse.", + "description": "Comma-separated list of column names or column aliases used to sort the\r\nresponse.", "name": "s", "required": false, "type": { @@ -84322,7 +84385,7 @@ "ni", "nodeId" ], - "description": "The unique identifier of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", + "description": "The unique identifier of the assigned node.\r\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.id", "required": false, "type": { @@ -84338,7 +84401,7 @@ "nn", "nodeName" ], - "description": "The name of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", + "description": "The name of the assigned node.\r\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.name", "required": false, "type": { @@ -84354,7 +84417,7 @@ "ne", "nodeEphemeralId" ], - "description": "The ephemeral identifier of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", + "description": "The ephemeral identifier of the assigned node.\r\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.ephemeral_id", "required": false, "type": { @@ -84370,7 +84433,7 @@ "na", "nodeAddress" ], - "description": "The network address of the assigned node.\nFor started datafeeds 
only, this information pertains to the node upon which the datafeed is started.", + "description": "The network address of the assigned node.\r\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.address", "required": false, "type": { @@ -84392,7 +84455,7 @@ "body": { "kind": "no_body" }, - "description": "Returns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Returns configuration and usage information about datafeeds.\r\nThis API returns a maximum of 10,000 datafeeds.\r\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\r\ncluster privileges to use this API.\r\n\r\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\r\nconsole or command line. They are not intended for use by applications. For\r\napplication consumption, use the get datafeed statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -84420,7 +84483,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n* Contains wildcard expressions and there are no datafeeds that match.\n* Contains the `_all` string or no identifiers and there are no matches.\n* Contains wildcard expressions and there are only partial matches.\n\nIf `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when\nthere are partial matches. If `false`, the API returns a 404 status code when there are no matches or only\npartial matches.", + "description": "Specifies what to do when the request:\r\n\r\n* Contains wildcard expressions and there are no datafeeds that match.\r\n* Contains the `_all` string or no identifiers and there are no matches.\r\n* Contains wildcard expressions and there are only partial matches.\r\n\r\nIf `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when\r\nthere are partial matches. 
If `false`, the API returns a 404 status code when there are no matches or only\r\npartial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -84562,7 +84625,7 @@ "dpr", "dataProcessedRecords" ], - "description": "The number of input documents that have been processed by the anomaly detection job.\nThis value includes documents with missing fields, since they are nonetheless analyzed.\nIf you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents.", + "description": "The number of input documents that have been processed by the anomaly detection job.\r\nThis value includes documents with missing fields, since they are nonetheless analyzed.\r\nIf you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents.", "name": "data.processed_records", "required": false, "type": { @@ -84578,7 +84641,7 @@ "dpf", "dataProcessedFields" ], - "description": "The total number of fields in all the documents that have been processed by the anomaly detection job.\nOnly fields that are specified in the detector configuration object contribute to this count.\nThe timestamp is not included in this count.", + "description": "The total number of fields in all the documents that have been processed by the anomaly detection job.\r\nOnly fields that are specified in the detector configuration object contribute to this count.\r\nThe timestamp is not included in this count.", "name": "data.processed_fields", "required": false, "type": { @@ -84626,7 +84689,7 @@ "dif", "dataInputFields" ], - "description": "The total number of fields in input documents posted to the anomaly detection job.\nThis count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job.", + "description": "The total number of fields in input documents posted to the anomaly detection job.\r\nThis count includes fields that are not used in the analysis.\r\nHowever, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job.", "name": "data.input_fields", "required": false, "type": { @@ -84658,7 +84721,7 @@ "dmf", "dataMissingFields" ], - "description": "The number of input documents that are missing a field that the anomaly detection job is configured to analyze.\nInput documents with missing fields are still processed because it is possible that not all fields are missing.\nIf you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues.\nIt is not necessarily a cause for concern.", + "description": "The number of input documents that are missing a field that the anomaly detection job is configured to analyze.\r\nInput documents with missing fields are still processed because it is possible that not all fields are missing.\r\nIf you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues.\r\nIt is not necessarily a cause for concern.", "name": "data.missing_fields", "required": false, "type": { @@ -84674,7 +84737,7 @@ "doot", "dataOutOfOrderTimestamps" ], - "description": "The number of input documents 
that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window.\nThis information is applicable only when you provide data to the anomaly detection job by using the post data API.\nThese out of order documents are discarded, since jobs require time series data to be in ascending chronological order.", + "description": "The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window.\r\nThis information is applicable only when you provide data to the anomaly detection job by using the post data API.\r\nThese out of order documents are discarded, since jobs require time series data to be in ascending chronological order.", "name": "data.out_of_order_timestamps", "required": false, "type": { @@ -84690,7 +84753,7 @@ "deb", "dataEmptyBuckets" ], - "description": "The number of buckets which did not contain any data.\nIf your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`.", + "description": "The number of buckets which did not contain any data.\r\nIf your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`.", "name": "data.empty_buckets", "required": false, "type": { @@ -84706,7 +84769,7 @@ "dsb", "dataSparseBuckets" ], - "description": "The number of buckets that contained few data points compared to the expected number of data points.\nIf your data contains many sparse buckets, consider using a longer `bucket_span`.", + "description": "The number of buckets that contained few data points compared to the expected number of data points.\r\nIf your data contains many sparse buckets, consider using a longer `bucket_span`.", "name": "data.sparse_buckets", "required": false, "type": { @@ -84818,7 +84881,7 @@ "mb", "modelBytes" ], - "description": "The number of bytes of memory used by the models.\nThis is the maximum value since the last time the model was persisted.\nIf the job is closed, this value indicates the latest size.", + "description": "The number of bytes of memory used by the models.\r\nThis is the maximum value since the last time the model was persisted.\r\nIf the job is closed, this value indicates the latest size.", "name": "model.bytes", "required": false, "type": { @@ -84882,7 +84945,7 @@ "mbf", "modelByFields" ], - "description": "The number of `by` field values that were analyzed by the models.\nThis value is cumulative for all detectors in the job.", + "description": "The number of `by` field values that were analyzed by the models.\r\nThis value is cumulative for all detectors in the job.", "name": "model.by_fields", "required": false, "type": { @@ -84898,7 +84961,7 @@ "mof", "modelOverFields" ], - "description": "The number of `over` field values that were analyzed by the models.\nThis value is cumulative for all detectors in the job.", + "description": "The number of `over` field values that were analyzed by the models.\r\nThis value is cumulative for all detectors in the job.", "name": "model.over_fields", "required": false, "type": { @@ -84914,7 +84977,7 @@ "mpf", "modelPartitionFields" ], - "description": "The number of `partition` field values that were analyzed by the models.\nThis value is cumulative for all detectors in the job.", + "description": "The number of 
`partition` field values that were analyzed by the models.\r\nThis value is cumulative for all detectors in the job.", "name": "model.partition_fields", "required": false, "type": { @@ -84930,7 +84993,7 @@ "mbaf", "modelBucketAllocationFailures" ], - "description": "The number of buckets for which new entities in incoming data were not processed due to insufficient model memory.\nThis situation is also signified by a `hard_limit: memory_status` property value.", + "description": "The number of buckets for which new entities in incoming data were not processed due to insufficient model memory.\r\nThis situation is also signified by a `hard_limit: memory_status` property value.", "name": "model.bucket_allocation_failures", "required": false, "type": { @@ -85025,7 +85088,7 @@ "mdcc", "modelDeadCategoryCount" ], - "description": "The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category.\nDead categories are a side effect of the way categorization has no prior training.", + "description": "The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category.\r\nDead categories are a side effect of the way categorization has no prior training.", "name": "model.dead_category_count", "required": false, "type": { @@ -85041,7 +85104,7 @@ "mfcc", "modelFailedCategoryCount" ], - "description": "The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`.\nThis count does not track which specific categories failed to be created.\nTherefore you cannot use this value to determine the number of unique categories that were missed.", + "description": "The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`.\r\nThis count does not track which specific categories failed to be created.\r\nTherefore you cannot use this value to determine the number of unique categories that were missed.", "name": "model.failed_category_count", "required": false, "type": { @@ -85089,7 +85152,7 @@ "ft", "forecastsTotal" ], - "description": "The number of individual forecasts currently available for the job.\nA value of one or more indicates that forecasts exist.", + "description": "The number of individual forecasts currently available for the job.\r\nA value of one or more indicates that forecasts exist.", "name": "forecasts.total", "required": false, "type": { @@ -85463,7 +85526,7 @@ "body": { "kind": "no_body" }, - "description": "Returns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Returns configuration and usage information for anomaly detection jobs.\r\nThis API returns a maximum of 10,000 jobs.\r\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\r\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\r\n\r\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\r\nconsole or command line. They are not intended for use by applications. For\r\napplication consumption, use the get anomaly detection job statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -85491,7 +85554,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n* Contains wildcard expressions and there are no jobs that match.\n* Contains the `_all` string or no identifiers and there are no matches.\n* Contains wildcard expressions and there are only partial matches.\n\nIf `true`, the API returns an empty jobs array when there are no matches and the subset of results when there\nare partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial\nmatches.", + "description": "Specifies what to do when the request:\r\n\r\n* Contains wildcard expressions and there are no jobs that match.\r\n* Contains the `_all` string or no identifiers and there are no matches.\r\n* Contains wildcard expressions and there are only partial matches.\r\n\r\nIf `true`, the API returns an empty jobs array when there are no matches and the subset of results when there\r\nare partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial\r\nmatches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -85584,7 +85647,7 @@ "body": { "kind": "no_body" }, - "description": "Returns configuration and usage information about inference trained models.\n\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Returns configuration and usage information about inference trained models.\r\n\r\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\r\nconsole or command line. They are not intended for use by applications. 
For\r\napplication consumption, use the get trained models statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -85612,7 +85675,7 @@ ], "query": [ { - "description": "Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.\nIf `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the API returns a 404 status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.\r\nIf `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.\r\nIf `false`, the API returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -85764,7 +85827,7 @@ "o", "modelOperations" ], - "description": "The estimated number of operations to use the model.\nThis number helps to measure the computational complexity of the model.", + "description": "The estimated number of operations to use the model.\r\nThis number helps to measure the computational complexity of the model.", "name": "operations", "required": false, "type": { @@ -85920,7 +85983,7 @@ "dfid", "dataFrameAnalytics" ], - "description": "The identifier for the data frame analytics job that created the model.\nOnly displayed if the job is still available.", + "description": "The identifier for the data frame analytics job that created the model.\r\nOnly displayed if the job is still available.", "name": "data_frame.id", "required": false, "type": { @@ -86120,7 +86183,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Returns information about custom node attributes.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -86596,7 +86659,7 @@ "role", "nodeRole" ], - "description": "The roles of the node.\nReturned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node),and `-`(coordinating node only).", + "description": "The roles of the node.\r\nReturned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node),and `-`(coordinating node only).", "name": "node.role", "required": false, "type": { @@ -86611,7 +86674,7 @@ "aliases": [ "m" ], - "description": "Indicates whether the node is the elected master node.\nReturned values include `*`(elected master) and `-`(not elected master).", + "description": "Indicates whether the node is the elected master node.\r\nReturned values include `*`(elected master) and `-`(not elected master).", "name": "master", "required": false, "type": { @@ -87680,7 +87743,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Returns information about the nodes in a cluster.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -87846,7 +87909,7 @@ "body": { "kind": "no_body" }, - "description": "Returns cluster-level changes that have not yet been executed.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", + "description": "Returns cluster-level changes that have not yet been executed.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", "inherits": { "type": { "name": "CatRequestBase", @@ -87988,7 +88051,7 @@ "body": { "kind": "no_body" }, - "description": "Returns a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Returns a list of plugins running on each node of a cluster.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -88454,7 +88517,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "description": "Returns information about ongoing and completed shard recoveries.\r\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\r\nFor data streams, the API returns information about the stream’s backing indices.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", "inherits": { "type": { "name": "CatRequestBase", @@ -88468,7 +88531,7 @@ }, "path": [ { - "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\r\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -88591,7 +88654,7 @@ "body": { "kind": "no_body" }, - "description": "Returns the snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", + "description": "Returns the snapshot repositories for a cluster.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", "inherits": { "type": { "name": "CatRequestBase", @@ -88636,7 +88699,7 @@ "body": { "kind": "no_body" }, - "description": "Returns low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", + "description": "Returns low-level information about the Lucene segments in index shards.\r\nFor data streams, the API returns information about the backing indices.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the index segments API.", "inherits": { "type": { "name": "CatRequestBase", @@ -88650,7 +88713,7 @@ }, "path": [ { - "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\r\nSupports wildcards (`*`).\r\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -88799,7 +88862,7 @@ "g", "gen" ], - "description": "The segment generation number.\nElasticsearch increments this generation number for each segment written then uses this number to derive the segment name.", + "description": "The segment generation number.\r\nElasticsearch increments this generation number for each segment written then uses this number to derive the segment name.", "name": "generation", "required": false, "type": { @@ -88815,7 +88878,7 @@ "dc", "docsCount" ], - "description": "The number of documents in the segment.\nThis excludes deleted documents and counts any nested documents separately from their parents.\nIt also excludes documents which were indexed recently and do not yet belong to a segment.", + "description": "The number of documents in the segment.\r\nThis excludes deleted documents and counts any nested documents separately from their parents.\r\nIt also excludes documents which were indexed recently and do not yet belong to a segment.", "name": "docs.count", "required": false, "type": { @@ -88831,7 +88894,7 @@ "dd", "docsDeleted" ], - "description": "The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed.\nThis number excludes deletes that were performed recently and do not yet belong to a segment.\nDeleted documents are cleaned up by the automatic merge process if it makes sense to do so.\nAlso, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard.", + "description": "The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed.\r\nThis number excludes deletes that were performed recently and do not yet belong to a segment.\r\nDeleted documents are cleaned up by the automatic merge process if it makes sense to do so.\r\nAlso, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard.", "name": "docs.deleted", "required": false, "type": { @@ -88862,7 +88925,7 @@ "sm", "sizeMemory" ], - "description": "The segment memory in bytes.\nA value of `-1` indicates Elasticsearch was unable to compute this number.", + "description": "The segment memory in bytes.\r\nA value of `-1` indicates Elasticsearch was unable to compute this number.", "name": "size.memory", "required": false, "type": { @@ -88878,7 +88941,7 @@ "ic", "isCommitted" ], - "description": "If `true`, the segment is synced to disk.\nSegments that are synced can survive a hard reboot.\nIf `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start.", + "description": "If `true`, the segment is synced to disk.\r\nSegments that are synced can survive a hard reboot.\r\nIf `false`, the data from uncommitted segments is also stored in the 
transaction log so that Elasticsearch is able to replay changes on the next start.", "name": "committed", "required": false, "type": { @@ -88894,7 +88957,7 @@ "is", "isSearchable" ], - "description": "If `true`, the segment is searchable.\nIf `false`, the segment has most likely been written to disk but needs a refresh to be searchable.", + "description": "If `true`, the segment is searchable.\r\nIf `false`, the segment has most likely been written to disk but needs a refresh to be searchable.", "name": "searchable", "required": false, "type": { @@ -88925,7 +88988,7 @@ "ico", "isCompound" ], - "description": "If `true`, the segment is stored in a compound file.\nThis means Lucene merged all files from the segment in a single file to save file descriptors.", + "description": "If `true`, the segment is stored in a compound file.\r\nThis means Lucene merged all files from the segment in a single file to save file descriptors.", "name": "compound", "required": false, "type": { @@ -88947,7 +89010,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Returns information about the shards in a cluster.\r\nFor data streams, the API returns information about the backing indices.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "inherits": { "type": { "name": "CatRequestBase", @@ -88961,7 +89024,7 @@ }, "path": [ { - "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\r\nSupports wildcards (`*`).\r\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -89070,7 +89133,7 @@ "aliases": [ "st" ], - "description": "The shard state.\nReturned values include:\n`INITIALIZING`: The shard is recovering from a peer shard or gateway.\n`RELOCATING`: The shard is relocating.\n`STARTED`: The shard has started.\n`UNASSIGNED`: The shard is not assigned to any node.", + "description": "The shard state.\r\nReturned values include:\r\n`INITIALIZING`: The shard is recovering from a peer shard or gateway.\r\n`RELOCATING`: The shard is relocating.\r\n`STARTED`: The shard has started.\r\n`UNASSIGNED`: The shard is not assigned to any node.", "name": "state", "required": false, "type": { @@ -89215,7 +89278,7 @@ "aliases": [ "ur" ], - "description": "The reason for the last change to the state of an unassigned shard.\nIt does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information.\nReturned values include:\n`ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard.\n`CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery.\n`DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index.\n`EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index.\n`FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster 
reroute API.\n`INDEX_CLOSED`: Unassigned because the index was closed.\n`INDEX_CREATED`: Unassigned as a result of an API creation of an index.\n`INDEX_REOPENED`: Unassigned as a result of opening a closed index.\n`MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API.\n`NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index.\n`NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster.\n`NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API.\n`PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed.\n`REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled.\n`REINITIALIZED`: When a shard moves from started back to initializing.\n`REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica.\n`REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command.", + "description": "The reason for the last change to the state of an unassigned shard.\r\nIt does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information.\r\nReturned values include:\r\n`ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard.\r\n`CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery.\r\n`DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index.\r\n`EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index.\r\n`FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API.\r\n`INDEX_CLOSED`: Unassigned because the index was closed.\r\n`INDEX_CREATED`: Unassigned as a result of an API creation of an index.\r\n`INDEX_REOPENED`: Unassigned as a result of opening a closed index.\r\n`MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API.\r\n`NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index.\r\n`NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster.\r\n`NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API.\r\n`PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed.\r\n`REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled.\r\n`REINITIALIZED`: When a shard moves from started back to initializing.\r\n`REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica.\r\n`REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command.", "name": "unassigned.reason", "required": false, "type": { @@ -89260,7 +89323,7 @@ "aliases": [ "ud" ], - "description": "Additional details as to why the shard became unassigned.\nIt does not explain why the shard is not assigned; use the cluster allocation explain API for that information.", + "description": "Additional details as to why the shard became unassigned.\r\nIt does not explain why the shard is not assigned; use the cluster allocation explain API for that information.", "name": "unassigned.details", "required": false, "type": { @@ -90265,7 +90328,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the snapshots stored in one or more repositories.\nA snapshot is a 
backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", + "description": "Returns information about the snapshots stored in one or more repositories.\r\nA snapshot is a backup of an index or running Elasticsearch cluster.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", "inherits": { "type": { "name": "CatRequestBase", @@ -90279,7 +90342,7 @@ }, "path": [ { - "description": "A comma-separated list of snapshot repositories used to limit the request.\nAccepts wildcard expressions.\n`_all` returns all repositories.\nIf any repository fails during the request, Elasticsearch returns an error.", + "description": "A comma-separated list of snapshot repositories used to limit the request.\r\nAccepts wildcard expressions.\r\n`_all` returns all repositories.\r\nIf any repository fails during the request, Elasticsearch returns an error.", "name": "repository", "required": false, "type": { @@ -90371,7 +90434,7 @@ "aliases": [ "s" ], - "description": "The state of the snapshot process.\nReturned values include:\n`FAILED`: The snapshot process failed.\n`INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version.\n`IN_PROGRESS`: The snapshot process started but has not completed.\n`PARTIAL`: The snapshot process completed with a partial success.\n`SUCCESS`: The snapshot process completed with a full success.", + "description": "The state of the snapshot process.\r\nReturned values include:\r\n`FAILED`: The snapshot process failed.\r\n`INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version.\r\n`IN_PROGRESS`: The snapshot process started but has not completed.\r\n`PARTIAL`: The snapshot process completed with a partial success.\r\n`SUCCESS`: The snapshot process completed with a full success.", "name": "status", "required": false, "type": { @@ -90583,7 +90646,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about tasks currently executing in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", + "description": "Returns information about tasks currently executing in the cluster.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", "inherits": { "type": { "name": "CatRequestBase", @@ -90930,7 +90993,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the get index template API.", + "description": "Returns information about index templates in a cluster.\r\nYou can use index templates to apply index settings and field mappings to new indices at creation.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", "inherits": { "type": { "name": "CatRequestBase", @@ -90944,7 +91007,7 @@ }, "path": [ { - "description": "The name of the template to return.\nAccepts wildcard expressions. If omitted, all templates are returned.", + "description": "The name of the template to return.\r\nAccepts wildcard expressions. If omitted, all templates are returned.", "name": "name", "required": false, "type": { @@ -91086,7 +91149,7 @@ "body": { "kind": "no_body" }, - "description": "Returns thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Returns thread pool statistics for each node in a cluster.\r\nReturned information includes all built-in thread pools and custom thread pools.\r\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -91100,7 +91163,7 @@ }, "path": [ { - "description": "A comma-separated list of thread pool names used to limit the request.\nAccepts wildcard expressions.", + "description": "A comma-separated list of thread pool names used to limit the request.\r\nAccepts wildcard expressions.", "name": "thread_pool_patterns", "required": false, "type": { @@ -91280,7 +91343,7 @@ "aliases": [ "t" ], - "description": "The thread pool type.\nReturned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`.", + "description": "The thread pool type.\r\nReturned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`.", "name": "type", "required": false, "type": { @@ -91515,7 +91578,7 @@ "body": { "kind": "no_body" }, - "description": "Returns configuration and usage information about transforms.\n\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "description": "Returns configuration and usage information about transforms.\r\n\r\nIMPORTANT: cat APIs are only intended for human consumption using the Kibana\r\nconsole or command line. They are not intended for use by applications. 
For\r\napplication consumption, use the get transform statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -91529,7 +91592,7 @@ }, "path": [ { - "description": "A transform identifier or a wildcard expression.\nIf you do not specify one of these options, the API returns information for all transforms.", + "description": "A transform identifier or a wildcard expression.\r\nIf you do not specify one of these options, the API returns information for all transforms.", "name": "transform_id", "required": false, "type": { @@ -91543,7 +91606,7 @@ ], "query": [ { - "description": "Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.\nIf `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.\r\nIf `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.\r\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -91665,7 +91728,7 @@ "aliases": [ "s" ], - "description": "The status of the transform.\nReturned values include:\n`aborting`: The transform is aborting.\n`failed: The transform failed. For more information about the failure, check the `reason` field.\n`indexing`: The transform is actively processing data and creating new documents.\n`started`: The transform is running but not actively indexing data.\n`stopped`: The transform is stopped.\n`stopping`: The transform is stopping.", + "description": "The status of the transform.\r\nReturned values include:\r\n`aborting`: The transform is aborting.\r\n`failed: The transform failed. 
For more information about the failure, check the `reason` field.\r\n`indexing`: The transform is actively processing data and creating new documents.\r\n`started`: The transform is running but not actively indexing data.\r\n`stopped`: The transform is stopped.\r\n`stopping`: The transform is stopping.", "name": "state", "required": false, "type": { @@ -91740,7 +91803,7 @@ "lst", "lastSearchTime" ], - "description": "The timestamp of the last search in the source indices.\nThis field is shown only if the transform is running.", + "description": "The timestamp of the last search in the source indices.\r\nThis field is shown only if the transform is running.", "name": "last_search_time", "required": false, "type": { @@ -92097,7 +92160,7 @@ "aliases": [ "tc" ], - "description": "The number of times the transform has been triggered by the scheduler.\nFor example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property.", + "description": "The number of times the transform has been triggered by the scheduler.\r\nFor example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property.", "name": "trigger_count", "required": false, "type": { @@ -92112,7 +92175,7 @@ "aliases": [ "pp" ], - "description": "The number of search or bulk index operations processed.\nDocuments are processed in batches instead of individually.", + "description": "The number of search or bulk index operations processed.\r\nDocuments are processed in batches instead of individually.", "name": "pages_processed", "required": false, "type": { @@ -93694,7 +93757,7 @@ { "description": "The remote cluster containing the leader indices to match against.", "docId": "modules-remote-clusters", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-remote-clusters.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-remote-clusters.html\r", "name": "remote_cluster", "required": true, "type": { @@ -94481,7 +94544,7 @@ }, { "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -94504,7 +94567,7 @@ "properties": [ { "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -95837,7 +95900,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Deletes component templates.\r\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", "inherits": { "type": { "name": "RequestBase", @@ -95865,7 +95928,7 @@ ], "query": [ { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node.\r\nIf no response is received before the timeout expires, the 
request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -95878,7 +95941,7 @@ } }, { - "description": "Period to wait for a response.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a response.\r\nIf no response is received before the timeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -95933,7 +95996,7 @@ "path": [], "query": [ { - "description": "Specifies whether to wait for all excluded nodes to be removed from the\ncluster before clearing the voting configuration exclusions list.\nDefaults to true, meaning that all excluded nodes must be removed from\nthe cluster before this API takes any action. If set to false then the\nvoting configuration exclusions list is cleared even if some excluded\nnodes are still in the cluster.", + "description": "Specifies whether to wait for all excluded nodes to be removed from the\r\ncluster before clearing the voting configuration exclusions list.\r\nDefaults to true, meaning that all excluded nodes must be removed from\r\nthe cluster before this API takes any action. If set to false then the\r\nvoting configuration exclusions list is cleared even if some excluded\r\nnodes are still in the cluster.", "name": "wait_for_removal", "required": false, "serverDefault": true, @@ -95980,7 +96043,7 @@ }, "path": [ { - "description": "Comma-separated list of component template names used to limit the request.\nWildcard (*) expressions are supported.", + "description": "Comma-separated list of component template names used to limit the request.\r\nWildcard (*) expressions are supported.", "name": "name", "required": true, "type": { @@ -95994,7 +96057,7 @@ ], "query": [ { - "description": "Period to wait for a connection to the master node. If no response is\nreceived before the timeout expires, the request fails and returns an\nerror.", + "description": "Period to wait for a connection to the master node. 
If no response is\r\nreceived before the timeout expires, the request fails and returns an\r\nerror.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -96007,7 +96070,7 @@ } }, { - "description": "If true, the request retrieves information from the local node only.\nDefaults to false, which means information is retrieved from the master node.", + "description": "If true, the request retrieves information from the local node only.\r\nDefaults to false, which means information is retrieved from the master node.", "name": "local", "required": false, "serverDefault": false, @@ -96054,7 +96117,7 @@ }, "path": [ { - "description": "Comma-separated list of component template names used to limit the request.\nWildcard (`*`) expressions are supported.", + "description": "Comma-separated list of component template names used to limit the request.\r\nWildcard (`*`) expressions are supported.", "name": "name", "required": false, "type": { @@ -96105,7 +96168,7 @@ } }, { - "description": "If `true`, the request retrieves information from the local node only.\nIf `false`, information is retrieved from the master node.", + "description": "If `true`, the request retrieves information from the local node only.\r\nIf `false`, information is retrieved from the master node.", "name": "local", "required": false, "serverDefault": false, @@ -96118,7 +96181,7 @@ } }, { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node.\r\nIf no response is received before the timeout expires, the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -96167,7 +96230,7 @@ "body": { "kind": "no_body" }, - "description": "Returns cluster-wide settings.\nBy default, it returns only settings that have been explicitly defined.", + "description": "Returns cluster-wide settings.\r\nBy default, it returns only settings that have been explicitly defined.", "inherits": { "type": { "name": "RequestBase", @@ -96208,7 +96271,7 @@ } }, { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node.\r\nIf no response is received before the timeout expires, the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -96221,7 +96284,7 @@ } }, { - "description": "Period to wait for a response.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a response.\r\nIf no response is received before the timeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -96662,7 +96725,7 @@ "body": { "kind": "no_body" }, - "description": "The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.\nThe cluster health status is: green, yellow or red. 
On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status.", + "description": "The cluster health API returns a simple status on the health of the cluster. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.\r\nThe cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster, yellow means that the primary shard is allocated but replicas are not, and green means that all shards are allocated. The index level status is controlled by the worst shard status. The cluster status is controlled by the worst index status.", "inherits": { "type": { "name": "RequestBase", @@ -97101,7 +97164,7 @@ } }, { - "description": "The priority of the pending task.\nThe valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`.", + "description": "The priority of the pending task.\r\nThe valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`.", "name": "priority", "required": true, "type": { @@ -97167,7 +97230,7 @@ "body": { "kind": "no_body" }, - "description": "Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed.\nNOTE: This API returns a list of any pending updates to the cluster state.\nThese are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.\nHowever, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.", + "description": "Returns cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet been executed.\r\nNOTE: This API returns a list of any pending updates to the cluster state.\r\nThese are distinct from the tasks reported by the Task Management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.\r\nHowever, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.", "inherits": { "type": { "name": "RequestBase", @@ -97182,7 +97245,7 @@ "path": [], "query": [ { - "description": "If `true`, the request retrieves information from the local node only.\nIf `false`, information is retrieved from the master node.", + "description": "If `true`, the request retrieves information from the local node only.\r\nIf `false`, information is retrieved from the master node.", "name": "local", "required": false, "serverDefault": false, @@ -97195,7 +97258,7 @@ } }, { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node.\r\nIf no response is received before the timeout expires, 
the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -97259,7 +97322,7 @@ "path": [], "query": [ { - "description": "A comma-separated list of the names of the nodes to exclude from the\nvoting configuration. If specified, you may not also specify node_ids.", + "description": "A comma-separated list of the names of the nodes to exclude from the\r\nvoting configuration. If specified, you may not also specify node_ids.", "name": "node_names", "required": false, "type": { @@ -97271,7 +97334,7 @@ } }, { - "description": "A comma-separated list of the persistent ids of the nodes to exclude\nfrom the voting configuration. If specified, you may not also specify node_names.", + "description": "A comma-separated list of the persistent ids of the nodes to exclude\r\nfrom the voting configuration. If specified, you may not also specify node_names.", "name": "node_ids", "required": false, "type": { @@ -97283,7 +97346,7 @@ } }, { - "description": "When adding a voting configuration exclusion, the API waits for the\nspecified nodes to be excluded from the voting configuration before\nreturning. If the timeout expires before the appropriate condition\nis satisfied, the request fails and returns an error.", + "description": "When adding a voting configuration exclusion, the API waits for the\r\nspecified nodes to be excluded from the voting configuration before\r\nreturning. If the timeout expires before the appropriate condition\r\nis satisfied, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -97317,7 +97380,7 @@ "kind": "properties", "properties": [ { - "description": "This setting overrides the value of the `action.auto_create_index` cluster setting.\nIf set to `true` in a template, then indices can be automatically created using that\ntemplate even if auto-creation of indices is disabled via `actions.auto_create_index`.\nIf set to `false` then data streams matching the template must always be explicitly created.", + "description": "This setting overrides the value of the `action.auto_create_index` cluster setting.\r\nIf set to `true` in a template, then indices can be automatically created using that\r\ntemplate even if auto-creation of indices is disabled via `actions.auto_create_index`.\r\nIf set to `false` then data streams matching the template must always be explicitly created.", "name": "allow_auto_create", "required": false, "type": { @@ -97341,7 +97404,7 @@ } }, { - "description": "Version number used to manage component templates externally.\nThis number isn't automatically generated or incremented by Elasticsearch.\nTo unset a version, replace the template without specifying a version.", + "description": "Version number used to manage component templates externally.\r\nThis number isn't automatically generated or incremented by Elasticsearch.\r\nTo unset a version, replace the template without specifying a version.", "name": "version", "required": false, "type": { @@ -97353,7 +97416,7 @@ } }, { - "description": "Optional user metadata about the component template.\nMay have any contents. This map is not automatically generated by Elasticsearch.\nThis information is stored in the cluster state, so keeping it short is preferable.\nTo unset `_meta`, replace the template without specifying this information.", + "description": "Optional user metadata about the component template.\r\nMay have any contents. 
This map is not automatically generated by Elasticsearch.\r\nThis information is stored in the cluster state, so keeping it short is preferable.\r\nTo unset `_meta`, replace the template without specifying this information.", "name": "_meta", "required": false, "type": { @@ -97366,7 +97429,7 @@ } ] }, - "description": "Creates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Creates or updates a component template.\r\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\r\n\r\nAn index template can be composed of multiple component templates.\r\nTo use a component template, specify it in an index template’s `composed_of` list.\r\nComponent templates are only applied to new data streams and indices as part of a matching index template.\r\n\r\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\r\n\r\nComponent templates are only used during index creation.\r\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\r\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\r\n\r\nYou can use C-style `/* *\\/` block comments in component templates.\r\nYou can include comments anywhere in the request body except before the opening curly bracket.", "inherits": { "type": { "name": "RequestBase", @@ -97380,7 +97443,7 @@ }, "path": [ { - "description": "Name of the component template to create.\nElasticsearch includes the following built-in component templates: `logs-mappings`; 'logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`.\nElastic Agent uses these templates to configure backing indices for its data streams.\nIf you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.\nIf you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.", + "description": "Name of the component template to create.\r\nElasticsearch includes the following built-in component templates: `logs-mappings`; 'logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`.\r\nElastic Agent uses these templates to configure backing indices for its data streams.\r\nIf you use Elastic Agent and want to 
overwrite one of these templates, set the `version` for your replacement template higher than the current version.\r\nIf you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.", "name": "name", "required": true, "type": { @@ -97407,7 +97470,7 @@ } }, { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node.\r\nIf no response is received before the timeout expires, the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -97820,7 +97883,7 @@ "body": { "kind": "no_body" }, - "description": "The cluster remote info API allows you to retrieve all of the configured\nremote cluster information. It returns connection and endpoint information\nkeyed by the configured remote cluster alias.", + "description": "The cluster remote info API allows you to retrieve all of the configured\r\nremote cluster information. It returns connection and endpoint information\r\nkeyed by the configured remote cluster alias.", "inherits": { "type": { "name": "RequestBase", @@ -97992,7 +98055,7 @@ }, { "docId": "modules-cluster", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cluster.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cluster.html\r", "kind": "interface", "name": { "name": "CommandAllocateReplicaAction", @@ -98459,7 +98522,7 @@ } }, { - "description": "There aren't any guarantees on the output/structure of the raw cluster state.\nHere you will find the internal representation of the cluster, which can\ndiffer from the external representation.", + "description": "There aren't any guarantees on the output/structure of the raw cluster state.\r\nHere you will find the internal representation of the cluster, which can\r\ndiffer from the external representation.", "name": "state", "required": false, "type": { @@ -98777,7 +98840,7 @@ }, "properties": [ { - "description": "Total number of bytes available to JVM in file stores across all selected nodes.\nDepending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`.\nThis is the actual amount of free disk space the selected Elasticsearch nodes can use.", + "description": "Total number of bytes available to JVM in file stores across all selected nodes.\r\nDepending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`.\r\nThis is the actual amount of free disk space the selected Elasticsearch nodes can use.", "name": "available_in_bytes", "required": true, "type": { @@ -98873,7 +98936,7 @@ { "description": "Contains statistics about the field data cache of selected nodes.", "docId": "modules-fielddata", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-fielddata.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-fielddata.html\r", "name": "fielddata", "required": true, "type": { @@ -98947,7 +99010,7 @@ { "description": "Contains statistics about analyzers and analyzer components used in selected nodes.", "docId": "analyzer-anatomy", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/analyzer-anatomy.html", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/analyzer-anatomy.html\r", "name": "versions", "required": false, "type": { @@ -99296,7 +99359,7 @@ } }, { - "description": "Full version number of JVM.\nThe full version number includes a plus sign (+) followed by the build number.", + "description": "Full version number of JVM.\r\nThe full version number includes a plus sign (+) followed by the build number.", "name": "vm_version", "required": true, "type": { @@ -99559,7 +99622,7 @@ { "description": "Contains statistics about the discovery types used by selected nodes.", "docId": "modules-discovery-hosts-providers", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-discovery-hosts-providers.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-discovery-hosts-providers.html\r", "name": "discovery_types", "required": true, "type": { @@ -99674,7 +99737,7 @@ } }, { - "description": "Contains statistics about installed plugins and modules by selected nodes.\nIf no plugins or modules are installed, this array is empty.", + "description": "Contains statistics about installed plugins and modules by selected nodes.\r\nIf no plugins or modules are installed, this array is empty.", "name": "plugins", "required": true, "type": { @@ -99726,7 +99789,7 @@ }, "properties": [ { - "description": "Number of processors used to calculate thread pool size across all selected nodes.\nThis number can be set with the processors setting of a node and defaults to the number of processors reported by the operating system.\nIn both cases, this number will never be larger than 32.", + "description": "Number of processors used to calculate thread pool size across all selected nodes.\r\nThis number can be set with the processors setting of a node and defaults to the number of processors reported by the operating system.\r\nIn both cases, this number will never be larger than 32.", "name": "allocated_processors", "required": true, "type": { @@ -99953,7 +100016,7 @@ }, "properties": [ { - "description": "Percentage of CPU used across all selected nodes.\nReturns `-1` if not supported.", + "description": "Percentage of CPU used across all selected nodes.\r\nReturns `-1` if not supported.", "name": "percent", "required": true, "type": { @@ -99975,7 +100038,7 @@ }, "properties": [ { - "description": "Average number of concurrently open file descriptors.\nReturns `-1` if not supported.", + "description": "Average number of concurrently open file descriptors.\r\nReturns `-1` if not supported.", "name": "avg", "required": true, "type": { @@ -99987,7 +100050,7 @@ } }, { - "description": "Maximum number of concurrently open file descriptors allowed across all selected nodes.\nReturns `-1` if not supported.", + "description": "Maximum number of concurrently open file descriptors allowed across all selected nodes.\r\nReturns `-1` if not supported.", "name": "max", "required": true, "type": { @@ -99999,7 +100062,7 @@ } }, { - "description": "Minimum number of concurrently open file descriptors across all selected nodes.\nReturns -1 if not supported.", + "description": "Minimum number of concurrently open file descriptors across all selected nodes.\r\nReturns -1 if not supported.", "name": "min", "required": true, "type": { @@ -100680,7 +100743,7 @@ "body": { "kind": "no_body" }, - "description": "Returns cluster statistics.\nIt returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that 
form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).", + "description": "Returns cluster statistics.\r\nIt returns basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).", "inherits": { "type": { "name": "RequestBase", @@ -100721,7 +100784,7 @@ } }, { - "description": "Period to wait for each node to respond.\nIf a node does not respond before its timeout expires, the response does not include its stats.\nHowever, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.", + "description": "Period to wait for each node to respond.\r\nIf a node does not respond before its timeout expires, the response does not include its stats.\r\nHowever, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.", "name": "timeout", "required": false, "type": { @@ -100986,7 +101049,7 @@ { "description": "Contains statistics about nodes selected by the request’s node filters.", "docId": "cluster-nodes", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster.html#cluster-nodes", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster.html#cluster-nodes\r", "name": "nodes", "required": true, "type": { @@ -102078,7 +102141,7 @@ { "description": "Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence.", "docId": "eql-sequences", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html#eql-sequences", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html#eql-sequences\r", "name": "sequences", "required": false, "type": { @@ -102323,7 +102386,7 @@ { "description": "Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax.", "docId": "eql-sequences", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html#eql-sequences", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html#eql-sequences\r", "name": "join_keys", "required": true, "type": { @@ -102633,7 +102696,7 @@ { "description": "EQL query you wish to run.", "docId": "eql-syntax", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html\r", "name": "query", "required": true, "type": { @@ -102671,7 +102734,7 @@ { "description": "Field used to sort hits with the same timestamp in ascending order", "docId": "sort-tiebreaker", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql.html#eql-search-specify-a-sort-tiebreaker", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql.html#eql-search-specify-a-sort-tiebreaker\r", "name": "tiebreaker_field", "required": false, "type": { @@ -102770,7 +102833,7 @@ { "description": "For basic queries, the maximum number of matching events to return. 
Defaults to 10", "docId": "eql-basic-syntax", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html#eql-basic-syntax", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-syntax.html#eql-basic-syntax\r", "name": "size", "required": false, "type": { @@ -103190,7 +103253,7 @@ ], "query": [ { - "description": "A boolean value which controls whether to wait (until the timeout) for the global checkpoints\nto advance past the provided `checkpoints`.", + "description": "A boolean value which controls whether to wait (until the timeout) for the global checkpoints\r\nto advance past the provided `checkpoints`.", "name": "wait_for_advance", "required": false, "serverDefault": false, @@ -103203,7 +103266,7 @@ } }, { - "description": "A boolean value which controls whether to wait (until the timeout) for the target index to exist\nand all primary shards be active. Can only be true when `wait_for_advance` is true.", + "description": "A boolean value which controls whether to wait (until the timeout) for the target index to exist\r\nand all primary shards be active. Can only be true when `wait_for_advance` is true.", "name": "wait_for_index", "required": false, "serverDefault": false, @@ -103216,7 +103279,7 @@ } }, { - "description": "A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`,\nthe API will only return once the global checkpoints advances past the checkpoints. Providing an empty list\nwill cause Elasticsearch to immediately return the current global checkpoints.", + "description": "A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`,\r\nthe API will only return once the global checkpoints advances past the checkpoints. Providing an empty list\r\nwill cause Elasticsearch to immediately return the current global checkpoints.", "name": "checkpoints", "required": false, "serverDefault": [], @@ -103303,7 +103366,7 @@ } } }, - "description": "Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request.\nThe API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it\nsupports the wait_for_checkpoints parameter.", + "description": "Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request.\r\nThe API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it\r\nsupports the wait_for_checkpoints parameter.", "inherits": { "type": { "name": "RequestBase", @@ -103357,7 +103420,7 @@ { "description": "If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.", "docId": "ccs-network-delays", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cross-cluster-search.html#ccs-network-delays", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/modules-cross-cluster-search.html#ccs-network-delays\r", "name": "ccs_minimize_roundtrips", "required": false, "serverDefault": true, @@ -103482,7 +103545,7 @@ } }, { - "description": "A comma separated list of checkpoints. 
When configured, the search API will only be executed on a shard\nafter the relevant checkpoint has become visible for search. Defaults to an empty list which will cause\nElasticsearch to immediately execute the search.", + "description": "A comma separated list of checkpoints. When configured, the search API will only be executed on a shard\r\nafter the relevant checkpoint has become visible for search. Defaults to an empty list which will cause\r\nElasticsearch to immediately execute the search.", "name": "wait_for_checkpoints", "required": false, "serverDefault": [], @@ -103498,7 +103561,7 @@ } }, { - "description": "If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns\nan error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`\nwhich is true by default.", + "description": "If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns\r\nan error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`\r\nwhich is true by default.", "name": "allow_partial_search_results", "required": false, "type": { @@ -103630,7 +103693,7 @@ } }, { - "description": "Starting document offset. By default, you cannot page through more than 10,000\nhits using the from and size parameters. To page through more hits, use the\nsearch_after parameter.", + "description": "Starting document offset. By default, you cannot page through more than 10,000\r\nhits using the from and size parameters. To page through more hits, use the\r\nsearch_after parameter.", "name": "from", "required": false, "serverDefault": 0, @@ -103654,7 +103717,7 @@ } }, { - "description": "Number of hits matching the query to count accurately. If true, the exact\nnumber of hits is returned at the cost of some performance. If false, the\nresponse does not include the total number of hits matching the query.\nDefaults to 10,000 hits.", + "description": "Number of hits matching the query to count accurately. If true, the exact\r\nnumber of hits is returned at the cost of some performance. If false, the\r\nresponse does not include the total number of hits matching the query.\r\nDefaults to 10,000 hits.", "name": "track_total_hits", "required": false, "type": { @@ -103692,7 +103755,7 @@ } }, { - "description": "Array of wildcard (*) patterns. The request returns doc values for field\nnames matching these patterns in the hits.fields property of the response.", + "description": "Array of wildcard (*) patterns. The request returns doc values for field\r\nnames matching these patterns in the hits.fields property of the response.", "name": "docvalue_fields", "required": false, "type": { @@ -103707,7 +103770,7 @@ } }, { - "description": "Minimum _score for matching documents. Documents with a lower _score are\nnot included in the search results.", + "description": "Minimum _score for matching documents. Documents with a lower _score are\r\nnot included in the search results.", "name": "min_score", "required": false, "type": { @@ -103813,7 +103876,7 @@ } }, { - "description": "The number of hits to return. By default, you cannot page through more\nthan 10,000 hits using the from and size parameters. 
To page through more\nhits, use the search_after parameter.", + "description": "The number of hits to return. By default, you cannot page through more\r\nthan 10,000 hits using the from and size parameters. To page through more\r\nhits, use the search_after parameter.", "name": "size", "required": false, "serverDefault": 10, @@ -103838,7 +103901,7 @@ }, { "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "name": "sort", "required": false, "type": { @@ -103850,7 +103913,7 @@ } }, { - "description": "Indicates which source fields are returned for matching documents. These\nfields are returned in the hits._source property of the search response.", + "description": "Indicates which source fields are returned for matching documents. These\r\nfields are returned in the hits._source property of the search response.", "name": "_source", "required": false, "type": { @@ -103862,7 +103925,7 @@ } }, { - "description": "Array of wildcard (*) patterns. The request returns values for field names\nmatching these patterns in the hits.fields property of the response.", + "description": "Array of wildcard (*) patterns. The request returns values for field names\r\nmatching these patterns in the hits.fields property of the response.", "name": "fields", "required": false, "type": { @@ -103888,7 +103951,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard. If a query reaches this\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\nbefore sorting. Defaults to 0, which does not terminate query execution early.", + "description": "Maximum number of documents to collect for each shard. If a query reaches this\r\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\r\nbefore sorting. Defaults to 0, which does not terminate query execution early.", "name": "terminate_after", "required": false, "serverDefault": 0, @@ -103901,7 +103964,7 @@ } }, { - "description": "Specifies the period of time to wait for a response from each shard. If no response\nis received before the timeout expires, the request fails and returns an error.\nDefaults to no timeout.", + "description": "Specifies the period of time to wait for a response from each shard. If no response\r\nis received before the timeout expires, the request fails and returns an error.\r\nDefaults to no timeout.", "name": "timeout", "required": false, "type": { @@ -103939,7 +104002,7 @@ } }, { - "description": "If true, returns sequence number and primary term of the last modification\nof each hit. See Optimistic concurrency control.", + "description": "If true, returns sequence number and primary term of the last modification\r\nof each hit. See Optimistic concurrency control.", "name": "seq_no_primary_term", "required": false, "type": { @@ -103951,7 +104014,7 @@ } }, { - "description": "List of stored fields to return as part of a hit. If no fields are specified,\nno stored fields are included in the response. If this field is specified, the _source\nparameter defaults to false. You can pass _source: true to return both source fields\nand stored fields in the search response.", + "description": "List of stored fields to return as part of a hit. If no fields are specified,\r\nno stored fields are included in the response. 
If this field is specified, the _source\r\nparameter defaults to false. You can pass _source: true to return both source fields\r\nand stored fields in the search response.", "name": "stored_fields", "required": false, "type": { @@ -103963,7 +104026,7 @@ } }, { - "description": "Limits the search to a point in time (PIT). If you provide a PIT, you\ncannot specify an in the request path.", + "description": "Limits the search to a point in time (PIT). If you provide a PIT, you\r\ncannot specify an in the request path.", "name": "pit", "required": false, "type": { @@ -103975,7 +104038,7 @@ } }, { - "description": "Defines one or more runtime fields in the search request. These fields take\nprecedence over mapped fields with the same name.", + "description": "Defines one or more runtime fields in the search request. These fields take\r\nprecedence over mapped fields with the same name.", "name": "runtime_mappings", "required": false, "type": { @@ -103987,7 +104050,7 @@ } }, { - "description": "Stats groups to associate with the search. Each group maintains a statistics\naggregation for its associated searches. You can retrieve these stats using\nthe indices stats API.", + "description": "Stats groups to associate with the search. Each group maintains a statistics\r\naggregation for its associated searches. You can retrieve these stats using\r\nthe indices stats API.", "name": "stats", "required": false, "type": { @@ -104003,7 +104066,7 @@ } ] }, - "description": "The purpose of the fleet search api is to provide a search api where the search will only be executed\nafter provided checkpoint has been processed and is visible for searches inside of Elasticsearch.", + "description": "The purpose of the fleet search api is to provide a search api where the search will only be executed\r\nafter provided checkpoint has been processed and is visible for searches inside of Elasticsearch.", "inherits": { "type": { "name": "RequestBase", @@ -104525,7 +104588,7 @@ } }, { - "description": "A comma separated list of checkpoints. When configured, the search API will only be executed on a shard\nafter the relevant checkpoint has become visible for search. Defaults to an empty list which will cause\nElasticsearch to immediately execute the search.", + "description": "A comma separated list of checkpoints. When configured, the search API will only be executed on a shard\r\nafter the relevant checkpoint has become visible for search. Defaults to an empty list which will cause\r\nElasticsearch to immediately execute the search.", "name": "wait_for_checkpoints", "required": false, "serverDefault": [], @@ -104541,7 +104604,7 @@ } }, { - "description": "If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns\nan error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`\nwhich is true by default.", + "description": "If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns\r\nan error with no partial results. 
Defaults to the configured cluster setting `search.default_allow_partial_results`\r\nwhich is true by default.", "name": "allow_partial_search_results", "required": false, "type": { @@ -106102,7 +106165,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`).\nTo target all data streams and indices, use `*` or `_all`.", + "description": "Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`).\r\nTo target all data streams and indices, use `*` or `_all`.", "name": "index", "required": true, "type": { @@ -106417,7 +106480,7 @@ } ] }, - "description": "Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and\nattribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+\nUsing node roles enables ILM to automatically move the indices between data tiers.", + "description": "Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and\r\nattribute-based allocation filters to using data tiers, and optionally deletes one legacy index template.+\r\nUsing node roles enables ILM to automatically move the indices between data tiers.", "inherits": { "type": { "name": "RequestBase", @@ -106432,7 +106495,7 @@ "path": [], "query": [ { - "description": "If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration.\nThis provides a way to retrieve the indices and ILM policies that need to be migrated.", + "description": "If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration.\r\nThis provides a way to retrieve the indices and ILM policies that need to be migrated.", "name": "dry_run", "required": false, "serverDefault": false, @@ -107224,7 +107287,7 @@ "specLocation": "indices/_types/DataLifecycle.ts#L24-L29" }, { - "description": "Data lifecycle with rollover can be used to display the configuration including the default rollover conditions,\nif asked.", + "description": "Data lifecycle with rollover can be used to display the configuration including the default rollover conditions,\r\nif asked.", "kind": "interface", "name": { "name": "DataLifecycleWithRollover", @@ -107232,7 +107295,7 @@ }, "properties": [ { - "description": "If defined, every document added to this data stream will be stored at least for this time frame.\nAny time after this duration the document could be deleted.\nWhen empty, every document in this data stream will be stored indefinitely.", + "description": "If defined, every document added to this data stream will be stored at least for this time frame.\r\nAny time after this duration the document could be deleted.\r\nWhen empty, every document in this data stream will be stored indefinitely.", "name": "data_retention", "required": false, "type": { @@ -107244,7 +107307,7 @@ } }, { - "description": "The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`.\nThis property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true.\nThe contents of this field are subject to change.", + "description": "The conditions which will trigger the rollover of a backing index as configured by the cluster setting 
`cluster.lifecycle.default.rollover`.\r\nThis property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true.\r\nThe contents of this field are subject to change.", "name": "rollover", "required": false, "type": { @@ -107266,9 +107329,9 @@ }, "properties": [ { - "description": "Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template.\nIf empty, the response omits this property.", + "description": "Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template.\r\nIf empty, the response omits this property.", "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -107316,7 +107379,7 @@ } }, { - "description": "Name of the current ILM lifecycle policy in the stream’s matching index template.\nThis lifecycle policy is set in the `index.lifecycle.name` setting.\nIf the template does not include a lifecycle policy, this property is not included in the response.\nNOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API.", + "description": "Name of the current ILM lifecycle policy in the stream’s matching index template.\r\nThis lifecycle policy is set in the `index.lifecycle.name` setting.\r\nIf the template does not include a lifecycle policy, this property is not included in the response.\r\nNOTE: A data stream’s backing indices may be assigned different lifecycle policies. 
To retrieve the lifecycle policy for individual backing indices, use the get index settings API.", "name": "ilm_policy", "required": false, "type": { @@ -107328,7 +107391,7 @@ } }, { - "description": "Array of objects containing information about the data stream’s backing indices.\nThe last item in this array contains information about the stream’s current write index.", + "description": "Array of objects containing information about the data stream’s backing indices.\r\nThe last item in this array contains information about the stream’s current write index.", "name": "indices", "required": true, "type": { @@ -107390,7 +107453,7 @@ } }, { - "description": "Health status of the data stream.\nThis health status is based on the state of the primary and replica shards of the stream’s backing indices.", + "description": "Health status of the data stream.\r\nThis health status is based on the state of the primary and replica shards of the stream’s backing indices.", "name": "status", "required": true, "type": { @@ -107421,7 +107484,7 @@ } }, { - "description": "Name of the index template used to create the data stream’s backing indices.\nThe template’s index pattern must match the name of this data stream.", + "description": "Name of the index template used to create the data stream’s backing indices.\r\nThe template’s index pattern must match the name of this data stream.", "name": "template", "required": true, "type": { @@ -108199,7 +108262,7 @@ } ], "docId": "index-modules-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules.html#index-modules-settings", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules.html#index-modules-settings\r", "kind": "interface", "name": { "name": "IndexSettings", @@ -109119,12 +109182,21 @@ "description": "Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action.\nYou can explicitly set it to skip rollover.", "name": "indexing_complete", "required": false, - "serverDefault": false, + "serverDefault": "false", "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "Stringified", + "namespace": "_spec_utils" } } }, @@ -109401,7 +109473,7 @@ }, { "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -109627,7 +109699,7 @@ { "description": "Mapping Limit Settings", "docId": "mapping-settings-limit", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-settings-limit.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-settings-limit.html\r", "kind": "interface", "name": { "name": "MappingLimitSettings", @@ -110706,7 +110778,7 @@ { "description": "The indexing slow log, similar in functionality to the search slow log. 
The log file name ends with `_index_indexing_slowlog.json`.\nLog and the thresholds are configured in the same way as the search slowlog.", "docId": "index-modules-slowlog-slowlog", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-slowlog.html#index-slow-log", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-slowlog.html#index-slow-log\r", "name": "index", "required": false, "type": { @@ -112420,7 +112492,7 @@ } }, { - "description": "Mapping for fields in the index. If specified, this mapping can include:\n- Field names\n- Field data types\n- Mapping parameters", + "description": "Mapping for fields in the index. If specified, this mapping can include:\r\n- Field names\r\n- Field data types\r\n- Mapping parameters", "name": "mappings", "required": false, "type": { @@ -112563,7 +112635,7 @@ "body": { "kind": "no_body" }, - "description": "Creates a data stream.\nYou must have a matching index template with data stream enabled.", + "description": "Creates a data stream.\r\nYou must have a matching index template with data stream enabled.", "inherits": { "type": { "name": "RequestBase", @@ -112577,7 +112649,7 @@ }, "path": [ { - "description": "Name of the data stream, which must meet the following criteria:\nLowercase only;\nCannot include `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character;\nCannot start with `-`, `_`, `+`, or `.ds-`;\nCannot be `.` or `..`;\nCannot be longer than 255 bytes. Multi-byte characters count towards this limit faster.", + "description": "Name of the data stream, which must meet the following criteria:\r\nLowercase only;\r\nCannot include `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character;\r\nCannot start with `-`, `_`, `+`, or `.ds-`;\r\nCannot be `.` or `..`;\r\nCannot be longer than 255 bytes. Multi-byte characters count towards this limit faster.", "name": "name", "required": true, "type": { @@ -113186,7 +113258,7 @@ "body": { "kind": "no_body" }, - "description": "The provided may contain multiple template names separated by a comma. If multiple template\nnames are specified then there is no wildcard support and the provided names should match completely with\nexisting templates.", + "description": "The provided may contain multiple template names separated by a comma. If multiple template\r\nnames are specified then there is no wildcard support and the provided names should match completely with\r\nexisting templates.", "inherits": { "type": { "name": "RequestBase", @@ -114389,7 +114461,7 @@ ], "query": [ { - "description": "If false, the request returns an error if any wildcard expression, index alias, or _all value targets\nonly missing or closed indices. This behavior applies even if the request targets other open indices.\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index\nstarts with `bar`.", + "description": "If false, the request returns an error if any wildcard expression, index alias, or _all value targets\r\nonly missing or closed indices. This behavior applies even if the request targets other open indices.\r\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index\r\nstarts with `bar`.", "name": "allow_no_indices", "required": false, "type": { @@ -114401,7 +114473,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. 
If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as `open,hidden`.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\r\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\r\nsuch as `open,hidden`.", "name": "expand_wildcards", "required": false, "type": { @@ -114438,7 +114510,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response is received before the timeout expires,\nthe request fails and returns an error.", + "description": "Period to wait for a connection to the master node. If no response is received before the timeout expires,\r\nthe request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -114451,7 +114523,7 @@ } }, { - "description": "Period to wait for a response. If no response is received before the timeout expires, the request fails\nand returns an error.", + "description": "Period to wait for a response. If no response is received before the timeout expires, the request fails\r\nand returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -114464,7 +114536,7 @@ } }, { - "description": "The number of shard copies that must be active before proceeding with the operation. Set to all or any\npositive integer up to the total number of shards in the index (`number_of_replicas+1`).", + "description": "The number of shard copies that must be active before proceeding with the operation. Set to all or any\r\npositive integer up to the total number of shards in the index (`number_of_replicas+1`).", "name": "wait_for_active_shards", "required": false, "serverDefault": "1", @@ -114895,7 +114967,7 @@ }, "properties": [ { - "description": "task contains a task id returned when wait_for_completion=false,\nyou can use the task_id to get the status of the task at _tasks/", + "description": "task contains a task id returned when wait_for_completion=false,\r\nyou can use the task_id to get the status of the task at _tasks/", "name": "task", "required": false, "type": { @@ -114965,7 +115037,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", + "description": "Returns information about one or more indices. For data streams, the API returns information about the\r\nstream’s backing indices.", "inherits": { "type": { "name": "RequestBase", @@ -114979,7 +115051,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and index aliases used to limit the request.\nWildcard expressions (*) are supported.", + "description": "Comma-separated list of data streams, indices, and index aliases used to limit the request.\r\nWildcard expressions (*) are supported.", "name": "index", "required": true, "type": { @@ -114993,7 +115065,7 @@ ], "query": [ { - "description": "If false, the request returns an error if any wildcard expression, index alias, or _all value targets only\nmissing or closed indices. This behavior applies even if the request targets other open indices. 
For example,\na request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.", + "description": "If false, the request returns an error if any wildcard expression, index alias, or _all value targets only\r\nmissing or closed indices. This behavior applies even if the request targets other open indices. For example,\r\na request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -115006,7 +115078,7 @@ } }, { - "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as open,hidden.", + "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument\r\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\r\nsuch as open,hidden.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -115463,7 +115535,7 @@ }, "path": [ { - "description": "Comma-separated list of data stream names used to limit the request.\nWildcard (`*`) expressions are supported. If omitted, all data streams are returned.", + "description": "Comma-separated list of data stream names used to limit the request.\r\nWildcard (`*`) expressions are supported. If omitted, all data streams are returned.", "name": "name", "required": false, "type": { @@ -115477,7 +115549,7 @@ ], "query": [ { - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of data stream that wildcard patterns can match.\r\nSupports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -116037,7 +116109,7 @@ "body": { "kind": "no_body" }, - "description": "Returns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Returns setting information for one or more indices. For data streams,\r\nreturns setting information for the stream’s backing indices.", "inherits": { "type": { "name": "RequestBase", @@ -116051,7 +116123,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases used to limit\nthe request. Supports wildcards (`*`). To target all data streams and\nindices, omit this parameter or use `*` or `_all`.", + "description": "Comma-separated list of data streams, indices, and aliases used to limit\r\nthe request. Supports wildcards (`*`). To target all data streams and\r\nindices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -116077,7 +116149,7 @@ ], "query": [ { - "description": "If `false`, the request returns an error if any wildcard expression, index\nalias, or `_all` value targets only missing or closed indices. This\nbehavior applies even if the request targets other open indices. For\nexample, a request targeting `foo*,bar*` returns an error if an index\nstarts with foo but no index starts with `bar`.", + "description": "If `false`, the request returns an error if any wildcard expression, index\r\nalias, or `_all` value targets only missing or closed indices. This\r\nbehavior applies even if the request targets other open indices. 
For\r\nexample, a request targeting `foo*,bar*` returns an error if an index\r\nstarts with foo but no index starts with `bar`.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -116090,7 +116162,7 @@ } }, { - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of index that wildcard patterns can match.\r\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\r\nSupports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -116142,7 +116214,7 @@ } }, { - "description": "If `true`, the request retrieves information from the local node only. If\n`false`, information is retrieved from the master node.", + "description": "If `true`, the request retrieves information from the local node only. If\r\n`false`, information is retrieved from the master node.", "name": "local", "required": false, "serverDefault": false, @@ -116155,7 +116227,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response is\nreceived before the timeout expires, the request fails and returns an\nerror.", + "description": "Period to wait for a connection to the master node. If no response is\r\nreceived before the timeout expires, the request fails and returns an\r\nerror.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -117080,7 +117152,7 @@ }, { "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -117185,7 +117257,7 @@ } }, { - "description": "If date detection is enabled then new string fields are checked\nagainst 'dynamic_date_formats' and if the value matches then\na new date field is added instead of string.", + "description": "If date detection is enabled then new string fields are checked\r\nagainst 'dynamic_date_formats' and if the value matches then\r\na new date field is added instead of string.", "name": "dynamic_date_formats", "required": false, "type": { @@ -117261,7 +117333,7 @@ } }, { - "description": "A mapping type can have custom meta data associated with it. These are\nnot used at all by Elasticsearch, but can be used to store\napplication-specific metadata.", + "description": "A mapping type can have custom meta data associated with it. These are\r\nnot used at all by Elasticsearch, but can be used to store\r\napplication-specific metadata.", "name": "_meta", "required": false, "type": { @@ -117286,7 +117358,7 @@ } }, { - "description": "Mapping for a field. For new fields, this mapping can include:\n\n- Field name\n- Field data type\n- Mapping parameters", + "description": "Mapping for a field. For new fields, this mapping can include:\r\n\r\n- Field name\r\n- Field data type\r\n- Mapping parameters", "name": "properties", "required": false, "type": { @@ -117481,7 +117553,7 @@ } } }, - "description": "Changes a dynamic index setting in real time. For data streams, index setting\nchanges are applied to all backing indices by default.", + "description": "Changes a dynamic index setting in real time. 
For data streams, index setting\r\nchanges are applied to all backing indices by default.", "inherits": { "type": { "name": "RequestBase", @@ -117495,7 +117567,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases used to limit\nthe request. Supports wildcards (`*`). To target all data streams and\nindices, omit this parameter or use `*` or `_all`.", + "description": "Comma-separated list of data streams, indices, and aliases used to limit\r\nthe request. Supports wildcards (`*`). To target all data streams and\r\nindices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -117509,7 +117581,7 @@ ], "query": [ { - "description": "If `false`, the request returns an error if any wildcard expression, index\nalias, or `_all` value targets only missing or closed indices. This\nbehavior applies even if the request targets other open indices. For\nexample, a request targeting `foo*,bar*` returns an error if an index\nstarts with `foo` but no index starts with `bar`.", + "description": "If `false`, the request returns an error if any wildcard expression, index\r\nalias, or `_all` value targets only missing or closed indices. This\r\nbehavior applies even if the request targets other open indices. For\r\nexample, a request targeting `foo*,bar*` returns an error if an index\r\nstarts with `foo` but no index starts with `bar`.", "name": "allow_no_indices", "required": false, "serverDefault": false, @@ -117522,7 +117594,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target\ndata streams, this argument determines whether wildcard expressions match\nhidden data streams. Supports comma-separated values, such as\n`open,hidden`.", + "description": "Type of index that wildcard patterns can match. If the request can target\r\ndata streams, this argument determines whether wildcard expressions match\r\nhidden data streams. Supports comma-separated values, such as\r\n`open,hidden`.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -117561,7 +117633,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response is\nreceived before the timeout expires, the request fails and returns an\nerror.", + "description": "Period to wait for a connection to the master node. If no response is\r\nreceived before the timeout expires, the request fails and returns an\r\nerror.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -117587,7 +117659,7 @@ } }, { - "description": "Period to wait for a response. If no response is received before the\n timeout expires, the request fails and returns an error.", + "description": "Period to wait for a response. If no response is received before the\r\n timeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -117651,7 +117723,7 @@ } }, { - "description": "Array of wildcard expressions used to match the names\nof indices during creation.", + "description": "Array of wildcard expressions used to match the names\r\nof indices during creation.", "name": "index_patterns", "required": false, "type": { @@ -117690,7 +117762,7 @@ } }, { - "description": "Order in which Elasticsearch applies this template if index\nmatches multiple templates.\n\nTemplates with lower 'order' values are merged first. 
Templates with higher\n'order' values are merged later, overriding templates with lower values.", + "description": "Order in which Elasticsearch applies this template if index\r\nmatches multiple templates.\r\n\r\nTemplates with lower 'order' values are merged first. Templates with higher\r\n'order' values are merged later, overriding templates with lower values.", "name": "order", "required": false, "type": { @@ -117721,7 +117793,7 @@ } }, { - "description": "Version number used to manage index templates externally. This number\nis not automatically generated by Elasticsearch.", + "description": "Version number used to manage index templates externally. This number\r\nis not automatically generated by Elasticsearch.", "name": "version", "required": false, "type": { @@ -117786,7 +117858,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response is\nreceived before the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node. If no response is\r\nreceived before the timeout expires, the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -117810,7 +117882,7 @@ } }, { - "description": "Order in which Elasticsearch applies this template if index\nmatches multiple templates.\n\nTemplates with lower 'order' values are merged first. Templates with higher\n'order' values are merged later, overriding templates with lower values.", + "description": "Order in which Elasticsearch applies this template if index\r\nmatches multiple templates.\r\n\r\nTemplates with lower 'order' values are merged first. Templates with higher\r\n'order' values are merged later, overriding templates with lower values.", "name": "order", "required": false, "type": { @@ -120305,7 +120377,7 @@ ], "query": [ { - "description": "If false, the request returns an error if any wildcard expression, index alias, or _all\nvalue targets only missing or closed indices. This behavior applies even if the request\ntargets other open indices.", + "description": "If false, the request returns an error if any wildcard expression, index alias, or _all\r\nvalue targets only missing or closed indices. This behavior applies even if the request\r\ntargets other open indices.", "name": "allow_no_indices", "required": false, "type": { @@ -120317,7 +120389,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target data streams,\nthis argument determines whether wildcard expressions match hidden data streams.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams,\r\nthis argument determines whether wildcard expressions match hidden data streams.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -120930,7 +121002,7 @@ }, { "docId": "mapping-meta-field", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html\r", "name": "_meta", "required": false, "type": { @@ -120971,7 +121043,7 @@ ], "query": [ { - "description": "If `true`, the template passed in the body is only used if no existing\ntemplates match the same index patterns. If `false`, the simulation uses\nthe template with the highest priority. 
Note that the template is not\npermanently added or updated in either case; it is only used for the\nsimulation.", + "description": "If `true`, the template passed in the body is only used if no existing\r\ntemplates match the same index patterns. If `false`, the simulation uses\r\nthe template with the highest priority. Note that the template is not\r\npermanently added or updated in either case; it is only used for the\r\nsimulation.", "name": "create", "required": false, "serverDefault": false, @@ -120984,7 +121056,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response is received\nbefore the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node. If no response is received\r\nbefore the timeout expires, the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -121099,7 +121171,7 @@ }, "path": [ { - "description": "Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit\nthis parameter and specify the template configuration in the request body.", + "description": "Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit\r\nthis parameter and specify the template configuration in the request body.", "name": "name", "required": false, "type": { @@ -121895,7 +121967,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as `open,hidden`.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\r\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\r\nsuch as `open,hidden`.", "name": "expand_wildcards", "required": false, "type": { @@ -124226,7 +124298,7 @@ } }, { - "description": "How to round the date when formatting the date into the index name. Valid values are:\n`y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).\nSupports template snippets.", + "description": "How to round the date when formatting the date into the index name. Valid values are:\r\n`y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).\r\nSupports template snippets.", "name": "date_rounding", "required": true, "type": { @@ -128059,7 +128131,7 @@ "body": { "kind": "no_body" }, - "description": "This API returns information about the type of license, when it was issued, and when it expires, for example.\nFor more information about the different types of licenses, see https://www.elastic.co/subscriptions.", + "description": "This API returns information about the type of license, when it was issued, and when it expires, for example.\r\nFor more information about the different types of licenses, see https://www.elastic.co/subscriptions.", "inherits": { "type": { "name": "RequestBase", @@ -128078,7 +128150,7 @@ "description": "", "version": "7.6.0" }, - "description": "If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. 
This behavior is maintained for backwards compatibility.\nThis parameter is deprecated and will always be set to true in 8.x.", + "description": "If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.\r\nThis parameter is deprecated and will always be set to true in 8.x.", "name": "accept_enterprise", "required": false, "serverDefault": true, @@ -128377,7 +128449,7 @@ "body": { "kind": "no_body" }, - "description": "The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.\nTo check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html).", + "description": "The start basic API enables you to initiate an indefinite basic license, which gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.\r\nTo check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html).", "inherits": { "type": { "name": "RequestBase", @@ -129434,7 +129506,7 @@ }, "properties": [ { - "description": "The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a\nwhole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation.", + "description": "The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a\r\nwhole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation.", "name": "bucket_span", "required": false, "serverDefault": "5m", @@ -129607,7 +129679,7 @@ }, "properties": [ { - "description": "The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a\nwhole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation.", + "description": "The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a\r\nwhole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation.", "name": "bucket_span", "required": true, "serverDefault": "5m", @@ -130505,7 +130577,7 @@ }, "properties": [ { - "description": "A normalized score between 0-100, which is calculated for each bucket influencer. 
This score might be updated as\nnewer data is analyzed.", + "description": "A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as\r\nnewer data is analyzed.", "name": "anomaly_score", "required": true, "type": { @@ -130550,7 +130622,7 @@ } }, { - "description": "The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the\ntime the bucket was processed.", + "description": "The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the\r\ntime the bucket was processed.", "name": "initial_anomaly_score", "required": true, "type": { @@ -130586,7 +130658,7 @@ } }, { - "description": "The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision\nof over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of\nthis.", + "description": "The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision\r\nof over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of\r\nthis.", "name": "probability", "required": true, "type": { @@ -130665,7 +130737,7 @@ }, "properties": [ { - "description": "The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited\nscore for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as\nnew data is analyzed.", + "description": "The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited\r\nscore for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as\r\nnew data is analyzed.", "name": "anomaly_score", "required": true, "type": { @@ -130724,7 +130796,7 @@ } }, { - "description": "The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the\ntime the bucket was processed.", + "description": "The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the\r\ntime the bucket was processed.", "name": "initial_anomaly_score", "required": true, "type": { @@ -130793,7 +130865,7 @@ } }, { - "description": "The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the\ntimestamp of the bucket are included in the results for the bucket.", + "description": "The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the\r\ntimestamp of the bucket are included in the results for the bucket.", "name": "timestamp", "required": true, "type": { @@ -130814,7 +130886,7 @@ } }, { - "description": "The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the\ntimestamp of the bucket are included in the results for the bucket.", + "description": "The start time of the bucket. This timestamp uniquely identifies the bucket. 
Events that occur exactly at the\r\ntimestamp of the bucket are included in the results for the bucket.", "name": "timestamp_string", "required": false, "type": { @@ -131185,7 +131257,7 @@ }, "properties": [ { - "description": "If the mode is `auto`, the chunk size is dynamically calculated;\nthis is the recommended value when the datafeed does not use aggregations.\nIf the mode is `manual`, chunking is applied according to the specified `time_span`;\nuse this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied.", + "description": "If the mode is `auto`, the chunk size is dynamically calculated;\r\nthis is the recommended value when the datafeed does not use aggregations.\r\nIf the mode is `manual`, chunking is applied according to the specified `time_span`;\r\nuse this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied.", "name": "mode", "required": true, "type": { @@ -131253,7 +131325,7 @@ { "description": "Specifies the maximum number of feature importance values per document.", "docId": "ml-feature-importance", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-feature-importance.html", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-feature-importance.html\r", "name": "num_top_feature_importance_values", "required": false, "serverDefault": 0, @@ -132356,7 +132428,7 @@ } }, { - "description": "Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as continuous target variable.\nFor classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field.\nFor regression analysis, the data type of the field must be numeric.", + "description": "Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as continuous target variable.\r\nFor classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field.\r\nFor regression analysis, the data type of the field must be numeric.", "name": "dependent_variable", "required": true, "type": { @@ -132662,7 +132734,7 @@ { "description": "The configuration information necessary to perform classification.", "docId": "ml-classification", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-dfa-classification.html", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-dfa-classification.html\r", "name": "classification", "required": false, "type": { @@ -132676,7 +132748,7 @@ { "description": "The configuration information necessary to perform outlier detection. NOTE: Advanced parameters are for fine-tuning classification analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. 
It is highly recommended to use the default values unless you fully understand the function of these parameters.", "docId": "ml-classification", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-dfa-classification.html", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-dfa-classification.html\r", "name": "outlier_detection", "required": false, "type": { @@ -132690,7 +132762,7 @@ { "description": "The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters.", "docId": "ml-regression", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-dfa-regression.html", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-dfa-regression.html\r", "name": "regression", "required": false, "type": { @@ -133504,7 +133576,7 @@ { "description": "The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {\"match_all\": {}}.", "docId": "query-dsl", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html\r", "name": "query", "required": false, "type": { @@ -134551,7 +134623,7 @@ }, "properties": [ { - "description": "The window of time that is searched for late data. This window of time ends with the latest finalized bucket.\nIt defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs.\nIn particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`.", + "description": "The window of time that is searched for late data. This window of time ends with the latest finalized bucket.\r\nIt defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs.\r\nIn particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`.", "name": "check_window", "required": false, "type": { @@ -135104,7 +135176,7 @@ }, "properties": [ { - "description": "The string/token which will be removed from incoming documents and replaced with the inference prediction(s).\nIn a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer\nhas a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests.\nHowever, if this field is present in a request, its value must match the predefined value for that model/tokenizer,\notherwise the request will fail.", + "description": "The string/token which will be removed from incoming documents and replaced with the inference prediction(s).\r\nIn a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer\r\nhas a predefined mask token which cannot be changed. 
Thus, it is recommended not to set this value in requests.\r\nHowever, if this field is present in a request, its value must match the predefined value for that model/tokenizer,\r\notherwise the request will fail.", "name": "mask_token", "required": false, "type": { @@ -135344,7 +135416,7 @@ { "description": "A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization.", "docId": "ml-regression-loss", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/dfa-regression-lossfunction.html", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/dfa-regression-lossfunction.html\r", "name": "absolute_importance", "required": false, "type": { @@ -135582,11 +135654,11 @@ "name": "feature_importance_baseline" }, { - "description": "Includes the information about hyperparameters used to train the model.\nThis information consists of the value, the absolute and relative\nimportance of the hyperparameter as well as an indicator of whether it was\nspecified by the user or tuned during hyperparameter optimization.", + "description": "Includes the information about hyperparameters used to train the model.\r\nThis information consists of the value, the absolute and relative\r\nimportance of the hyperparameter as well as an indicator of whether it was\r\nspecified by the user or tuned during hyperparameter optimization.", "name": "hyperparameters" }, { - "description": "Includes the total feature importance for the training data set. The\nbaseline and total feature importance values are returned in the metadata\nfield in the response body.", + "description": "Includes the total feature importance for the training data set. The\r\nbaseline and total feature importance values are returned in the metadata\r\nfield in the response body.", "name": "total_feature_importance" }, { @@ -135946,7 +136018,7 @@ } }, { - "description": "Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property\nis present only when it is true.", + "description": "Indicates whether the input text was truncated to meet the model's maximum sequence length limit. 
This property\r\nis present only when it is true.", "name": "is_truncated", "required": false, "type": { @@ -135958,7 +136030,7 @@ } }, { - "description": "If the model is trained for a text classification or zero shot classification task, the response is the\npredicted class.\nFor named entity recognition (NER) tasks, it contains the annotated text output.\nFor fill mask tasks, it contains the top prediction for replacing the mask token.\nFor text embedding tasks, it contains the raw numerical text embedding values.\nFor regression models, its a numerical value\nFor classification models, it may be an integer, double, boolean or string depending on prediction type", + "description": "If the model is trained for a text classification or zero shot classification task, the response is the\r\npredicted class.\r\nFor named entity recognition (NER) tasks, it contains the annotated text output.\r\nFor fill mask tasks, it contains the top prediction for replacing the mask token.\r\nFor text embedding tasks, it contains the raw numerical text embedding values.\r\nFor regression models, its a numerical value\r\nFor classification models, it may be an integer, double, boolean or string depending on prediction type", "name": "predicted_value", "required": false, "type": { @@ -135973,7 +136045,7 @@ } }, { - "description": "For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted\nvalue.\nAdditionally", + "description": "For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted\r\nvalue.\r\nAdditionally", "name": "predicted_value_sequence", "required": false, "type": { @@ -136009,7 +136081,7 @@ } }, { - "description": "For fill mask, text classification, and zero shot classification tasks, the response contains a list of top\nclass entries.", + "description": "For fill mask, text classification, and zero shot classification tasks, the response contains a list of top\r\nclass entries.", "name": "top_classes", "required": false, "type": { @@ -136117,7 +136189,7 @@ } }, { - "description": "A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated\nacross detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new\ndata is analyzed.", + "description": "A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated\r\nacross detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new\r\ndata is analyzed.", "name": "influencer_score", "required": true, "type": { @@ -136153,7 +136225,7 @@ } }, { - "description": "A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors.\nThis is the initial value that was calculated at the time the bucket was processed.", + "description": "A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors.\r\nThis is the initial value that was calculated at the time the bucket was processed.", "name": "initial_influencer_score", "required": true, "type": { @@ -136189,7 +136261,7 @@ } }, { - "description": "The probability that the influencer has this behavior, in the range 0 to 1. 
This value can be held to a high\nprecision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly\ninterpretation of this value.", + "description": "The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high\r\nprecision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly\r\ninterpretation of this value.", "name": "probability", "required": true, "type": { @@ -136234,7 +136306,7 @@ } }, { - "description": "Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s\nanalyzing `user_name` as an influencer, a field `user_name` is added to the result document. This\ninformation enables you to filter the anomaly results more easily.", + "description": "Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s\r\nanalyzing `user_name` as an influencer, a field `user_name` is added to the result document. This\r\ninformation enables you to filter the anomaly results more easily.", "name": "foo", "required": false, "type": { @@ -136868,7 +136940,7 @@ "name": "opened" }, { - "description": "The job did not finish successfully due to an error.\nThis situation can occur due to invalid input data, a fatal error occurring during the analysis, or an external interaction such as the process being killed by the Linux out of memory (OOM) killer.\nIf the job had irrevocably failed, it must be force closed and then deleted.\nIf the datafeed can be corrected, the job can be closed and then re-opened.", + "description": "The job did not finish successfully due to an error.\r\nThis situation can occur due to invalid input data, a fatal error occurring during the analysis, or an external interaction such as the process being killed by the Linux out of memory (OOM) killer.\r\nIf the job had irrevocably failed, it must be force closed and then deleted.\r\nIf the datafeed can be corrected, the job can be closed and then re-opened.", "name": "failed" }, { @@ -138571,7 +138643,7 @@ { "description": "Specifies the maximum number of feature importance values per document.", "docId": "ml-feature-importance", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-feature-importance.html", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-feature-importance.html\r", "name": "num_top_feature_importance_values", "required": false, "serverDefault": 0, @@ -139445,7 +139517,7 @@ }, "properties": [ { - "description": "The reason for the current state. It is usually populated only when the\n`routing_state` is `failed`.", + "description": "The reason for the current state. 
It is usually populated only when the\r\n`routing_state` is `failed`.", "name": "reason", "required": true, "type": { @@ -140279,7 +140351,7 @@ } }, { - "description": "The sum of `rejected_execution_count` for all nodes in the deployment.\nIndividual nodes reject an inference request if the inference queue is full.\nThe queue size is controlled by the `queue_capacity` setting in the start\ntrained model deployment API.", + "description": "The sum of `rejected_execution_count` for all nodes in the deployment.\r\nIndividual nodes reject an inference request if the inference queue is full.\r\nThe queue size is controlled by the `queue_capacity` setting in the start\r\ntrained model deployment API.", "name": "rejected_execution_count", "required": true, "type": { @@ -140291,7 +140363,7 @@ } }, { - "description": "The reason for the current deployment state. Usually only populated when\nthe model is not deployed to a node.", + "description": "The reason for the current deployment state. Usually only populated when\r\nthe model is not deployed to a node.", "name": "reason", "required": true, "type": { @@ -140513,9 +140585,9 @@ }, "properties": [ { - "description": "The number of times the model was loaded for inference and was not retrieved from the cache.\nIf this number is close to the `inference_count`, the cache is not being appropriately used.\nThis can be solved by increasing the cache size or its time-to-live (TTL).\nRefer to general machine learning settings for the appropriate settings.", + "description": "The number of times the model was loaded for inference and was not retrieved from the cache.\r\nIf this number is close to the `inference_count`, the cache is not being appropriately used.\r\nThis can be solved by increasing the cache size or its time-to-live (TTL).\r\nRefer to general machine learning settings for the appropriate settings.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "cache_miss_count", "required": true, "type": { @@ -140539,7 +140611,7 @@ } }, { - "description": "The total number of times the model has been called for inference.\nThis is across all inference contexts, including all pipelines.", + "description": "The total number of times the model has been called for inference.\r\nThis is across all inference contexts, including all pipelines.", "name": "inference_count", "required": true, "type": { @@ -140685,9 +140757,9 @@ } }, { - "description": "A collection of ingest stats for the model across all nodes.\nThe values are summations of the individual node statistics.\nThe format matches the ingest section in the nodes stats API.", + "description": "A collection of ingest stats for the model across all nodes.\r\nThe values are summations of the individual node statistics.\r\nThe format matches the ingest section in the nodes stats API.", "docId": "cluster-nodes-stats", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-nodes-stats.html\r", "name": "ingest", "required": false, "type": { @@ -140756,7 +140828,7 @@ "name": "lang_ident" }, { - "description": "The stored definition is a PyTorch (specifically a 
TorchScript) model.\r\nCurrently only NLP models are supported.", "name": "pytorch" } ], @@ -140923,7 +140995,7 @@ } }, { - "description": "The zero shot classification labels indicating entailment, neutral, and contradiction\nMust contain exactly and only entailment, neutral, and contradiction", + "description": "The zero shot classification labels indicating entailment, neutral, and contradiction\r\nMust contain exactly and only entailment, neutral, and contradiction", "name": "classification_labels", "required": true, "type": { @@ -141048,7 +141120,7 @@ "body": { "kind": "no_body" }, - "description": "Clears a trained model deployment cache on all nodes where the trained model is assigned.\nA trained model deployment may have an inference cache enabled.\nAs requests are handled by each allocated node, their responses may be cached on that individual node.\nCalling this API clears the caches without restarting the deployment.", + "description": "Clears a trained model deployment cache on all nodes where the trained model is assigned.\r\nA trained model deployment may have an inference cache enabled.\r\nAs requests are handled by each allocated node, their responses may be cached on that individual node.\r\nCalling this API clears the caches without restarting the deployment.", "inherits": { "type": { "name": "RequestBase", @@ -141149,7 +141221,7 @@ } ] }, - "description": "Closes one or more anomaly detection jobs.\nA job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.", + "description": "Closes one or more anomaly detection jobs.\r\nA job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\r\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\r\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. 
This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\r\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.", "inherits": { "type": { "name": "RequestBase", @@ -141177,7 +141249,7 @@ ], "query": [ { - "description": "Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no jobs that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty jobs array when there are no matches and the subset of results when there are partial matches.\r\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -141190,7 +141262,7 @@ } }, { - "description": "Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots.\nIf you want the job to be in a consistent state after the close job API returns, do not set to `true`. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future.", + "description": "Use to close a failed job, or to forcefully close a job which has not responded to its initial close request; the request returns without performing the associated actions such as flushing buffers and persisting the model snapshots.\r\nIf you want the job to be in a consistent state after the close job API returns, do not set to `true`. This parameter should be used only in situations where the job has already failed or where you are not interested in results the job might have recently produced or might produce in the future.", "name": "force", "required": false, "serverDefault": false, @@ -141395,7 +141467,7 @@ } }, { - "description": "An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a\ncomma-separated list of jobs or groups.", + "description": "An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a\r\ncomma-separated list of jobs or groups.", "name": "job_id", "required": true, "type": { @@ -141561,7 +141633,7 @@ }, "path": [ { - "description": "A numerical character string that uniquely identifies the datafeed. This\nidentifier can contain lowercase alphanumeric characters (a-z and 0-9),\nhyphens, and underscores. It must start and end with alphanumeric\ncharacters.", + "description": "A numerical character string that uniquely identifies the datafeed. This\r\nidentifier can contain lowercase alphanumeric characters (a-z and 0-9),\r\nhyphens, and underscores. 
It must start and end with alphanumeric\r\ncharacters.", "name": "datafeed_id", "required": true, "type": { @@ -141575,7 +141647,7 @@ ], "query": [ { - "description": "Use to forcefully delete a started datafeed; this method is quicker than\nstopping and deleting the datafeed.", + "description": "Use to forcefully delete a started datafeed; this method is quicker than\r\nstopping and deleting the datafeed.", "name": "force", "required": false, "type": { @@ -141615,7 +141687,7 @@ "kind": "properties", "properties": [ { - "description": "The desired requests per second for the deletion processes. The default\nbehavior is no throttling.", + "description": "The desired requests per second for the deletion processes. The default\r\nbehavior is no throttling.", "name": "requests_per_second", "required": false, "type": { @@ -141641,7 +141713,7 @@ } ] }, - "description": "Deletes expired and unused machine learning data.\nDeletes all job results, model snapshots and forecast data that have exceeded\ntheir retention days period. Machine learning state documents that are not\nassociated with any job are also deleted.\nYou can limit the request to a single or set of anomaly detection jobs by\nusing a job identifier, a group name, a comma-separated list of jobs, or a\nwildcard expression. You can delete expired data for all anomaly detection\njobs by using _all, by specifying * as the , or by omitting the\n.", + "description": "Deletes expired and unused machine learning data.\r\nDeletes all job results, model snapshots and forecast data that have exceeded\r\ntheir retention days period. Machine learning state documents that are not\r\nassociated with any job are also deleted.\r\nYou can limit the request to a single or set of anomaly detection jobs by\r\nusing a job identifier, a group name, a comma-separated list of jobs, or a\r\nwildcard expression. You can delete expired data for all anomaly detection\r\njobs by using _all, by specifying * as the , or by omitting the\r\n.", "inherits": { "type": { "name": "RequestBase", @@ -141655,7 +141727,7 @@ }, "path": [ { - "description": "Identifier for an anomaly detection job. It can be a job identifier, a\ngroup name, or a wildcard expression.", + "description": "Identifier for an anomaly detection job. It can be a job identifier, a\r\ngroup name, or a wildcard expression.", "name": "job_id", "required": false, "type": { @@ -141669,7 +141741,7 @@ ], "query": [ { - "description": "The desired requests per second for the deletion processes. The default\nbehavior is no throttling.", + "description": "The desired requests per second for the deletion processes. The default\r\nbehavior is no throttling.", "name": "requests_per_second", "required": false, "type": { @@ -141727,7 +141799,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes a filter.\nIf an anomaly detection job references the filter, you cannot delete the\nfilter. You must update or delete the job before you can delete the filter.", + "description": "Deletes a filter.\r\nIf an anomaly detection job references the filter, you cannot delete the\r\nfilter. You must update or delete the job before you can delete the filter.", "inherits": { "type": { "name": "RequestBase", @@ -141781,7 +141853,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes forecasts from a machine learning job.\nBy default, forecasts are retained for 14 days. You can specify a\ndifferent retention period with the `expires_in` parameter in the forecast\njobs API. 
The delete forecast API enables you to delete one or more\nforecasts before they expire.", + "description": "Deletes forecasts from a machine learning job.\r\nBy default, forecasts are retained for 14 days. You can specify a\r\ndifferent retention period with the `expires_in` parameter in the forecast\r\njobs API. The delete forecast API enables you to delete one or more\r\nforecasts before they expire.", "inherits": { "type": { "name": "RequestBase", @@ -141807,7 +141879,7 @@ } }, { - "description": "A comma-separated list of forecast identifiers. If you do not specify\nthis optional parameter or if you specify `_all` or `*` the API deletes\nall forecasts from the job.", + "description": "A comma-separated list of forecast identifiers. If you do not specify\r\nthis optional parameter or if you specify `_all` or `*` the API deletes\r\nall forecasts from the job.", "name": "forecast_id", "required": false, "type": { @@ -141821,7 +141893,7 @@ ], "query": [ { - "description": "Specifies whether an error occurs when there are no forecasts. In\nparticular, if this parameter is set to `false` and there are no\nforecasts associated with the job, attempts to delete all forecasts\nreturn an error.", + "description": "Specifies whether an error occurs when there are no forecasts. In\r\nparticular, if this parameter is set to `false` and there are no\r\nforecasts associated with the job, attempts to delete all forecasts\r\nreturn an error.", "name": "allow_no_forecasts", "required": false, "serverDefault": true, @@ -141834,7 +141906,7 @@ } }, { - "description": "Specifies the period of time to wait for the completion of the delete\noperation. When this period of time elapses, the API fails and returns an\nerror.", + "description": "Specifies the period of time to wait for the completion of the delete\r\noperation. When this period of time elapses, the API fails and returns an\r\nerror.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -141874,7 +141946,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes an anomaly detection job.\n\nAll job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.", + "description": "Deletes an anomaly detection job.\r\n\r\nAll job configuration, model state and results are deleted.\r\nIt is not currently possible to delete multiple jobs using wildcards or a\r\ncomma separated list. If you delete a job that has a datafeed, the request\r\nfirst tries to delete the datafeed. 
This behavior is equivalent to calling\r\nthe delete datafeed API with the same timeout and force parameters as the\r\ndelete job request.", "inherits": { "type": { "name": "RequestBase", @@ -141902,7 +141974,7 @@ ], "query": [ { - "description": "Use to forcefully delete an opened job; this method is quicker than\nclosing and deleting the job.", + "description": "Use to forcefully delete an opened job; this method is quicker than\r\nclosing and deleting the job.", "name": "force", "required": false, "type": { @@ -141914,7 +141986,7 @@ } }, { - "description": "Specifies whether annotations that have been added by the\nuser should be deleted along with any auto-generated annotations when the job is\nreset.", + "description": "Specifies whether annotations that have been added by the\r\nuser should be deleted along with any auto-generated annotations when the job is\r\nreset.", "name": "delete_user_annotations", "required": false, "serverDefault": false, @@ -141927,7 +141999,7 @@ } }, { - "description": "Specifies whether the request should return immediately or wait until the\njob deletion completes.", + "description": "Specifies whether the request should return immediately or wait until the\r\njob deletion completes.", "name": "wait_for_completion", "required": false, "serverDefault": true, @@ -141967,7 +142039,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes an existing model snapshot.\nYou cannot delete the active model snapshot. To delete that snapshot, first\nrevert to a different one. To identify the active model snapshot, refer to\nthe `model_snapshot_id` in the results from the get jobs API.", + "description": "Deletes an existing model snapshot.\r\nYou cannot delete the active model snapshot. To delete that snapshot, first\r\nrevert to a different one. To identify the active model snapshot, refer to\r\nthe `model_snapshot_id` in the results from the get jobs API.", "inherits": { "type": { "name": "RequestBase", @@ -142033,7 +142105,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes an existing trained inference model that is currently not referenced\nby an ingest pipeline.", + "description": "Deletes an existing trained inference model that is currently not referenced\r\nby an ingest pipeline.", "inherits": { "type": { "name": "RequestBase", @@ -142100,7 +142172,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes a trained model alias.\nThis API deletes an existing model alias that refers to a trained model. If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.", + "description": "Deletes a trained model alias.\r\nThis API deletes an existing model alias that refers to a trained model. 
If\r\nthe model alias is missing or refers to a model other than the one identified\r\nby the `model_id`, this API returns an error.", "inherits": { "type": { "name": "RequestBase", @@ -142167,7 +142239,7 @@ "kind": "properties", "properties": [ { - "description": "For a list of the properties that you can specify in the\n`analysis_config` component of the body of this API.", + "description": "For a list of the properties that you can specify in the\r\n`analysis_config` component of the body of this API.", "name": "analysis_config", "required": false, "type": { @@ -142179,7 +142251,7 @@ } }, { - "description": "Estimates of the highest cardinality in a single bucket that is observed\nfor influencer fields over the time period that the job analyzes data.\nTo produce a good answer, values must be provided for all influencer\nfields. Providing values for fields that are not listed as `influencers`\nhas no effect on the estimation.", + "description": "Estimates of the highest cardinality in a single bucket that is observed\r\nfor influencer fields over the time period that the job analyzes data.\r\nTo produce a good answer, values must be provided for all influencer\r\nfields. Providing values for fields that are not listed as `influencers`\r\nhas no effect on the estimation.", "name": "max_bucket_cardinality", "required": false, "type": { @@ -142202,7 +142274,7 @@ } }, { - "description": "Estimates of the cardinality that is observed for fields over the whole\ntime period that the job analyzes data. To produce a good answer, values\nmust be provided for fields referenced in the `by_field_name`,\n`over_field_name` and `partition_field_name` of any detectors. Providing\nvalues for other fields has no effect on the estimation. It can be\nomitted from the request if no detectors have a `by_field_name`,\n`over_field_name` or `partition_field_name`.", + "description": "Estimates of the cardinality that is observed for fields over the whole\r\ntime period that the job analyzes data. To produce a good answer, values\r\nmust be provided for fields referenced in the `by_field_name`,\r\n`over_field_name` and `partition_field_name` of any detectors. Providing\r\nvalues for other fields has no effect on the estimation. It can be\r\nomitted from the request if no detectors have a `by_field_name`,\r\n`over_field_name` or `partition_field_name`.", "name": "overall_cardinality", "required": false, "type": { @@ -142226,7 +142298,7 @@ } ] }, - "description": "Makes an estimation of the memory usage for an anomaly detection job model.\nIt is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.", + "description": "Makes an estimation of the memory usage for an anomaly detection job model.\r\nIt is based on analysis configuration details for the job and cardinality\r\nestimates for the fields it references.", "inherits": { "type": { "name": "RequestBase", @@ -142918,7 +142990,7 @@ { "description": "A query clause that retrieves a subset of data from the source index.", "docId": "query-dsl", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html\r", "name": "query", "required": false, "type": { @@ -142931,7 +143003,7 @@ } ] }, - "description": "Evaluates the data frame analytics for an annotated index.\nThe API packages together commonly used evaluation metrics for various types\nof machine learning features. 
This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.", + "description": "Evaluates the data frame analytics for an annotated index.\r\nThe API packages together commonly used evaluation metrics for various types\r\nof machine learning features. This has been designed for use on indexes\r\ncreated by data frame analytics. Evaluation requires both a ground truth\r\nfield and an analytics result field to be present.", "inherits": { "type": { "name": "RequestBase", @@ -143001,7 +143073,7 @@ "kind": "properties", "properties": [ { - "description": "The configuration of how to source the analysis data. It requires an\nindex. Optionally, query and _source may be specified.", + "description": "The configuration of how to source the analysis data. It requires an\r\nindex. Optionally, query and _source may be specified.", "name": "source", "required": false, "type": { @@ -143013,7 +143085,7 @@ } }, { - "description": "The destination configuration, consisting of index and optionally\nresults_field (ml by default).", + "description": "The destination configuration, consisting of index and optionally\r\nresults_field (ml by default).", "name": "dest", "required": false, "type": { @@ -143025,7 +143097,7 @@ } }, { - "description": "The analysis configuration, which contains the information necessary to\nperform one of the following types of analysis: classification, outlier\ndetection, or regression.", + "description": "The analysis configuration, which contains the information necessary to\r\nperform one of the following types of analysis: classification, outlier\r\ndetection, or regression.", "name": "analysis", "required": false, "type": { @@ -143049,9 +143121,9 @@ } }, { - "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to\ncreate data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "description": "The approximate maximum amount of memory resources that are permitted for\r\nanalytical processing. If your `elasticsearch.yml` file contains an\r\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to\r\ncreate data frame analytics jobs that have `model_memory_limit` values\r\ngreater than that setting.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "model_memory_limit", "required": false, "serverDefault": "1gb", @@ -143064,7 +143136,7 @@ } }, { - "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "description": "The maximum number of threads to be used by the analysis. Using more\r\nthreads may decrease the time necessary to complete the analysis at the\r\ncost of using more CPU. 
Note that the process may use additional threads\r\nfor operational functionality other than the analysis itself.", "name": "max_num_threads", "required": false, "serverDefault": 1, @@ -143077,7 +143149,7 @@ } }, { - "description": "Specify includes and/or excludes patterns to select which fields will be\nincluded in the analysis. The patterns specified in excludes are applied\nlast, therefore excludes takes precedence. In other words, if the same\nfield is specified in both includes and excludes, then the field will not\nbe included in the analysis.", + "description": "Specify includes and/or excludes patterns to select which fields will be\r\nincluded in the analysis. The patterns specified in excludes are applied\r\nlast, therefore excludes takes precedence. In other words, if the same\r\nfield is specified in both includes and excludes, then the field will not\r\nbe included in the analysis.", "name": "analyzed_fields", "required": false, "type": { @@ -143089,9 +143161,9 @@ } }, { - "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node.", + "description": "Specifies whether this job can start when there is insufficient machine\r\nlearning node capacity for it to be immediately assigned to a node.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "allow_lazy_start", "required": false, "serverDefault": false, @@ -143105,7 +143177,7 @@ } ] }, - "description": "Explains a data frame analytics config.\nThis API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", + "description": "Explains a data frame analytics config.\r\nThis API provides explanations for a data frame analytics config that either\r\nexists already or one that has not been created yet. The following\r\nexplanations are provided:\r\n* which fields are included or not in the analysis and why,\r\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\r\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", "inherits": { "type": { "name": "RequestBase", @@ -143119,7 +143191,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. This identifier can contain\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\nunderscores. It must start and end with alphanumeric characters.", + "description": "Identifier for the data frame analytics job. This identifier can contain\r\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\r\nunderscores. 
It must start and end with alphanumeric characters.", "name": "id", "required": false, "type": { @@ -143243,7 +143315,7 @@ } ] }, - "description": "Forces any buffered data to be processed by the job.\nThe flush jobs API is only applicable when sending data for analysis using\nthe post data API. Depending on the content of the buffer, then it might\nadditionally calculate new results. Both flush and close operations are\nsimilar, however the flush is more efficient if you are expecting to send\nmore data for analysis. When flushing, the job remains open and is available\nto continue analyzing data. A close operation additionally prunes and\npersists the model state to disk and the job must be opened again before\nanalyzing further data.", + "description": "Forces any buffered data to be processed by the job.\r\nThe flush jobs API is only applicable when sending data for analysis using\r\nthe post data API. Depending on the content of the buffer, then it might\r\nadditionally calculate new results. Both flush and close operations are\r\nsimilar, however the flush is more efficient if you are expecting to send\r\nmore data for analysis. When flushing, the job remains open and is available\r\nto continue analyzing data. A close operation additionally prunes and\r\npersists the model state to disk and the job must be opened again before\r\nanalyzing further data.", "inherits": { "type": { "name": "RequestBase", @@ -143271,7 +143343,7 @@ ], "query": [ { - "description": "Specifies to advance to a particular time value. Results are generated\nand the model is updated for data from the specified time interval.", + "description": "Specifies to advance to a particular time value. Results are generated\r\nand the model is updated for data from the specified time interval.", "name": "advance_time", "required": false, "type": { @@ -143283,7 +143355,7 @@ } }, { - "description": "If true, calculates the interim results for the most recent bucket or all\nbuckets within the latency period.", + "description": "If true, calculates the interim results for the most recent bucket or all\r\nbuckets within the latency period.", "name": "calc_interim", "required": false, "type": { @@ -143295,7 +143367,7 @@ } }, { - "description": "When used in conjunction with `calc_interim` and `start`, specifies the\nrange of buckets on which to calculate interim results.", + "description": "When used in conjunction with `calc_interim` and `start`, specifies the\r\nrange of buckets on which to calculate interim results.", "name": "end", "required": false, "type": { @@ -143307,7 +143379,7 @@ } }, { - "description": "Specifies to skip to a particular time value. Results are not generated\nand the model is not updated for data from the specified time interval.", + "description": "Specifies to skip to a particular time value. 
Results are not generated\r\nand the model is not updated for data from the specified time interval.", "name": "skip_time", "required": false, "type": { @@ -143319,7 +143391,7 @@ } }, { - "description": "When used in conjunction with `calc_interim`, specifies the range of\nbuckets on which to calculate interim results.", + "description": "When used in conjunction with `calc_interim`, specifies the range of\r\nbuckets on which to calculate interim results.", "name": "start", "required": false, "type": { @@ -143349,7 +143421,7 @@ } }, { - "description": "Provides the timestamp (in milliseconds since the epoch) of the end of\nthe last bucket that was processed.", + "description": "Provides the timestamp (in milliseconds since the epoch) of the end of\r\nthe last bucket that was processed.", "name": "last_finalized_bucket_end", "required": false, "type": { @@ -143417,7 +143489,7 @@ } ] }, - "description": "Predicts the future behavior of a time series by using its historical\nbehavior.\n\nForecasts are not supported for jobs that perform population analysis; an\nerror occurs if you try to create a forecast for a job that has an\n`over_field_name` in its configuration.", + "description": "Predicts the future behavior of a time series by using its historical\r\nbehavior.\r\n\r\nForecasts are not supported for jobs that perform population analysis; an\r\nerror occurs if you try to create a forecast for a job that has an\r\n`over_field_name` in its configuration.", "inherits": { "type": { "name": "RequestBase", @@ -143431,7 +143503,7 @@ }, "path": [ { - "description": "Identifier for the anomaly detection job. The job must be open when you\ncreate a forecast; otherwise, an error occurs.", + "description": "Identifier for the anomaly detection job. The job must be open when you\r\ncreate a forecast; otherwise, an error occurs.", "name": "job_id", "required": true, "type": { @@ -143445,7 +143517,7 @@ ], "query": [ { - "description": "A period of time that indicates how far into the future to forecast. For\nexample, `30d` corresponds to 30 days. The forecast starts at the last\nrecord that was processed.", + "description": "A period of time that indicates how far into the future to forecast. For\r\nexample, `30d` corresponds to 30 days. The forecast starts at the last\r\nrecord that was processed.", "name": "duration", "required": false, "serverDefault": "1d", @@ -143458,7 +143530,7 @@ } }, { - "description": "The period of time that forecast results are retained. After a forecast\nexpires, the results are deleted. If set to a value of 0, the forecast is\nnever automatically deleted.", + "description": "The period of time that forecast results are retained. After a forecast\r\nexpires, the results are deleted. If set to a value of 0, the forecast is\r\nnever automatically deleted.", "name": "expires_in", "required": false, "serverDefault": "14d", @@ -143471,7 +143543,7 @@ } }, { - "description": "The maximum memory the forecast can use. If the forecast needs to use\nmore than the provided amount, it will spool to disk. Default is 20mb,\nmaximum is 500mb and minimum is 1mb. If set to 40% or more of the job’s\nconfigured memory limit, it is automatically reduced to below that\namount.", + "description": "The maximum memory the forecast can use. If the forecast needs to use\r\nmore than the provided amount, it will spool to disk. Default is 20mb,\r\nmaximum is 500mb and minimum is 1mb. 
If set to 40% or more of the job’s\r\nconfigured memory limit, it is automatically reduced to below that\r\namount.", "name": "max_model_memory", "required": false, "serverDefault": "20mb", @@ -143632,7 +143704,7 @@ } ] }, - "description": "Retrieves anomaly detection job results for one or more buckets.\nThe API presents a chronological view of the records, grouped by bucket.", + "description": "Retrieves anomaly detection job results for one or more buckets.\r\nThe API presents a chronological view of the records, grouped by bucket.", "inherits": { "type": { "name": "RequestBase", @@ -143658,7 +143730,7 @@ } }, { - "description": "The timestamp of a single bucket result. If you do not specify this\nparameter, the API returns information about all buckets.", + "description": "The timestamp of a single bucket result. If you do not specify this\r\nparameter, the API returns information about all buckets.", "name": "timestamp", "required": false, "type": { @@ -143698,7 +143770,7 @@ } }, { - "description": "Returns buckets with timestamps earlier than this time. `-1` means it is\nunset and results are not limited to specific timestamps.", + "description": "Returns buckets with timestamps earlier than this time. `-1` means it is\r\nunset and results are not limited to specific timestamps.", "name": "end", "required": false, "serverDefault": "-1", @@ -143776,7 +143848,7 @@ } }, { - "description": "Returns buckets with timestamps after this time. `-1` means it is unset\nand results are not limited to specific timestamps.", + "description": "Returns buckets with timestamps after this time. `-1` means it is unset\r\nand results are not limited to specific timestamps.", "name": "start", "required": false, "serverDefault": "-1", @@ -144176,7 +144248,7 @@ } }, { - "description": "Identifier for the category, which is unique in the job. If you specify\nneither the category ID nor the partition_field_value, the API returns\ninformation about all categories. If you specify only the\npartition_field_value, it returns information about all categories for\nthe specified partition.", + "description": "Identifier for the category, which is unique in the job. If you specify\r\nneither the category ID nor the partition_field_value, the API returns\r\ninformation about all categories. If you specify only the\r\npartition_field_value, it returns information about all categories for\r\nthe specified partition.", "name": "category_id", "required": false, "type": { @@ -144275,7 +144347,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves configuration information for data frame analytics jobs.\nYou can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.", + "description": "Retrieves configuration information for data frame analytics jobs.\r\nYou can get information for multiple data frame analytics jobs in a single\r\nAPI request by using a comma-separated list of data frame analytics jobs or a\r\nwildcard expression.", "inherits": { "type": { "name": "RequestBase", @@ -144289,7 +144361,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. If you do not specify this\noption, the API returns information for the first hundred data frame\nanalytics jobs.", + "description": "Identifier for the data frame analytics job. 
If you do not specify this\r\noption, the API returns information for the first hundred data frame\r\nanalytics jobs.", "name": "id", "required": false, "type": { @@ -144303,7 +144375,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no data frame analytics\njobs that match.\n2. Contains the `_all` string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nThe default value returns an empty data_frame_analytics array when there\nare no matches and the subset of results when there are partial matches.\nIf this parameter is `false`, the request returns a 404 status code when\nthere are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no data frame analytics\r\njobs that match.\r\n2. Contains the `_all` string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value returns an empty data_frame_analytics array when there\r\nare no matches and the subset of results when there are partial matches.\r\nIf this parameter is `false`, the request returns a 404 status code when\r\nthere are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -144342,7 +144414,7 @@ } }, { - "description": "Indicates if certain fields should be removed from the configuration on\nretrieval. This allows the configuration to be in an acceptable format to\nbe retrieved and then added to another cluster.", + "description": "Indicates if certain fields should be removed from the configuration on\r\nretrieval. This allows the configuration to be in an acceptable format to\r\nbe retrieved and then added to another cluster.", "name": "exclude_generated", "required": false, "serverDefault": false, @@ -144417,7 +144489,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. If you do not specify this\noption, the API returns information for the first hundred data frame\nanalytics jobs.", + "description": "Identifier for the data frame analytics job. If you do not specify this\r\noption, the API returns information for the first hundred data frame\r\nanalytics jobs.", "name": "id", "required": false, "type": { @@ -144431,7 +144503,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no data frame analytics\njobs that match.\n2. Contains the `_all` string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nThe default value returns an empty data_frame_analytics array when there\nare no matches and the subset of results when there are partial matches.\nIf this parameter is `false`, the request returns a 404 status code when\nthere are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no data frame analytics\r\njobs that match.\r\n2. Contains the `_all` string or no identifiers and there are no matches.\r\n3. 
Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value returns an empty data_frame_analytics array when there\r\nare no matches and the subset of results when there are partial matches.\r\nIf this parameter is `false`, the request returns a 404 status code when\r\nthere are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -144531,7 +144603,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves usage information for datafeeds.\nYou can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "Retrieves usage information for datafeeds.\r\nYou can get statistics for multiple datafeeds in a single API request by\r\nusing a comma-separated list of datafeeds or a wildcard expression. You can\r\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\r\n``, or by omitting the ``. If the datafeed is stopped, the\r\nonly information you receive is the `datafeed_id` and the `state`.\r\nThis API returns a maximum of 10,000 datafeeds.", "inherits": { "type": { "name": "RequestBase", @@ -144545,7 +144617,7 @@ }, "path": [ { - "description": "Identifier for the datafeed. It can be a datafeed identifier or a\nwildcard expression. If you do not specify one of these options, the API\nreturns information about all datafeeds.", + "description": "Identifier for the datafeed. It can be a datafeed identifier or a\r\nwildcard expression. If you do not specify one of these options, the API\r\nreturns information about all datafeeds.", "name": "datafeed_id", "required": false, "type": { @@ -144559,7 +144631,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no datafeeds that match.\n2. Contains the `_all` string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nThe default value is `true`, which returns an empty `datafeeds` array\nwhen there are no matches and the subset of results when there are\npartial matches. If this parameter is `false`, the request returns a\n`404` status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no datafeeds that match.\r\n2. Contains the `_all` string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value is `true`, which returns an empty `datafeeds` array\r\nwhen there are no matches and the subset of results when there are\r\npartial matches. If this parameter is `false`, the request returns a\r\n`404` status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "type": { @@ -144618,7 +144690,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves configuration information for datafeeds.\nYou can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. 
You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "Retrieves configuration information for datafeeds.\r\nYou can get information for multiple datafeeds in a single API request by\r\nusing a comma-separated list of datafeeds or a wildcard expression. You can\r\nget information for all datafeeds by using `_all`, by specifying `*` as the\r\n``, or by omitting the ``.\r\nThis API returns a maximum of 10,000 datafeeds.", "inherits": { "type": { "name": "RequestBase", @@ -144632,7 +144704,7 @@ }, "path": [ { - "description": "Identifier for the datafeed. It can be a datafeed identifier or a\nwildcard expression. If you do not specify one of these options, the API\nreturns information about all datafeeds.", + "description": "Identifier for the datafeed. It can be a datafeed identifier or a\r\nwildcard expression. If you do not specify one of these options, the API\r\nreturns information about all datafeeds.", "name": "datafeed_id", "required": false, "type": { @@ -144646,7 +144718,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no datafeeds that match.\n2. Contains the `_all` string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nThe default value is `true`, which returns an empty `datafeeds` array\nwhen there are no matches and the subset of results when there are\npartial matches. If this parameter is `false`, the request returns a\n`404` status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no datafeeds that match.\r\n2. Contains the `_all` string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value is `true`, which returns an empty `datafeeds` array\r\nwhen there are no matches and the subset of results when there are\r\npartial matches. If this parameter is `false`, the request returns a\r\n`404` status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "type": { @@ -144658,7 +144730,7 @@ } }, { - "description": "Indicates if certain fields should be removed from the configuration on\nretrieval. This allows the configuration to be in an acceptable format to\nbe retrieved and then added to another cluster.", + "description": "Indicates if certain fields should be removed from the configuration on\r\nretrieval. This allows the configuration to be in an acceptable format to\r\nbe retrieved and then added to another cluster.", "name": "exclude_generated", "required": false, "serverDefault": false, @@ -144718,7 +144790,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves filters.\nYou can get a single filter or all filters.", + "description": "Retrieves filters.\r\nYou can get a single filter or all filters.", "inherits": { "type": { "name": "RequestBase", @@ -144832,7 +144904,7 @@ } ] }, - "description": "Retrieves anomaly detection job results for one or more influencers.\nInfluencers are the entities that have contributed to, or are to blame for,\nthe anomalies. 
Influencer results are available only if an\n`influencer_field_name` is specified in the job configuration.", + "description": "Retrieves anomaly detection job results for one or more influencers.\r\nInfluencers are the entities that have contributed to, or are to blame for,\r\nthe anomalies. Influencer results are available only if an\r\n`influencer_field_name` is specified in the job configuration.", "inherits": { "type": { "name": "RequestBase", @@ -144873,7 +144945,7 @@ } }, { - "description": "Returns influencers with timestamps earlier than this time.\nThe default value means it is unset and results are not limited to\nspecific timestamps.", + "description": "Returns influencers with timestamps earlier than this time.\r\nThe default value means it is unset and results are not limited to\r\nspecific timestamps.", "name": "end", "required": false, "serverDefault": "-1", @@ -144886,7 +144958,7 @@ } }, { - "description": "If true, the output excludes interim results. By default, interim results\nare included.", + "description": "If true, the output excludes interim results. By default, interim results\r\nare included.", "name": "exclude_interim", "required": false, "serverDefault": false, @@ -144899,7 +144971,7 @@ } }, { - "description": "Returns influencers with anomaly scores greater than or equal to this\nvalue.", + "description": "Returns influencers with anomaly scores greater than or equal to this\r\nvalue.", "name": "influencer_score", "required": false, "serverDefault": 0, @@ -144938,7 +145010,7 @@ } }, { - "description": "Specifies the sort field for the requested influencers. By default, the\ninfluencers are sorted by the `influencer_score` value.", + "description": "Specifies the sort field for the requested influencers. By default, the\r\ninfluencers are sorted by the `influencer_score` value.", "name": "sort", "required": false, "type": { @@ -144950,7 +145022,7 @@ } }, { - "description": "Returns influencers with timestamps after this time. The default value\nmeans it is unset and results are not limited to specific timestamps.", + "description": "Returns influencers with timestamps after this time. The default value\r\nmeans it is unset and results are not limited to specific timestamps.", "name": "start", "required": false, "serverDefault": "-1", @@ -145025,7 +145097,7 @@ }, "path": [ { - "description": "Identifier for the anomaly detection job. It can be a job identifier, a\ngroup name, a comma-separated list of jobs, or a wildcard expression. If\nyou do not specify one of these options, the API returns information for\nall anomaly detection jobs.", + "description": "Identifier for the anomaly detection job. It can be a job identifier, a\r\ngroup name, a comma-separated list of jobs, or a wildcard expression. If\r\nyou do not specify one of these options, the API returns information for\r\nall anomaly detection jobs.", "name": "job_id", "required": false, "type": { @@ -145039,7 +145111,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no jobs that match.\n2. Contains the _all string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nIf `true`, the API returns an empty `jobs` array when\nthere are no matches and the subset of results when there are partial\nmatches. If `false`, the API returns a `404` status\ncode when there are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. 
Contains wildcard expressions and there are no jobs that match.\r\n2. Contains the _all string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nIf `true`, the API returns an empty `jobs` array when\r\nthere are no matches and the subset of results when there are partial\r\nmatches. If `false`, the API returns a `404` status\r\ncode when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -145099,7 +145171,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves configuration information for anomaly detection jobs.\nYou can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "description": "Retrieves configuration information for anomaly detection jobs.\r\nYou can get information for multiple anomaly detection jobs in a single API\r\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\r\nexpression. You can get information for all anomaly detection jobs by using\r\n`_all`, by specifying `*` as the ``, or by omitting the ``.", "inherits": { "type": { "name": "RequestBase", @@ -145113,7 +145185,7 @@ }, "path": [ { - "description": "Identifier for the anomaly detection job. It can be a job identifier, a\ngroup name, or a wildcard expression. If you do not specify one of these\noptions, the API returns information for all anomaly detection jobs.", + "description": "Identifier for the anomaly detection job. It can be a job identifier, a\r\ngroup name, or a wildcard expression. If you do not specify one of these\r\noptions, the API returns information for all anomaly detection jobs.", "name": "job_id", "required": false, "type": { @@ -145127,7 +145199,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no jobs that match.\n2. Contains the _all string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nThe default value is `true`, which returns an empty `jobs` array when\nthere are no matches and the subset of results when there are partial\nmatches. If this parameter is `false`, the request returns a `404` status\ncode when there are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no jobs that match.\r\n2. Contains the _all string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value is `true`, which returns an empty `jobs` array when\r\nthere are no matches and the subset of results when there are partial\r\nmatches. If this parameter is `false`, the request returns a `404` status\r\ncode when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -145140,7 +145212,7 @@ } }, { - "description": "Indicates if certain fields should be removed from the configuration on\nretrieval. This allows the configuration to be in an acceptable format to\nbe retrieved and then added to another cluster.", + "description": "Indicates if certain fields should be removed from the configuration on\r\nretrieval. 
This allows the configuration to be in an acceptable format to\r\nbe retrieved and then added to another cluster.", "name": "exclude_generated", "required": false, "serverDefault": false, @@ -145413,7 +145485,7 @@ }, "properties": [ { - "description": "If the amount of physical memory has been overridden using the es.total_memory_bytes system property\nthen this reports the overridden value. Otherwise it reports the same value as total.", + "description": "If the amount of physical memory has been overridden using the es.total_memory_bytes system property\r\nthen this reports the overridden value. Otherwise it reports the same value as total.", "name": "adjusted_total", "required": false, "type": { @@ -145425,7 +145497,7 @@ } }, { - "description": "If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property\nthen this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`.", + "description": "If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property\r\nthen this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`.", "name": "adjusted_total_in_bytes", "required": true, "type": { @@ -145588,7 +145660,7 @@ "body": { "kind": "no_body" }, - "description": "Get information about how machine learning jobs and trained models are using memory,\non each node, both within the JVM heap, and natively, outside of the JVM.", + "description": "Get information about how machine learning jobs and trained models are using memory,\r\non each node, both within the JVM heap, and natively, outside of the JVM.", "inherits": { "type": { "name": "RequestBase", @@ -145602,7 +145674,7 @@ }, "path": [ { - "description": "The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or\n`ml:true`", + "description": "The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or\r\n`ml:true`", "name": "node_id", "required": false, "type": { @@ -145616,7 +145688,7 @@ ], "query": [ { - "description": "Specify this query parameter to include the fields with units in the response. Otherwise only\nthe `_in_bytes` sizes are returned in the response.", + "description": "Specify this query parameter to include the fields with units in the response. Otherwise only\r\nthe `_in_bytes` sizes are returned in the response.", "name": "human", "required": false, "type": { @@ -145628,7 +145700,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response is received before the timeout\nexpires, the request fails and returns an error.", + "description": "Period to wait for a connection to the master node. If no response is received before the timeout\r\nexpires, the request fails and returns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -145641,7 +145713,7 @@ } }, { - "description": "Period to wait for a response. If no response is received before the timeout expires, the request\nfails and returns an error.", + "description": "Period to wait for a response. If no response is received before the timeout expires, the request\r\nfails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -145746,7 +145818,7 @@ } }, { - "description": "A numerical character string that uniquely identifies the model snapshot. You can get information for multiple\nsnapshots by using a comma-separated list or a wildcard expression. 
You can get all snapshots by using `_all`,\nby specifying `*` as the snapshot ID, or by omitting the snapshot ID.", + "description": "A numerical character string that uniquely identifies the model snapshot. You can get information for multiple\r\nsnapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`,\r\nby specifying `*` as the snapshot ID, or by omitting the snapshot ID.", "name": "snapshot_id", "required": true, "type": { @@ -145760,7 +145832,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n - Contains wildcard expressions and there are no jobs that match.\n - Contains the _all string or no identifiers and there are no matches.\n - Contains wildcard expressions and there are only partial matches.\n\nThe default value is true, which returns an empty jobs array when there are no matches and the subset of results\nwhen there are partial matches. If this parameter is false, the request returns a 404 status code when there are\nno matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n - Contains wildcard expressions and there are no jobs that match.\r\n - Contains the _all string or no identifiers and there are no matches.\r\n - Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value is true, which returns an empty jobs array when there are no matches and the subset of results\r\nwhen there are partial matches. If this parameter is false, the request returns a 404 status code when there are\r\nno matches or only partial matches.", "name": "allow_no_match", "required": false, "type": { @@ -145907,7 +145979,7 @@ } }, { - "description": "A numerical character string that uniquely identifies the model snapshot. You can get information for multiple\nsnapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`,\nby specifying `*` as the snapshot ID, or by omitting the snapshot ID.", + "description": "A numerical character string that uniquely identifies the model snapshot. You can get information for multiple\r\nsnapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`,\r\nby specifying `*` as the snapshot ID, or by omitting the snapshot ID.", "name": "snapshot_id", "required": false, "type": { @@ -145972,7 +146044,7 @@ } }, { - "description": "Specifies the sort field for the requested snapshots. By default, the\nsnapshots are sorted by their timestamp.", + "description": "Specifies the sort field for the requested snapshots. By default, the\r\nsnapshots are sorted by their timestamp.", "name": "sort", "required": false, "type": { @@ -146144,7 +146216,7 @@ } ] }, - "description": "Retrieves overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. 
Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.", + "description": "Retrieves overall bucket results that summarize the bucket results of\r\nmultiple anomaly detection jobs.\r\n\r\nThe `overall_score` is calculated by combining the scores of all the\r\nbuckets within the overall bucket span. First, the maximum\r\n`anomaly_score` per anomaly detection job in the overall bucket is\r\ncalculated. Then the `top_n` of those scores are averaged to result in\r\nthe `overall_score`. This means that you can fine-tune the\r\n`overall_score` so that it is more or less sensitive to the number of\r\njobs that detect an anomaly at the same time. For example, if you set\r\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\r\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\r\nthe `overall_score` is high only when all jobs detect anomalies in that\r\noverall bucket. If you set the `bucket_span` parameter (to a value\r\ngreater than its default), the `overall_score` is the maximum\r\n`overall_score` of the overall buckets that have a span equal to the\r\njobs' largest bucket span.", "inherits": { "type": { "name": "RequestBase", @@ -146158,7 +146230,7 @@ }, "path": [ { - "description": "Identifier for the anomaly detection job. It can be a job identifier, a\ngroup name, a comma-separated list of jobs or groups, or a wildcard\nexpression.\n\nYou can summarize the bucket results for all anomaly detection jobs by\nusing `_all` or by specifying `*` as the ``.", + "description": "Identifier for the anomaly detection job. It can be a job identifier, a\r\ngroup name, a comma-separated list of jobs or groups, or a wildcard\r\nexpression.\r\n\r\nYou can summarize the bucket results for all anomaly detection jobs by\r\nusing `_all` or by specifying `*` as the ``.", "name": "job_id", "required": true, "type": { @@ -146172,7 +146244,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no jobs that match.\n2. Contains the `_all` string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nIf `true`, the request returns an empty `jobs` array when there are no\nmatches and the subset of results when there are partial matches. If this\nparameter is `false`, the request returns a `404` status code when there\nare no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no jobs that match.\r\n2. Contains the `_all` string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nIf `true`, the request returns an empty `jobs` array when there are no\r\nmatches and the subset of results when there are partial matches. If this\r\nparameter is `false`, the request returns a `404` status code when there\r\nare no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -146185,7 +146257,7 @@ } }, { - "description": "The span of the overall buckets. 
Must be greater or equal to the largest\nbucket span of the specified anomaly detection jobs, which is the default\nvalue.\n\nBy default, an overall bucket has a span equal to the largest bucket span\nof the specified anomaly detection jobs. To override that behavior, use\nthe optional `bucket_span` parameter.", + "description": "The span of the overall buckets. Must be greater or equal to the largest\r\nbucket span of the specified anomaly detection jobs, which is the default\r\nvalue.\r\n\r\nBy default, an overall bucket has a span equal to the largest bucket span\r\nof the specified anomaly detection jobs. To override that behavior, use\r\nthe optional `bucket_span` parameter.", "name": "bucket_span", "required": false, "type": { @@ -146222,7 +146294,7 @@ } }, { - "description": "Returns overall buckets with overall scores greater than or equal to this\nvalue.", + "description": "Returns overall buckets with overall scores greater than or equal to this\r\nvalue.", "name": "overall_score", "required": false, "type": { @@ -146258,7 +146330,7 @@ } }, { - "description": "The number of top anomaly detection job bucket scores to be used in the\n`overall_score` calculation.", + "description": "The number of top anomaly detection job bucket scores to be used in the\r\n`overall_score` calculation.", "name": "top_n", "required": false, "serverDefault": 1, @@ -146410,7 +146482,7 @@ } ] }, - "description": "Retrieves anomaly records for an anomaly detection job.\nRecords contain the detailed analytical results. They describe the anomalous\nactivity that has been identified in the input data based on the detector\nconfiguration.\nThere can be many anomaly records depending on the characteristics and size\nof the input data. In practice, there are often too many to be able to\nmanually process them. The machine learning features therefore perform a\nsophisticated aggregation of the anomaly records into buckets.\nThe number of record results depends on the number of anomalies found in each\nbucket, which relates to the number of time series being modeled and the\nnumber of detectors.", + "description": "Retrieves anomaly records for an anomaly detection job.\r\nRecords contain the detailed analytical results. They describe the anomalous\r\nactivity that has been identified in the input data based on the detector\r\nconfiguration.\r\nThere can be many anomaly records depending on the characteristics and size\r\nof the input data. In practice, there are often too many to be able to\r\nmanually process them. The machine learning features therefore perform a\r\nsophisticated aggregation of the anomaly records into buckets.\r\nThe number of record results depends on the number of anomalies found in each\r\nbucket, which relates to the number of time series being modeled and the\r\nnumber of detectors.", "inherits": { "type": { "name": "RequestBase", @@ -146451,7 +146523,7 @@ } }, { - "description": "Returns records with timestamps earlier than this time. The default value\nmeans results are not limited to specific timestamps.", + "description": "Returns records with timestamps earlier than this time. The default value\r\nmeans results are not limited to specific timestamps.", "name": "end", "required": false, "serverDefault": "-1", @@ -146529,7 +146601,7 @@ } }, { - "description": "Returns records with timestamps after this time. The default value means\nresults are not limited to specific timestamps.", + "description": "Returns records with timestamps after this time. 
The default value means\r\nresults are not limited to specific timestamps.", "name": "start", "required": false, "serverDefault": "-1", @@ -146617,7 +146689,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n- Contains wildcard expressions and there are no models that match.\n- Contains the _all string or no identifiers and there are no matches.\n- Contains wildcard expressions and there are only partial matches.\n\nIf true, it returns an empty array when there are no matches and the\nsubset of results when there are partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n- Contains wildcard expressions and there are no models that match.\r\n- Contains the _all string or no identifiers and there are no matches.\r\n- Contains wildcard expressions and there are only partial matches.\r\n\r\nIf true, it returns an empty array when there are no matches and the\r\nsubset of results when there are partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -146630,7 +146702,7 @@ } }, { - "description": "Specifies whether the included model definition should be returned as a\nJSON map (true) or in a custom compressed format (false).", + "description": "Specifies whether the included model definition should be returned as a\r\nJSON map (true) or in a custom compressed format (false).", "name": "decompress_definition", "required": false, "serverDefault": true, @@ -146643,7 +146715,7 @@ } }, { - "description": "Indicates if certain fields should be removed from the configuration on\nretrieval. This allows the configuration to be in an acceptable format to\nbe retrieved and then added to another cluster.", + "description": "Indicates if certain fields should be removed from the configuration on\r\nretrieval. This allows the configuration to be in an acceptable format to\r\nbe retrieved and then added to another cluster.", "name": "exclude_generated", "required": false, "serverDefault": false, @@ -146669,7 +146741,7 @@ } }, { - "description": "A comma delimited string of optional fields to include in the response\nbody.", + "description": "A comma delimited string of optional fields to include in the response\r\nbody.", "name": "include", "required": false, "type": { @@ -146694,7 +146766,7 @@ } }, { - "description": "A comma delimited string of tags. A trained model can have many tags, or\nnone. When supplied, only trained models that contain all the supplied\ntags are returned.", + "description": "A comma delimited string of tags. A trained model can have many tags, or\r\nnone. When supplied, only trained models that contain all the supplied\r\ntags are returned.", "name": "tags", "required": false, "type": { @@ -146754,7 +146826,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves usage information for trained models. You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", + "description": "Retrieves usage information for trained models. You can get usage information for multiple trained\r\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", "inherits": { "type": { "name": "RequestBase", @@ -146768,7 +146840,7 @@ }, "path": [ { - "description": "The unique identifier of the trained model or a model alias. It can be a\ncomma-separated list or a wildcard expression.", + "description": "The unique identifier of the trained model or a model alias. 
It can be a\r\ncomma-separated list or a wildcard expression.", "name": "model_id", "required": false, "type": { @@ -146782,7 +146854,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n- Contains wildcard expressions and there are no models that match.\n- Contains the _all string or no identifiers and there are no matches.\n- Contains wildcard expressions and there are only partial matches.\n\nIf true, it returns an empty array when there are no matches and the\nsubset of results when there are partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n- Contains wildcard expressions and there are no models that match.\r\n- Contains the _all string or no identifiers and there are no matches.\r\n- Contains wildcard expressions and there are only partial matches.\r\n\r\nIf true, it returns an empty array when there are no matches and the\r\nsubset of results when there are partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -146871,7 +146943,7 @@ "kind": "properties", "properties": [ { - "description": "An array of objects to pass to the model for inference. The objects should contain a fields matching your\nconfigured trained model input. Typically, for NLP models, the field name is `text_field`.\nCurrently, for NLP models, only a single value is allowed.", + "description": "An array of objects to pass to the model for inference. The objects should contain a fields matching your\r\nconfigured trained model input. Typically, for NLP models, the field name is `text_field`.\r\nCurrently, for NLP models, only a single value is allowed.", "name": "docs", "required": true, "type": { @@ -147176,7 +147248,7 @@ "body": { "kind": "no_body" }, - "description": "Returns defaults and limits used by machine learning.\nThis endpoint is designed to be used by a user interface that needs to fully\nunderstand machine learning configurations where some options are not\nspecified, meaning that the defaults should be used. This endpoint may be\nused to find out what those defaults are. It also provides information about\nthe maximum size of machine learning jobs that could run in the current\ncluster configuration.", + "description": "Returns defaults and limits used by machine learning.\r\nThis endpoint is designed to be used by a user interface that needs to fully\r\nunderstand machine learning configurations where some options are not\r\nspecified, meaning that the defaults should be used. This endpoint may be\r\nused to find out what those defaults are. It also provides information about\r\nthe maximum size of machine learning jobs that could run in the current\r\ncluster configuration.", "inherits": { "type": { "name": "RequestBase", @@ -147271,7 +147343,7 @@ } ] }, - "description": "Opens one or more anomaly detection jobs.\nAn anomaly detection job must be opened in order for it to be ready to\nreceive and analyze data. It can be opened and closed multiple times\nthroughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. The job is ready to resume its analysis from where it left off, once\nnew data is received.", + "description": "Opens one or more anomaly detection jobs.\r\nAn anomaly detection job must be opened in order for it to be ready to\r\nreceive and analyze data. 
It can be opened and closed multiple times\r\nthroughout its lifecycle.\r\nWhen you open a new job, it starts with an empty model.\r\nWhen you open an existing job, the most recent model state is automatically\r\nloaded. The job is ready to resume its analysis from where it left off, once\r\nnew data is received.", "inherits": { "type": { "name": "RequestBase", @@ -147451,7 +147523,7 @@ "description": "Posting data directly to anomaly detection jobs is deprecated, in a future major version a datafeed will be required.", "version": "7.11.0" }, - "description": "Sends data to an anomaly detection job for analysis.\n\nIMPORTANT: For each job, data can be accepted from only a single connection at a time.\nIt is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.", + "description": "Sends data to an anomaly detection job for analysis.\r\n\r\nIMPORTANT: For each job, data can be accepted from only a single connection at a time.\r\nIt is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.", "generics": [ { "name": "TData", @@ -147762,9 +147834,9 @@ "kind": "properties", "properties": [ { - "description": "A data frame analytics config as described in create data frame analytics\njobs. Note that `id` and `dest` don’t need to be provided in the context of\nthis API.", + "description": "A data frame analytics config as described in create data frame analytics\r\njobs. Note that `id` and `dest` don’t need to be provided in the context of\r\nthis API.", "docId": "put-dfanalytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-dfanalytics.html\r", "name": "config", "required": false, "type": { @@ -147865,7 +147937,7 @@ } }, { - "description": "The configuration details for the anomaly detection job that is associated with the datafeed. If the\n`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must\nsupply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is\nused. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.", + "description": "The configuration details for the anomaly detection job that is associated with the datafeed. If the\r\n`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must\r\nsupply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is\r\nused. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.", "name": "job_config", "required": false, "type": { @@ -147878,7 +147950,7 @@ } ] }, - "description": "Previews a datafeed.\nThis API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. 
To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "Previews a datafeed.\r\nThis API returns the first \"page\" of search results from a datafeed.\r\nYou can preview an existing datafeed or provide configuration details for a datafeed\r\nand anomaly detection job in the API. The preview shows the structure of the data\r\nthat will be passed to the anomaly detection engine.\r\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\r\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\r\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\r\nYou can also use secondary authorization headers to supply the credentials.", "inherits": { "type": { "name": "RequestBase", @@ -147892,7 +147964,7 @@ }, "path": [ { - "description": "A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase\nalphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric\ncharacters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job\nconfiguration details in the request body.", + "description": "A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase\r\nalphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric\r\ncharacters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job\r\nconfiguration details in the request body.", "name": "datafeed_id", "required": false, "type": { @@ -148178,9 +148250,9 @@ "kind": "properties", "properties": [ { - "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node. If\nset to `false` and a machine learning node with capacity to run the job\ncannot be immediately found, the API returns an error. If set to `true`,\nthe API does not return an error; the job waits in the `starting` state\nuntil sufficient machine learning node capacity is available. This\nbehavior is also affected by the cluster-wide\n`xpack.ml.max_lazy_ml_nodes` setting.", + "description": "Specifies whether this job can start when there is insufficient machine\r\nlearning node capacity for it to be immediately assigned to a node. If\r\nset to `false` and a machine learning node with capacity to run the job\r\ncannot be immediately found, the API returns an error. If set to `true`,\r\nthe API does not return an error; the job waits in the `starting` state\r\nuntil sufficient machine learning node capacity is available. 
This\r\nbehavior is also affected by the cluster-wide\r\n`xpack.ml.max_lazy_ml_nodes` setting.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "allow_lazy_start", "required": false, "serverDefault": false, @@ -148193,7 +148265,7 @@ } }, { - "description": "The analysis configuration, which contains the information necessary to\nperform one of the following types of analysis: classification, outlier\ndetection, or regression.", + "description": "The analysis configuration, which contains the information necessary to\r\nperform one of the following types of analysis: classification, outlier\r\ndetection, or regression.", "name": "analysis", "required": true, "type": { @@ -148205,7 +148277,7 @@ } }, { - "description": "Specifies `includes` and/or `excludes` patterns to select which fields\nwill be included in the analysis. The patterns specified in `excludes`\nare applied last, therefore `excludes` takes precedence. In other words,\nif the same field is specified in both `includes` and `excludes`, then\nthe field will not be included in the analysis. If `analyzed_fields` is\nnot set, only the relevant fields will be included. For example, all the\nnumeric fields for outlier detection.\nThe supported fields vary for each type of analysis. Outlier detection\nrequires numeric or `boolean` data to analyze. The algorithms don’t\nsupport missing values therefore fields that have data types other than\nnumeric or boolean are ignored. Documents where included fields contain\nmissing values, null values, or an array are also ignored. Therefore the\n`dest` index may contain documents that don’t have an outlier score.\nRegression supports fields that are numeric, `boolean`, `text`,\n`keyword`, and `ip` data types. It is also tolerant of missing values.\nFields that are supported are included in the analysis, other fields are\nignored. Documents where included fields contain an array with two or\nmore values are also ignored. Documents in the `dest` index that don’t\ncontain a results field are not included in the regression analysis.\nClassification supports fields that are numeric, `boolean`, `text`,\n`keyword`, and `ip` data types. It is also tolerant of missing values.\nFields that are supported are included in the analysis, other fields are\nignored. Documents where included fields contain an array with two or\nmore values are also ignored. Documents in the `dest` index that don’t\ncontain a results field are not included in the classification analysis.\nClassification analysis can be improved by mapping ordinal variable\nvalues to a single number. For example, in case of age ranges, you can\nmodel the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on.", + "description": "Specifies `includes` and/or `excludes` patterns to select which fields\r\nwill be included in the analysis. The patterns specified in `excludes`\r\nare applied last, therefore `excludes` takes precedence. In other words,\r\nif the same field is specified in both `includes` and `excludes`, then\r\nthe field will not be included in the analysis. If `analyzed_fields` is\r\nnot set, only the relevant fields will be included. For example, all the\r\nnumeric fields for outlier detection.\r\nThe supported fields vary for each type of analysis. Outlier detection\r\nrequires numeric or `boolean` data to analyze. 
The algorithms don’t\r\nsupport missing values therefore fields that have data types other than\r\nnumeric or boolean are ignored. Documents where included fields contain\r\nmissing values, null values, or an array are also ignored. Therefore the\r\n`dest` index may contain documents that don’t have an outlier score.\r\nRegression supports fields that are numeric, `boolean`, `text`,\r\n`keyword`, and `ip` data types. It is also tolerant of missing values.\r\nFields that are supported are included in the analysis, other fields are\r\nignored. Documents where included fields contain an array with two or\r\nmore values are also ignored. Documents in the `dest` index that don’t\r\ncontain a results field are not included in the regression analysis.\r\nClassification supports fields that are numeric, `boolean`, `text`,\r\n`keyword`, and `ip` data types. It is also tolerant of missing values.\r\nFields that are supported are included in the analysis, other fields are\r\nignored. Documents where included fields contain an array with two or\r\nmore values are also ignored. Documents in the `dest` index that don’t\r\ncontain a results field are not included in the classification analysis.\r\nClassification analysis can be improved by mapping ordinal variable\r\nvalues to a single number. For example, in case of age ranges, you can\r\nmodel the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on.", "name": "analyzed_fields", "required": false, "type": { @@ -148241,7 +148313,7 @@ } }, { - "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "description": "The maximum number of threads to be used by the analysis. Using more\r\nthreads may decrease the time necessary to complete the analysis at the\r\ncost of using more CPU. Note that the process may use additional threads\r\nfor operational functionality other than the analysis itself.", "name": "max_num_threads", "required": false, "serverDefault": 1, @@ -148254,7 +148326,7 @@ } }, { - "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\nto create data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "description": "The approximate maximum amount of memory resources that are permitted for\r\nanalytical processing. 
If your `elasticsearch.yml` file contains an\r\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\r\nto create data frame analytics jobs that have `model_memory_limit` values\r\ngreater than that setting.", "name": "model_memory_limit", "required": false, "serverDefault": "1gb", @@ -148316,7 +148388,7 @@ } ] }, - "description": "Instantiates a data frame analytics job.\nThis API creates a data frame analytics job that performs an analysis on the\nsource indices and stores the outcome in a destination index.", + "description": "Instantiates a data frame analytics job.\r\nThis API creates a data frame analytics job that performs an analysis on the\r\nsource indices and stores the outcome in a destination index.", "inherits": { "type": { "name": "RequestBase", @@ -148330,7 +148402,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. This identifier can contain\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\nunderscores. It must start and end with alphanumeric characters.", + "description": "Identifier for the data frame analytics job. This identifier can contain\r\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\r\nunderscores. It must start and end with alphanumeric characters.", "name": "id", "required": true, "type": { @@ -148507,7 +148579,7 @@ "kind": "properties", "properties": [ { - "description": "If set, the datafeed performs aggregation searches.\nSupport for aggregations is limited and should be used only with low cardinality data.", + "description": "If set, the datafeed performs aggregation searches.\r\nSupport for aggregations is limited and should be used only with low cardinality data.", "name": "aggregations", "required": false, "type": { @@ -148530,7 +148602,7 @@ } }, { - "description": "Datafeeds might be required to search over long time periods, for several months or years.\nThis search is split into time chunks in order to ensure the load on Elasticsearch is managed.\nChunking configuration controls how the size of these time chunks are calculated;\nit is an advanced configuration option.", + "description": "Datafeeds might be required to search over long time periods, for several months or years.\r\nThis search is split into time chunks in order to ensure the load on Elasticsearch is managed.\r\nChunking configuration controls how the size of these time chunks are calculated;\r\nit is an advanced configuration option.", "name": "chunking_config", "required": false, "type": { @@ -148542,7 +148614,7 @@ } }, { - "description": "Specifies whether the datafeed checks for missing data and the size of the window.\nThe datafeed can optionally search over indices that have already been read in an effort to determine whether\nany data has subsequently been added to the index. If missing data is found, it is a good indication that the\n`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time.\nThis check runs only on real-time datafeeds.", + "description": "Specifies whether the datafeed checks for missing data and the size of the window.\r\nThe datafeed can optionally search over indices that have already been read in an effort to determine whether\r\nany data has subsequently been added to the index. 
If missing data is found, it is a good indication that the\r\n`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time.\r\nThis check runs only on real-time datafeeds.", "name": "delayed_data_check_config", "required": false, "type": { @@ -148554,7 +148626,7 @@ } }, { - "description": "The interval at which scheduled queries are made while the datafeed runs in real time.\nThe default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible\nfraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last\n(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses\naggregations, this value must be divisible by the interval of the date histogram aggregation.", + "description": "The interval at which scheduled queries are made while the datafeed runs in real time.\r\nThe default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible\r\nfraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last\r\n(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses\r\naggregations, this value must be divisible by the interval of the date histogram aggregation.", "name": "frequency", "required": false, "type": { @@ -148569,7 +148641,7 @@ "aliases": [ "indexes" ], - "description": "An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine\nlearning nodes must have the `remote_cluster_client` role.", + "description": "An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine\r\nlearning nodes must have the `remote_cluster_client` role.", "name": "indices", "required": false, "type": { @@ -148605,7 +148677,7 @@ } }, { - "description": "If a real-time datafeed has never seen any data (including during any initial training period), it automatically\nstops and closes the associated job after this many real-time searches return no documents. In other words,\nit stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no\nend time that sees no data remains started until it is explicitly stopped. By default, it is not set.", + "description": "If a real-time datafeed has never seen any data (including during any initial training period), it automatically\r\nstops and closes the associated job after this many real-time searches return no documents. In other words,\r\nit stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no\r\nend time that sees no data remains started until it is explicitly stopped. By default, it is not set.", "name": "max_empty_searches", "required": false, "type": { @@ -148617,7 +148689,7 @@ } }, { - "description": "The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an\nElasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this\nobject is passed verbatim to Elasticsearch.", + "description": "The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an\r\nElasticsearch search POST body. 
All the options that are supported by Elasticsearch can be used, as this\r\nobject is passed verbatim to Elasticsearch.", "name": "query", "required": false, "serverDefault": "{\"match_all\": {\"boost\": 1}}", @@ -148630,7 +148702,7 @@ } }, { - "description": "The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might\nnot be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default\nvalue is randomly selected between `60s` and `120s`. This randomness improves the query performance\nwhen there are multiple jobs running on the same node.", + "description": "The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might\r\nnot be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default\r\nvalue is randomly selected between `60s` and `120s`. This randomness improves the query performance\r\nwhen there are multiple jobs running on the same node.", "name": "query_delay", "required": false, "type": { @@ -148654,7 +148726,7 @@ } }, { - "description": "Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.\nThe detector configuration objects in a job can contain functions that use these script fields.", + "description": "Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.\r\nThe detector configuration objects in a job can contain functions that use these script fields.", "name": "script_fields", "required": false, "type": { @@ -148677,7 +148749,7 @@ } }, { - "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\nThe maximum value is the value of `index.max_result_window`, which is 10,000 by default.", + "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\r\nThe maximum value is the value of `index.max_result_window`, which is 10,000 by default.", "name": "scroll_size", "required": false, "serverDefault": 1000, @@ -148709,7 +148781,7 @@ } ] }, - "description": "Instantiates a datafeed.\nDatafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\nYou can associate only one datafeed with each anomaly detection job.\nThe datafeed contains a query that runs at a defined interval (`frequency`).\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\nat the time of creation and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\ndirectly to the `.ml-config` index. 
Do not give users `write` privileges on the `.ml-config` index.", + "description": "Instantiates a datafeed.\r\nDatafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\r\nYou can associate only one datafeed with each anomaly detection job.\r\nThe datafeed contains a query that runs at a defined interval (`frequency`).\r\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\r\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\r\nat the time of creation and runs the query using those same roles. If you provide secondary authorization headers,\r\nthose credentials are used instead.\r\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\r\ndirectly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.", "inherits": { "type": { "name": "RequestBase", @@ -148723,7 +148795,7 @@ }, "path": [ { - "description": "A numerical character string that uniquely identifies the datafeed.\nThis identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.\nIt must start and end with alphanumeric characters.", + "description": "A numerical character string that uniquely identifies the datafeed.\r\nThis identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.\r\nIt must start and end with alphanumeric characters.", "name": "datafeed_id", "required": true, "type": { @@ -148737,7 +148809,7 @@ ], "query": [ { - "description": "If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all`\nstring or when no indices are specified.", + "description": "If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all`\r\nstring or when no indices are specified.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -148750,7 +148822,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\r\nwhether wildcard expressions match hidden data streams. Supports comma-separated values.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -149018,7 +149090,7 @@ } }, { - "description": "The items of the filter. A wildcard `*` can be used at the beginning or the end of an item.\nUp to 10000 items are allowed in each filter.", + "description": "The items of the filter. A wildcard `*` can be used at the beginning or the end of an item.\r\nUp to 10000 items are allowed in each filter.", "name": "items", "required": false, "type": { @@ -149034,7 +149106,7 @@ } ] }, - "description": "Instantiates a filter.\nA filter contains a list of strings. It can be used by one or more anomaly detection jobs.\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.", + "description": "Instantiates a filter.\r\nA filter contains a list of strings. 
It can be used by one or more anomaly detection jobs.\r\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.", "inherits": { "type": { "name": "RequestBase", @@ -149910,7 +149982,7 @@ "kind": "properties", "properties": [ { - "description": "The compressed (GZipped and Base64 encoded) inference definition of the\nmodel. If compressed_definition is specified, then definition cannot be\nspecified.", + "description": "The compressed (GZipped and Base64 encoded) inference definition of the\r\nmodel. If compressed_definition is specified, then definition cannot be\r\nspecified.", "name": "compressed_definition", "required": false, "type": { @@ -149922,7 +149994,7 @@ } }, { - "description": "The inference definition for the model. If definition is specified, then\ncompressed_definition cannot be specified.", + "description": "The inference definition for the model. If definition is specified, then\r\ncompressed_definition cannot be specified.", "name": "definition", "required": false, "type": { @@ -149946,7 +150018,7 @@ } }, { - "description": "The default configuration for inference. This can be either a regression\nor classification configuration. It must match the underlying\ndefinition.trained_model's target_type. For pre-packaged models such as\nELSER the config is not required.", + "description": "The default configuration for inference. This can be either a regression\r\nor classification configuration. It must match the underlying\r\ndefinition.trained_model's target_type. For pre-packaged models such as\r\nELSER the config is not required.", "name": "inference_config", "required": false, "type": { @@ -149991,7 +150063,7 @@ } }, { - "description": "The estimated memory usage in bytes to keep the trained model in memory.\nThis property is supported only if defer_definition_decompression is true\nor the model definition is not supplied.", + "description": "The estimated memory usage in bytes to keep the trained model in memory.\r\nThis property is supported only if defer_definition_decompression is true\r\nor the model definition is not supplied.", "name": "model_size_bytes", "required": false, "type": { @@ -150171,7 +150243,7 @@ } }, { - "description": "The definition of a node in a tree.\nThere are two major types of nodes: leaf nodes and not-leaf nodes.\n- Leaf nodes only need node_index and leaf_value defined.\n- All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined.", + "description": "The definition of a node in a tree.\r\nThere are two major types of nodes: leaf nodes and not-leaf nodes.\r\n- Leaf nodes only need node_index and leaf_value defined.\r\n- All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined.", "name": "tree_node", "required": false, "type": { @@ -150397,7 +150469,7 @@ "body": { "kind": "no_body" }, - "description": "Creates or updates a trained model alias. A trained model alias is a logical\nname used to reference a single trained model.\nYou can use aliases instead of trained model identifiers to make it easier to\nreference your models. For example, you can use aliases in inference\naggregations and processors.\nAn alias must be unique and refer to only a single trained model. 
However,\nyou can have multiple aliases for each trained model.\nIf you use this API to update an alias such that it references a different\ntrained model ID and the model uses a different type of data frame analytics,\nan error occurs. For example, this situation occurs if you have a trained\nmodel for regression analysis and a trained model for classification\nanalysis; you cannot reassign an alias from one type of trained model to\nanother.\nIf you use this API to update an alias and there are very few input fields in\ncommon between the old and new trained models for the model alias, the API\nreturns a warning.", + "description": "Creates or updates a trained model alias. A trained model alias is a logical\r\nname used to reference a single trained model.\r\nYou can use aliases instead of trained model identifiers to make it easier to\r\nreference your models. For example, you can use aliases in inference\r\naggregations and processors.\r\nAn alias must be unique and refer to only a single trained model. However,\r\nyou can have multiple aliases for each trained model.\r\nIf you use this API to update an alias such that it references a different\r\ntrained model ID and the model uses a different type of data frame analytics,\r\nan error occurs. For example, this situation occurs if you have a trained\r\nmodel for regression analysis and a trained model for classification\r\nanalysis; you cannot reassign an alias from one type of trained model to\r\nanother.\r\nIf you use this API to update an alias and there are very few input fields in\r\ncommon between the old and new trained models for the model alias, the API\r\nreturns a warning.", "inherits": { "type": { "name": "RequestBase", @@ -150437,7 +150509,7 @@ ], "query": [ { - "description": "Specifies whether the alias gets reassigned to the specified trained\nmodel if it is already assigned to a different model. If the alias is\nalready assigned and this parameter is false, the API returns an error.", + "description": "Specifies whether the alias gets reassigned to the specified trained\r\nmodel if it is already assigned to a different model. If the alias is\r\nalready assigned and this parameter is false, the API returns an error.", "name": "reassign", "required": false, "serverDefault": false, @@ -150541,7 +150613,7 @@ } }, { - "description": "The definition part number. When the definition is loaded for inference the definition parts are streamed in the\norder of their part number. The first part must be `0` and the final part must be `total_parts - 1`.", + "description": "The definition part number. When the definition is loaded for inference the definition parts are streamed in the\r\norder of their part number. 
The first part must be `0` and the final part must be `total_parts - 1`.", "name": "part", "required": true, "type": { @@ -150620,7 +150692,7 @@ } ] }, - "description": "Creates a trained model vocabulary.\nThis API is supported only for natural language processing (NLP) models.\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.", + "description": "Creates a trained model vocabulary.\r\nThis API is supported only for natural language processing (NLP) models.\r\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.", "inherits": { "type": { "name": "RequestBase", @@ -150674,7 +150746,7 @@ "body": { "kind": "no_body" }, - "description": "Resets an anomaly detection job.\nAll model state and results are deleted. The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.", + "description": "Resets an anomaly detection job.\r\nAll model state and results are deleted. The job is ready to start over as if\r\nit had just been created.\r\nIt is not currently possible to reset multiple jobs using wildcards or a\r\ncomma separated list.", "inherits": { "type": { "name": "RequestBase", @@ -150702,7 +150774,7 @@ ], "query": [ { - "description": "Should this request wait until the operation has completed before\nreturning.", + "description": "Should this request wait until the operation has completed before\r\nreturning.", "name": "wait_for_completion", "required": false, "serverDefault": true, @@ -150715,7 +150787,7 @@ } }, { - "description": "Specifies whether annotations that have been added by the\nuser should be deleted along with any auto-generated annotations when the job is\nreset.", + "description": "Specifies whether annotations that have been added by the\r\nuser should be deleted along with any auto-generated annotations when the job is\r\nreset.", "name": "delete_user_annotations", "required": false, "serverDefault": false, @@ -150770,7 +150842,7 @@ } ] }, - "description": "Reverts to a specific snapshot.\nThe machine learning features react quickly to anomalous input, learning new\nbehaviors in data. Highly anomalous input increases the variance in the\nmodels whilst the system learns whether this is a new step-change in behavior\nor a one-off event. In the case where this anomalous input is known to be a\none-off, then it might be appropriate to reset the model state to a time\nbefore this event. For example, you might consider reverting to a saved\nsnapshot after Black Friday or a critical system failure.", + "description": "Reverts to a specific snapshot.\r\nThe machine learning features react quickly to anomalous input, learning new\r\nbehaviors in data. Highly anomalous input increases the variance in the\r\nmodels whilst the system learns whether this is a new step-change in behavior\r\nor a one-off event. In the case where this anomalous input is known to be a\r\none-off, then it might be appropriate to reset the model state to a time\r\nbefore this event. For example, you might consider reverting to a saved\r\nsnapshot after Black Friday or a critical system failure.", "inherits": { "type": { "name": "RequestBase", @@ -150796,7 +150868,7 @@ } }, { - "description": "You can specify `empty` as the . 
Reverting to the empty\nsnapshot means the anomaly detection job starts learning a new model from\nscratch when it is started.", + "description": "You can specify `empty` as the . Reverting to the empty\r\nsnapshot means the anomaly detection job starts learning a new model from\r\nscratch when it is started.", "name": "snapshot_id", "required": true, "type": { @@ -150810,7 +150882,7 @@ ], "query": [ { - "description": "If true, deletes the results in the time period between the latest\nresults and the time of the reverted snapshot. It also resets the model\nto accept records for this time period. If you choose not to delete\nintervening results when reverting a snapshot, the job will not accept\ninput data that is older than the current time. If you want to resend\ndata, then delete the intervening results.", + "description": "If true, deletes the results in the time period between the latest\r\nresults and the time of the reverted snapshot. It also resets the model\r\nto accept records for this time period. If you choose not to delete\r\nintervening results when reverting a snapshot, the job will not accept\r\ninput data that is older than the current time. If you want to resend\r\ndata, then delete the intervening results.", "name": "delete_intervening_results", "required": false, "serverDefault": false, @@ -150856,7 +150928,7 @@ "body": { "kind": "no_body" }, - "description": "Sets a cluster wide upgrade_mode setting that prepares machine learning\nindices for an upgrade.\nWhen upgrading your cluster, in some circumstances you must restart your\nnodes and reindex your machine learning indices. In those circumstances,\nthere must be no machine learning jobs running. You can close the machine\nlearning jobs, do the upgrade, then open all the jobs again. Alternatively,\nyou can use this API to temporarily halt tasks associated with the jobs and\ndatafeeds and prevent new jobs from opening. You can also use this API\nduring upgrades that do not require you to reindex your machine learning\nindices, though stopping jobs is not a requirement in that case.\nYou can see the current value for the upgrade_mode setting by using the get\nmachine learning info API.", + "description": "Sets a cluster wide upgrade_mode setting that prepares machine learning\r\nindices for an upgrade.\r\nWhen upgrading your cluster, in some circumstances you must restart your\r\nnodes and reindex your machine learning indices. In those circumstances,\r\nthere must be no machine learning jobs running. You can close the machine\r\nlearning jobs, do the upgrade, then open all the jobs again. Alternatively,\r\nyou can use this API to temporarily halt tasks associated with the jobs and\r\ndatafeeds and prevent new jobs from opening. 
You can also use this API\r\nduring upgrades that do not require you to reindex your machine learning\r\nindices, though stopping jobs is not a requirement in that case.\r\nYou can see the current value for the upgrade_mode setting by using the get\r\nmachine learning info API.", "inherits": { "type": { "name": "RequestBase", @@ -150871,7 +150943,7 @@ "path": [], "query": [ { - "description": "When `true`, it enables `upgrade_mode` which temporarily halts all job\nand datafeed tasks and prohibits new job and datafeed tasks from\nstarting.", + "description": "When `true`, it enables `upgrade_mode` which temporarily halts all job\r\nand datafeed tasks and prohibits new job and datafeed tasks from\r\nstarting.", "name": "enabled", "required": false, "serverDefault": false, @@ -150924,7 +150996,7 @@ "body": { "kind": "no_body" }, - "description": "Starts a data frame analytics job.\nA data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\nIf the destination index does not exist, it is created automatically the\nfirst time you start the data frame analytics job. The\n`index.number_of_shards` and `index.number_of_replicas` settings for the\ndestination index are copied from the source index. If there are multiple\nsource indices, the destination index copies the highest setting values. The\nmappings for the destination index are also copied from the source indices.\nIf there are any mapping conflicts, the job fails to start.\nIf the destination index exists, it is used as is. You can therefore set up\nthe destination index in advance with custom settings and mappings.", + "description": "Starts a data frame analytics job.\r\nA data frame analytics job can be started and stopped multiple times\r\nthroughout its lifecycle.\r\nIf the destination index does not exist, it is created automatically the\r\nfirst time you start the data frame analytics job. The\r\n`index.number_of_shards` and `index.number_of_replicas` settings for the\r\ndestination index are copied from the source index. If there are multiple\r\nsource indices, the destination index copies the highest setting values. The\r\nmappings for the destination index are also copied from the source indices.\r\nIf there are any mapping conflicts, the job fails to start.\r\nIf the destination index exists, it is used as is. You can therefore set up\r\nthe destination index in advance with custom settings and mappings.", "inherits": { "type": { "name": "RequestBase", @@ -150938,7 +151010,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. This identifier can contain\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\nunderscores. It must start and end with alphanumeric characters.", + "description": "Identifier for the data frame analytics job. This identifier can contain\r\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\r\nunderscores. It must start and end with alphanumeric characters.", "name": "id", "required": true, "type": { @@ -150952,7 +151024,7 @@ ], "query": [ { - "description": "Controls the amount of time to wait until the data frame analytics job\nstarts.", + "description": "Controls the amount of time to wait until the data frame analytics job\r\nstarts.", "name": "timeout", "required": false, "serverDefault": "20s", @@ -151049,7 +151121,7 @@ } ] }, - "description": "Starts one or more datafeeds.\n\nA datafeed must be started in order to retrieve data from Elasticsearch. 
A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n\nBefore you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.\n\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\nupdate it had at the time of creation or update and runs the query using those same roles. If you provided secondary\nauthorization headers when you created or updated the datafeed, those credentials are used instead.", + "description": "Starts one or more datafeeds.\r\n\r\nA datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped\r\nmultiple times throughout its lifecycle.\r\n\r\nBefore you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.\r\n\r\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\r\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\r\n\r\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\r\nupdate it had at the time of creation or update and runs the query using those same roles. If you provided secondary\r\nauthorization headers when you created or updated the datafeed, those credentials are used instead.", "inherits": { "type": { "name": "RequestBase", @@ -151063,7 +151135,7 @@ }, "path": [ { - "description": "A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase\nalphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric\ncharacters.", + "description": "A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase\r\nalphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric\r\ncharacters.", "name": "datafeed_id", "required": true, "type": { @@ -151077,7 +151149,7 @@ ], "query": [ { - "description": "The time that the datafeed should end, which can be specified by using one of the following formats:\n\n* ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z`\n* ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00`\n* Milliseconds since the epoch, for example `1485061200000`\n\nDate-time arguments using either of the ISO 8601 formats must have a time zone designator, where `Z` is accepted\nas an abbreviation for UTC time. When a URL is expected (for example, in browsers), the `+` used in time zone\ndesignators must be encoded as `%2B`.\nThe end time value is exclusive. If you do not specify an end time, the datafeed\nruns continuously.", + "description": "The time that the datafeed should end, which can be specified by using one of the following formats:\r\n\r\n* ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z`\r\n* ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00`\r\n* Milliseconds since the epoch, for example `1485061200000`\r\n\r\nDate-time arguments using either of the ISO 8601 formats must have a time zone designator, where `Z` is accepted\r\nas an abbreviation for UTC time. 
When a URL is expected (for example, in browsers), the `+` used in time zone\r\ndesignators must be encoded as `%2B`.\r\nThe end time value is exclusive. If you do not specify an end time, the datafeed\r\nruns continuously.", "name": "end", "required": false, "type": { @@ -151089,7 +151161,7 @@ } }, { - "description": "The time that the datafeed should begin, which can be specified by using the same formats as the `end` parameter.\nThis value is inclusive.\nIf you do not specify a start time and the datafeed is associated with a new anomaly detection job, the analysis\nstarts from the earliest time for which data is available.\nIf you restart a stopped datafeed and specify a start value that is earlier than the timestamp of the latest\nprocessed record, the datafeed continues from 1 millisecond after the timestamp of the latest processed record.", + "description": "The time that the datafeed should begin, which can be specified by using the same formats as the `end` parameter.\r\nThis value is inclusive.\r\nIf you do not specify a start time and the datafeed is associated with a new anomaly detection job, the analysis\r\nstarts from the earliest time for which data is available.\r\nIf you restart a stopped datafeed and specify a start value that is earlier than the timestamp of the latest\r\nprocessed record, the datafeed continues from 1 millisecond after the timestamp of the latest processed record.", "name": "start", "required": false, "type": { @@ -151121,7 +151193,7 @@ "kind": "properties", "properties": [ { - "description": "The ID of the node that the datafeed was started on. If the datafeed is allowed to open lazily and has not yet\nbeen assigned to a node, this value is an empty string.", + "description": "The ID of the node that the datafeed was started on. If the datafeed is allowed to open lazily and has not yet\r\nbeen assigned to a node, this value is an empty string.", "name": "node", "required": true, "type": { @@ -151188,7 +151260,7 @@ ], "query": [ { - "description": "The inference cache size (in memory outside the JVM heap) per node for the model.\nThe default value is the same size as the `model_size_bytes`. To disable the cache,\n`0b` can be provided.", + "description": "The inference cache size (in memory outside the JVM heap) per node for the model.\r\nThe default value is the same size as the `model_size_bytes`. 
To disable the cache,\r\n`0b` can be provided.", "name": "cache_size", "required": false, "type": { @@ -151200,7 +151272,7 @@ } }, { - "description": "The number of model allocations on each node where the model is deployed.\nAll allocations on a node share the same copy of the model in memory but use\na separate set of threads to evaluate the model.\nIncreasing this value generally increases the throughput.\nIf this setting is greater than the number of hardware threads\nit will automatically be changed to a value less than the number of hardware threads.", + "description": "The number of model allocations on each node where the model is deployed.\r\nAll allocations on a node share the same copy of the model in memory but use\r\na separate set of threads to evaluate the model.\r\nIncreasing this value generally increases the throughput.\r\nIf this setting is greater than the number of hardware threads\r\nit will automatically be changed to a value less than the number of hardware threads.", "name": "number_of_allocations", "required": false, "serverDefault": 1, @@ -151225,7 +151297,7 @@ } }, { - "description": "Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds\nthis value, new requests are rejected with a 429 error.", + "description": "Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds\r\nthis value, new requests are rejected with a 429 error.", "name": "queue_capacity", "required": false, "serverDefault": 1024, @@ -151238,7 +151310,7 @@ } }, { - "description": "Sets the number of threads used by each model allocation during inference. This generally increases\nthe inference speed. The inference process is a compute-bound process; any number\ngreater than the number of available hardware threads on the machine does not increase the\ninference speed. If this setting is greater than the number of hardware threads\nit will automatically be changed to a value less than the number of hardware threads.", + "description": "Sets the number of threads used by each model allocation during inference. This generally increases\r\nthe inference speed. The inference process is a compute-bound process; any number\r\ngreater than the number of available hardware threads on the machine does not increase the\r\ninference speed. If this setting is greater than the number of hardware threads\r\nit will automatically be changed to a value less than the number of hardware threads.", "name": "threads_per_allocation", "required": false, "serverDefault": 1, @@ -151310,7 +151382,7 @@ "body": { "kind": "no_body" }, - "description": "Stops one or more data frame analytics jobs.\nA data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.", + "description": "Stops one or more data frame analytics jobs.\r\nA data frame analytics job can be started and stopped multiple times\r\nthroughout its lifecycle.", "inherits": { "type": { "name": "RequestBase", @@ -151324,7 +151396,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. This identifier can contain\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\nunderscores. It must start and end with alphanumeric characters.", + "description": "Identifier for the data frame analytics job. This identifier can contain\r\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\r\nunderscores. 
It must start and end with alphanumeric characters.", "name": "id", "required": true, "type": { @@ -151338,7 +151410,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no data frame analytics\njobs that match.\n2. Contains the _all string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nThe default value is true, which returns an empty data_frame_analytics\narray when there are no matches and the subset of results when there are\npartial matches. If this parameter is false, the request returns a 404\nstatus code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no data frame analytics\r\njobs that match.\r\n2. Contains the _all string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nThe default value is true, which returns an empty data_frame_analytics\r\narray when there are no matches and the subset of results when there are\r\npartial matches. If this parameter is false, the request returns a 404\r\nstatus code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -151364,7 +151436,7 @@ } }, { - "description": "Controls the amount of time to wait until the data frame analytics job\nstops. Defaults to 20 seconds.", + "description": "Controls the amount of time to wait until the data frame analytics job\r\nstops. Defaults to 20 seconds.", "name": "timeout", "required": false, "serverDefault": "20s", @@ -151451,7 +151523,7 @@ } ] }, - "description": "Stops one or more datafeeds.\nA datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.", + "description": "Stops one or more datafeeds.\r\nA datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped\r\nmultiple times throughout its lifecycle.", "inherits": { "type": { "name": "RequestBase", @@ -151465,7 +151537,7 @@ }, "path": [ { - "description": "Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated\nlist of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as\nthe identifier.", + "description": "Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated\r\nlist of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as\r\nthe identifier.", "name": "datafeed_id", "required": true, "type": { @@ -151479,7 +151551,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n* Contains wildcard expressions and there are no datafeeds that match.\n* Contains the `_all` string or no identifiers and there are no matches.\n* Contains wildcard expressions and there are only partial matches.\n\nIf `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when\nthere are partial matches. 
If `false`, the API returns a 404 status code when there are no matches or only\npartial matches.", + "description": "Specifies what to do when the request:\r\n\r\n* Contains wildcard expressions and there are no datafeeds that match.\r\n* Contains the `_all` string or no identifiers and there are no matches.\r\n* Contains wildcard expressions and there are only partial matches.\r\n\r\nIf `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when\r\nthere are partial matches. If `false`, the API returns a 404 status code when there are no matches or only\r\npartial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -151579,7 +151651,7 @@ ], "query": [ { - "description": "Specifies what to do when the request: contains wildcard expressions and there are no deployments that match;\ncontains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and\nthere are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no deployments that match;\r\ncontains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and\r\nthere are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches.\r\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -151592,7 +151664,7 @@ } }, { - "description": "Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you\nrestart the model deployment.", + "description": "Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you\r\nrestart the model deployment.", "name": "force", "required": false, "serverDefault": false, @@ -151651,9 +151723,9 @@ } }, { - "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\nto create data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "description": "The approximate maximum amount of memory resources that are permitted for\r\nanalytical processing. If your `elasticsearch.yml` file contains an\r\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\r\nto create data frame analytics jobs that have `model_memory_limit` values\r\ngreater than that setting.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "model_memory_limit", "required": false, "serverDefault": "1gb", @@ -151666,7 +151738,7 @@ } }, { - "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. 
Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "description": "The maximum number of threads to be used by the analysis. Using more\r\nthreads may decrease the time necessary to complete the analysis at the\r\ncost of using more CPU. Note that the process may use additional threads\r\nfor operational functionality other than the analysis itself.", "name": "max_num_threads", "required": false, "serverDefault": 1, @@ -151679,9 +151751,9 @@ } }, { - "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node.", + "description": "Specifies whether this job can start when there is insufficient machine\r\nlearning node capacity for it to be immediately assigned to a node.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "allow_lazy_start", "required": false, "serverDefault": false, @@ -151709,7 +151781,7 @@ }, "path": [ { - "description": "Identifier for the data frame analytics job. This identifier can contain\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\nunderscores. It must start and end with alphanumeric characters.", + "description": "Identifier for the data frame analytics job. This identifier can contain\r\nlowercase alphanumeric characters (a-z and 0-9), hyphens, and\r\nunderscores. It must start and end with alphanumeric characters.", "name": "id", "required": true, "type": { @@ -151877,7 +151949,7 @@ "kind": "properties", "properties": [ { - "description": "If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only\nwith low cardinality data.", + "description": "If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only\r\nwith low cardinality data.", "name": "aggregations", "required": false, "type": { @@ -151900,7 +151972,7 @@ } }, { - "description": "Datafeeds might search over long time periods, for several months or years. This search is split into time\nchunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of\nthese time chunks are calculated; it is an advanced configuration option.", + "description": "Datafeeds might search over long time periods, for several months or years. This search is split into time\r\nchunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of\r\nthese time chunks are calculated; it is an advanced configuration option.", "name": "chunking_config", "required": false, "type": { @@ -151912,7 +151984,7 @@ } }, { - "description": "Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally\nsearch over indices that have already been read in an effort to determine whether any data has subsequently been\nadded to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and\nthe data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time\ndatafeeds.", + "description": "Specifies whether the datafeed checks for missing data and the size of the window. 
The datafeed can optionally\r\nsearch over indices that have already been read in an effort to determine whether any data has subsequently been\r\nadded to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and\r\nthe data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time\r\ndatafeeds.", "name": "delayed_data_check_config", "required": false, "type": { @@ -151924,7 +151996,7 @@ } }, { - "description": "The interval at which scheduled queries are made while the datafeed runs in real time. The default value is\neither the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket\nspan. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are\nwritten then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value\nmust be divisible by the interval of the date histogram aggregation.", + "description": "The interval at which scheduled queries are made while the datafeed runs in real time. The default value is\r\neither the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket\r\nspan. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are\r\nwritten then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value\r\nmust be divisible by the interval of the date histogram aggregation.", "name": "frequency", "required": false, "type": { @@ -151939,7 +152011,7 @@ "aliases": [ "indexes" ], - "description": "An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine\nlearning nodes must have the `remote_cluster_client` role.", + "description": "An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine\r\nlearning nodes must have the `remote_cluster_client` role.", "name": "indices", "required": false, "type": { @@ -151977,7 +152049,7 @@ } }, { - "description": "If a real-time datafeed has never seen any data (including during any initial training period), it automatically\nstops and closes the associated job after this many real-time searches return no documents. In other words,\nit stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no\nend time that sees no data remains started until it is explicitly stopped. By default, it is not set.", + "description": "If a real-time datafeed has never seen any data (including during any initial training period), it automatically\r\nstops and closes the associated job after this many real-time searches return no documents. In other words,\r\nit stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no\r\nend time that sees no data remains started until it is explicitly stopped. By default, it is not set.", "name": "max_empty_searches", "required": false, "type": { @@ -151989,7 +152061,7 @@ } }, { - "description": "The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an\nElasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this\nobject is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also\nchanged. 
Therefore, the time required to learn might be long and the understandability of the results is\nunpredictable. If you want to make significant changes to the source data, it is recommended that you\nclone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one\nwhen you are satisfied with the results of the job.", + "description": "The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an\r\nElasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this\r\nobject is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also\r\nchanged. Therefore, the time required to learn might be long and the understandability of the results is\r\nunpredictable. If you want to make significant changes to the source data, it is recommended that you\r\nclone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one\r\nwhen you are satisfied with the results of the job.", "name": "query", "required": false, "serverDefault": "{\"match_all\": {\"boost\": 1}}", @@ -152002,7 +152074,7 @@ } }, { - "description": "The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might\nnot be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default\nvalue is randomly selected between `60s` and `120s`. This randomness improves the query performance\nwhen there are multiple jobs running on the same node.", + "description": "The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might\r\nnot be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default\r\nvalue is randomly selected between `60s` and `120s`. This randomness improves the query performance\r\nwhen there are multiple jobs running on the same node.", "name": "query_delay", "required": false, "type": { @@ -152026,7 +152098,7 @@ } }, { - "description": "Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.\nThe detector configuration objects in a job can contain functions that use these script fields.", + "description": "Specifies scripts that evaluate custom expressions and returns script fields to the datafeed.\r\nThe detector configuration objects in a job can contain functions that use these script fields.", "name": "script_fields", "required": false, "type": { @@ -152049,7 +152121,7 @@ } }, { - "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\nThe maximum value is the value of `index.max_result_window`.", + "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\r\nThe maximum value is the value of `index.max_result_window`.", "name": "scroll_size", "required": false, "serverDefault": 1000, @@ -152063,7 +152135,7 @@ } ] }, - "description": "Updates the properties of a datafeed.\nYou must stop and start the datafeed for the changes to be applied.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\nthe time of the update and runs the query using those same roles. 
If you provide secondary authorization headers,\nthose credentials are used instead.", + "description": "Updates the properties of a datafeed.\r\nYou must stop and start the datafeed for the changes to be applied.\r\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\r\nthe time of the update and runs the query using those same roles. If you provide secondary authorization headers,\r\nthose credentials are used instead.", "inherits": { "type": { "name": "RequestBase", @@ -152077,7 +152149,7 @@ }, "path": [ { - "description": "A numerical character string that uniquely identifies the datafeed.\nThis identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.\nIt must start and end with alphanumeric characters.", + "description": "A numerical character string that uniquely identifies the datafeed.\r\nThis identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.\r\nIt must start and end with alphanumeric characters.", "name": "datafeed_id", "required": true, "type": { @@ -152091,7 +152163,7 @@ ], "query": [ { - "description": "If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the\n`_all` string or when no indices are specified.", + "description": "If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the\r\n`_all` string or when no indices are specified.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -152104,7 +152176,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\r\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\r\n\r\n* `all`: Match any data stream or index, including hidden ones.\r\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\r\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\r\n* `none`: Wildcard patterns are not accepted.\r\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -152489,9 +152561,9 @@ "kind": "properties", "properties": [ { - "description": "Advanced configuration option. Specifies whether this job can open when\nthere is insufficient machine learning node capacity for it to be\nimmediately assigned to a node. If `false` and a machine learning node\nwith capacity to run the job cannot immediately be found, the open\nanomaly detection jobs API returns an error. However, this is also\nsubject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. 
If this\noption is set to `true`, the open anomaly detection jobs API does not\nreturn an error and the job waits in the opening state until sufficient\nmachine learning node capacity is available.", + "description": "Advanced configuration option. Specifies whether this job can open when\r\nthere is insufficient machine learning node capacity for it to be\r\nimmediately assigned to a node. If `false` and a machine learning node\r\nwith capacity to run the job cannot immediately be found, the open\r\nanomaly detection jobs API returns an error. However, this is also\r\nsubject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this\r\noption is set to `true`, the open anomaly detection jobs API does not\r\nreturn an error and the job waits in the opening state until sufficient\r\nmachine learning node capacity is available.", "docId": "ml-settings", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-settings.html\r", "name": "allow_lazy_open", "required": false, "serverDefault": false, @@ -152515,7 +152587,7 @@ } }, { - "description": "Advanced configuration option. The time between each periodic persistence\nof the model.\nThe default value is a randomized value between 3 to 4 hours, which\navoids all jobs persisting at exactly the same time. The smallest allowed\nvalue is 1 hour.\nFor very large models (several GB), persistence could take 10-20 minutes,\nso do not set the value too low.\nIf the job is open when you make the update, you must stop the datafeed,\nclose the job, then reopen the job and restart the datafeed for the\nchanges to take effect.", + "description": "Advanced configuration option. The time between each periodic persistence\r\nof the model.\r\nThe default value is a randomized value between 3 to 4 hours, which\r\navoids all jobs persisting at exactly the same time. The smallest allowed\r\nvalue is 1 hour.\r\nFor very large models (several GB), persistence could take 10-20 minutes,\r\nso do not set the value too low.\r\nIf the job is open when you make the update, you must stop the datafeed,\r\nclose the job, then reopen the job and restart the datafeed for the\r\nchanges to take effect.", "name": "background_persist_interval", "required": false, "type": { @@ -152527,7 +152599,7 @@ } }, { - "description": "Advanced configuration option. Contains custom meta data about the job.\nFor example, it can contain custom URL information as shown in Adding\ncustom URLs to machine learning results.", + "description": "Advanced configuration option. Contains custom meta data about the job.\r\nFor example, it can contain custom URL information as shown in Adding\r\ncustom URLs to machine learning results.", "docId": "ml.customUrls", "name": "custom_settings", "required": false, @@ -152595,9 +152667,9 @@ } }, { - "description": "Advanced configuration option, which affects the automatic removal of old\nmodel snapshots for this job. It specifies a period of time (in days)\nafter which only the first snapshot per day is retained. This period is\nrelative to the timestamp of the most recent snapshot for this job. Valid\nvalues range from 0 to `model_snapshot_retention_days`. For jobs created\nbefore version 7.8.0, the default value matches\n`model_snapshot_retention_days`.", + "description": "Advanced configuration option, which affects the automatic removal of old\r\nmodel snapshots for this job. 
It specifies a period of time (in days)\r\nafter which only the first snapshot per day is retained. This period is\r\nrelative to the timestamp of the most recent snapshot for this job. Valid\r\nvalues range from 0 to `model_snapshot_retention_days`. For jobs created\r\nbefore version 7.8.0, the default value matches\r\n`model_snapshot_retention_days`.", "docId": "ml-model-snapshots", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-ad-run-jobs.html#ml-ad-model-snapshots", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-ad-run-jobs.html#ml-ad-model-snapshots\r", "name": "daily_model_snapshot_retention_after_days", "required": false, "serverDefault": 1, @@ -152610,9 +152682,9 @@ } }, { - "description": "Advanced configuration option, which affects the automatic removal of old\nmodel snapshots for this job. It specifies the maximum period of time (in\ndays) that snapshots are retained. This period is relative to the\ntimestamp of the most recent snapshot for this job.", + "description": "Advanced configuration option, which affects the automatic removal of old\r\nmodel snapshots for this job. It specifies the maximum period of time (in\r\ndays) that snapshots are retained. This period is relative to the\r\ntimestamp of the most recent snapshot for this job.", "docId": "ml-model-snapshots", - "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-ad-run-jobs.html#ml-ad-model-snapshots", + "docUrl": "https://www.elastic.co/guide/en/machine-learning/{branch}/ml-ad-run-jobs.html#ml-ad-model-snapshots\r", "name": "model_snapshot_retention_days", "required": false, "serverDefault": 10, @@ -152625,7 +152697,7 @@ } }, { - "description": "Advanced configuration option. The period over which adjustments to the\nscore are applied, as new data is seen.", + "description": "Advanced configuration option. The period over which adjustments to the\r\nscore are applied, as new data is seen.", "name": "renormalization_window_days", "required": false, "type": { @@ -152637,7 +152709,7 @@ } }, { - "description": "Advanced configuration option. The period of time (in days) that results\nare retained. Age is calculated relative to the timestamp of the latest\nbucket result. If this property has a non-null value, once per day at\n00:30 (server time), results that are the specified number of days older\nthan the latest bucket result are deleted from Elasticsearch. The default\nvalue is null, which means all results are retained.", + "description": "Advanced configuration option. The period of time (in days) that results\r\nare retained. Age is calculated relative to the timestamp of the latest\r\nbucket result. If this property has a non-null value, once per day at\r\n00:30 (server time), results that are the specified number of days older\r\nthan the latest bucket result are deleted from Elasticsearch. The default\r\nvalue is null, which means all results are retained.", "name": "results_retention_days", "required": false, "type": { @@ -153017,7 +153089,7 @@ } }, { - "description": "If `true`, this snapshot will not be deleted during automatic cleanup of\nsnapshots older than `model_snapshot_retention_days`. However, this\nsnapshot will be deleted when the job is deleted.", + "description": "If `true`, this snapshot will not be deleted during automatic cleanup of\r\nsnapshots older than `model_snapshot_retention_days`. 
However, this\r\nsnapshot will be deleted when the job is deleted.", "name": "retain", "required": false, "serverDefault": false, @@ -153114,7 +153186,7 @@ "body": { "kind": "no_body" }, - "description": "Upgrades an anomaly detection model snapshot to the latest major version.\nOver time, older snapshot formats are deprecated and removed. Anomaly\ndetection jobs support only snapshots that are from the current or previous\nmajor version.\nThis API provides a means to upgrade a snapshot to the current major version.\nThis aids in preparing the cluster for an upgrade to the next major version.\nOnly one snapshot per anomaly detection job can be upgraded at a time and the\nupgraded snapshot cannot be the current snapshot of the anomaly detection\njob.", + "description": "Upgrades an anomaly detection model snapshot to the latest major version.\r\nOver time, older snapshot formats are deprecated and removed. Anomaly\r\ndetection jobs support only snapshots that are from the current or previous\r\nmajor version.\r\nThis API provides a means to upgrade a snapshot to the current major version.\r\nThis aids in preparing the cluster for an upgrade to the next major version.\r\nOnly one snapshot per anomaly detection job can be upgraded at a time and the\r\nupgraded snapshot cannot be the current snapshot of the anomaly detection\r\njob.", "inherits": { "type": { "name": "RequestBase", @@ -153154,7 +153226,7 @@ ], "query": [ { - "description": "When true, the API won’t respond until the upgrade is complete.\nOtherwise, it responds as soon as the upgrade task is assigned to a node.", + "description": "When true, the API won’t respond until the upgrade is complete.\r\nOtherwise, it responds as soon as the upgrade task is assigned to a node.", "name": "wait_for_completion", "required": false, "serverDefault": false, @@ -156068,7 +156140,7 @@ "codegenName": "node_stats", "description": "Contains statistics about the number of nodes selected by the request’s node filters.", "docId": "cluster-nodes", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster.html#cluster-nodes", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster.html#cluster-nodes\r", "name": "_nodes", "required": false, "type": { @@ -156731,7 +156803,7 @@ } }, { - "description": "A flag that tells whether or not this object has been archived. When a repository is closed or updated the\nrepository metering information is archived and kept for a certain period of time. This allows retrieving the\nrepository metering information of previous repository instantiations.", + "description": "A flag that tells whether or not this object has been archived. When a repository is closed or updated the\r\nrepository metering information is archived and kept for a certain period of time. This allows retrieving the\r\nrepository metering information of previous repository instantiations.", "name": "archived", "required": true, "type": { @@ -156743,7 +156815,7 @@ } }, { - "description": "The cluster state version when this object was archived, this field can be used as a logical timestamp to delete\nall the archived metrics up to an observed version. This field is only present for archived repository metering\ninformation objects. The main purpose of this field is to avoid possible race conditions during repository metering\ninformation deletions, i.e. 
deleting archived repositories metering information that we haven’t observed yet.", + "description": "The cluster state version when this object was archived, this field can be used as a logical timestamp to delete\r\nall the archived metrics up to an observed version. This field is only present for archived repository metering\r\ninformation objects. The main purpose of this field is to avoid possible race conditions during repository metering\r\ninformation deletions, i.e. deleting archived repositories metering information that we haven’t observed yet.", "name": "cluster_version", "required": false, "type": { @@ -156873,7 +156945,7 @@ } }, { - "description": "Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads\ncan perform multiple http requests to insert a single object but they are considered as a single request\nsince they are billed as an individual operation. (GCP)", + "description": "Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads\r\ncan perform multiple http requests to insert a single object but they are considered as a single request\r\nsince they are billed as an individual operation. (GCP)", "name": "InsertObject", "required": false, "type": { @@ -157744,7 +157816,7 @@ }, "path": [ { - "description": "Comma-separated list of node IDs or names used to limit returned information.\nAll the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).", + "description": "Comma-separated list of node IDs or names used to limit returned information.\r\nAll the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).", "name": "node_id", "required": true, "type": { @@ -157847,7 +157919,7 @@ "body": { "kind": "no_body" }, - "description": "You can use the cluster repositories metering API to retrieve repositories metering information in a cluster.\nThis API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the\ninformation needed to compute aggregations over a period of time. Additionally, the information exposed by this\nAPI is volatile, meaning that it won’t be present after node restarts.", + "description": "You can use the cluster repositories metering API to retrieve repositories metering information in a cluster.\r\nThis API exposes monotonically non-decreasing counters and it’s expected that clients would durably store the\r\ninformation needed to compute aggregations over a period of time. 
Additionally, the information exposed by this\r\nAPI is volatile, meaning that it won’t be present after node restarts.", "inherits": { "type": { "name": "RequestBase", @@ -157861,7 +157933,7 @@ }, "path": [ { - "description": "Comma-separated list of node IDs or names used to limit returned information.\nAll the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).", + "description": "Comma-separated list of node IDs or names used to limit returned information.\r\nAll the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes).", "name": "node_id", "required": true, "type": { @@ -158012,7 +158084,7 @@ "body": { "kind": "no_body" }, - "description": "This API yields a breakdown of the hot threads on each selected node in the cluster.\nThe output is plain text with a breakdown of each node’s top hot threads.", + "description": "This API yields a breakdown of the hot threads on each selected node in the cluster.\r\nThe output is plain text with a breakdown of each node’s top hot threads.", "inherits": { "type": { "name": "RequestBase", @@ -158040,7 +158112,7 @@ ], "query": [ { - "description": "If true, known idle threads (e.g. waiting in a socket select, or to get\na task from an empty queue) are filtered out.", + "description": "If true, known idle threads (e.g. waiting in a socket select, or to get\r\na task from an empty queue) are filtered out.", "name": "ignore_idle_threads", "required": false, "serverDefault": true, @@ -158079,7 +158151,7 @@ } }, { - "description": "Period to wait for a connection to the master node. If no response\nis received before the timeout expires, the request fails and\nreturns an error.", + "description": "Period to wait for a connection to the master node. If no response\r\nis received before the timeout expires, the request fails and\r\nreturns an error.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -158105,7 +158177,7 @@ } }, { - "description": "Period to wait for a response. If no response is received\nbefore the timeout expires, the request fails and returns an error.", + "description": "Period to wait for a response. If no response is received\r\nbefore the timeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -158420,7 +158492,7 @@ { "description": "Total heap allowed to be used to hold recently indexed documents before they must be written to disk. This size is a shared pool across all shards on this node, and is controlled by Indexing Buffer settings.", "docId": "indexing-buffer", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indexing-buffer.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indexing-buffer.html\r", "name": "total_indexing_buffer", "required": false, "type": { @@ -162961,7 +163033,7 @@ "kind": "properties", "properties": [ { - "description": "A cron string which defines the intervals when the rollup job should be executed. When the interval\ntriggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated\nto the time interval of the data being rolled up. For example, you may wish to create hourly rollups\nof your document but to only run the indexer on a daily basis at midnight, as defined by the cron. 
The\ncron pattern is defined just like a Watcher cron schedule.", + "description": "A cron string which defines the intervals when the rollup job should be executed. When the interval\r\ntriggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated\r\nto the time interval of the data being rolled up. For example, you may wish to create hourly rollups\r\nof your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The\r\ncron pattern is defined just like a Watcher cron schedule.", "name": "cron", "required": true, "type": { @@ -162973,7 +163045,7 @@ } }, { - "description": "Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be\navailable later for aggregating into buckets. These aggs and fields can be used in any combination. Think of\nthe groups configuration as defining a set of tools that can later be used in aggregations to partition the\ndata. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide\nenough flexibility that you simply need to determine which fields are needed, not in what order they are needed.", + "description": "Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be\r\navailable later for aggregating into buckets. These aggs and fields can be used in any combination. Think of\r\nthe groups configuration as defining a set of tools that can later be used in aggregations to partition the\r\ndata. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide\r\nenough flexibility that you simply need to determine which fields are needed, not in what order they are needed.", "name": "groups", "required": true, "type": { @@ -162985,7 +163057,7 @@ } }, { - "description": "The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to\nrollup the entire index or index-pattern.", + "description": "The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to\r\nrollup the entire index or index-pattern.", "name": "index_pattern", "required": true, "type": { @@ -162997,7 +163069,7 @@ } }, { - "description": "Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each\ngroup. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined\non a per-field basis and for each field you configure which metric should be collected.", + "description": "Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each\r\ngroup. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined\r\non a per-field basis and for each field you configure which metric should be collected.", "name": "metrics", "required": false, "type": { @@ -163012,7 +163084,7 @@ } }, { - "description": "The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends\nto execute faster, but requires more memory during processing. This value has no effect on how the data is\nrolled up; it is merely used for tweaking the speed or memory cost of the indexer.", + "description": "The number of bucket results that are processed on each iteration of the rollup indexer. 
A larger value tends\r\nto execute faster, but requires more memory during processing. This value has no effect on how the data is\r\nrolled up; it is merely used for tweaking the speed or memory cost of the indexer.", "name": "page_size", "required": true, "type": { @@ -163075,7 +163147,7 @@ }, "path": [ { - "description": "Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the\ndata that is associated with the rollup job. The ID is persistent; it is stored with the rolled\nup data. If you create a job, let it run for a while, then delete the job, the data that the job\nrolled up is still be associated with this job ID. You cannot create a new job with the same ID\nsince that could lead to problems with mismatched job configurations.", + "description": "Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the\r\ndata that is associated with the rollup job. The ID is persistent; it is stored with the rolled\r\nup data. If you create a job, let it run for a while, then delete the job, the data that the job\r\nrolled up is still be associated with this job ID. You cannot create a new job with the same ID\r\nsince that could lead to problems with mismatched job configurations.", "name": "id", "required": true, "type": { @@ -164926,7 +164998,7 @@ } }, { - "description": "Invalidation status for the API key.\nIf the key has been invalidated, it has a value of `true`. Otherwise, it is `false`.", + "description": "Invalidation status for the API key.\r\nIf the key has been invalidated, it has a value of `true`. Otherwise, it is `false`.", "name": "invalidated", "required": false, "type": { @@ -164993,7 +165065,7 @@ } }, { - "description": "The role descriptors assigned to this API key when it was created or last updated.\nAn empty role descriptor means the API key inherits the owner user’s permissions.", + "description": "The role descriptors assigned to this API key when it was created or last updated.\r\nAn empty role descriptor means the API key inherits the owner user’s permissions.", "name": "role_descriptors", "required": false, "type": { @@ -165022,7 +165094,7 @@ "since": "8.5.0" } }, - "description": "The owner user’s permissions associated with the API key.\nIt is a point-in-time snapshot captured at creation and subsequent updates.\nAn API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions.", + "description": "The owner user’s permissions associated with the API key.\r\nIt is a point-in-time snapshot captured at creation and subsequent updates.\r\nAn API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions.", "name": "limited_by", "required": false, "since": "8.5.0", @@ -165512,7 +165584,7 @@ { "description": "The document fields that the owners of the role have read access to.", "docId": "field-and-document-access-control", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-and-document-access-control.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-and-document-access-control.html\r", "name": "field_security", "required": false, "type": { @@ -165584,7 +165656,7 @@ "query", "template" ], - "description": "While creating or updating a role you can provide either a JSON structure or a string to the API.\nHowever, the response provided by Elasticsearch will only be string with a json-as-text content.\n\nSince this is embedded in 
`IndicesPrivileges`, the same structure is used for clarity in both contexts.", + "description": "While creating or updating a role you can provide either a JSON structure or a string to the API.\r\nHowever, the response provided by Elasticsearch will only be string with a json-as-text content.\r\n\r\nSince this is embedded in `IndicesPrivileges`, the same structure is used for clarity in both contexts.", "kind": "type_alias", "name": { "name": "IndicesPrivilegesQuery", @@ -165792,7 +165864,7 @@ { "description": "A list of users that the API keys can impersonate.", "docId": "run-as-privilege", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/run-as-privilege.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/run-as-privilege.html\r", "name": "run_as", "required": false, "type": { @@ -165937,7 +166009,7 @@ { "description": "A list of users that the API keys can impersonate.", "docId": "run-as-privilege", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/run-as-privilege.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/run-as-privilege.html\r", "name": "run_as", "required": false, "type": { @@ -166199,9 +166271,9 @@ }, "properties": [ { - "description": "When you create a role, you can specify a query that defines the document level security permissions. You can optionally\nuse Mustache templates in the role query to insert the username of the current authenticated user into the role.\nLike other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based\ntemplates and define custom parameters. You access the details for the current authenticated user through the _user parameter.", + "description": "When you create a role, you can specify a query that defines the document level security permissions. You can optionally\r\nuse Mustache templates in the role query to insert the username of the current authenticated user into the role.\r\nLike other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based\r\ntemplates and define custom parameters. 
You access the details for the current authenticated user through the _user parameter.", "docId": "templating-role-query", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-and-document-access-control.html#templating-role-query", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-and-document-access-control.html#templating-role-query\r", "name": "template", "required": false, "type": { @@ -166391,7 +166463,7 @@ { "description": "The document fields that the owners of the role have read access to.", "docId": "field-and-document-access-control", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-and-document-access-control.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-and-document-access-control.html\r", "name": "field_security", "required": false, "type": { @@ -166823,7 +166895,7 @@ "body": { "kind": "no_body" }, - "description": "Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user.\nA successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user.\nIf the user cannot be authenticated, this API returns a 401 status code.", + "description": "Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user.\r\nA successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user.\r\nIf the user cannot be authenticated, this API returns a 401 status code.", "inherits": { "type": { "name": "RequestBase", @@ -167066,7 +167138,7 @@ } }, { - "description": "A hash of the new password value. This must be produced using the same\nhashing algorithm as has been configured for password storage. For more details,\nsee the explanation of the `xpack.security.authc.password_hashing.algorithm`\nsetting.", + "description": "A hash of the new password value. This must be produced using the same\r\nhashing algorithm as has been configured for password storage. For more details,\r\nsee the explanation of the `xpack.security.authc.password_hashing.algorithm`\r\nsetting.", "name": "password_hash", "required": false, "type": { @@ -167093,7 +167165,7 @@ }, "path": [ { - "description": "The user whose password you want to change. If you do not specify this\nparameter, the password is changed for the current user.", + "description": "The user whose password you want to change. 
If you do not specify this\r\nparameter, the password is changed for the current user.", "name": "username", "required": false, "type": { @@ -167140,7 +167212,7 @@ "body": { "kind": "no_body" }, - "description": "Evicts a subset of all entries from the API key cache.\nThe cache is also automatically cleared on state changes of the security index.", + "description": "Evicts a subset of all entries from the API key cache.\r\nThe cache is also automatically cleared on state changes of the security index.", "inherits": { "type": { "name": "RequestBase", @@ -167154,7 +167226,7 @@ }, "path": [ { - "description": "Comma-separated list of API key IDs to evict from the API key cache.\nTo evict all API keys, use `*`.\nDoes not support other wildcard patterns.", + "description": "Comma-separated list of API key IDs to evict from the API key cache.\r\nTo evict all API keys, use `*`.\r\nDoes not support other wildcard patterns.", "name": "ids", "required": true, "type": { @@ -167677,7 +167749,7 @@ { "description": "An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.", "docId": "security-api-put-role", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-api-put-role.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-api-put-role.html\r", "name": "role_descriptors", "required": false, "type": { @@ -167720,7 +167792,7 @@ } ] }, - "description": "Creates an API key for access without requiring basic authentication.\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\nNOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.", + "description": "Creates an API key for access without requiring basic authentication.\r\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\r\nIf applicable, it also returns expiration information for the API key in milliseconds.\r\nNOTE: By default, API keys never expire. 
You can specify expiration information when you create the API keys.", "inherits": { "type": { "name": "RequestBase", @@ -167808,7 +167880,7 @@ "since": "7.16.0" } }, - "description": "API key credentials which is the base64-encoding of\nthe UTF-8 representation of `id` and `api_key` joined\nby a colon (`:`).", + "description": "API key credentials which is the base64-encoding of\r\nthe UTF-8 representation of `id` and `api_key` joined\r\nby a colon (`:`).", "name": "encoded", "required": true, "since": "7.16.0", @@ -168503,7 +168575,7 @@ ], "query": [ { - "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\nvisible to search, if 'false' do nothing with refreshes.", + "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\r\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\r\nvisible to search, if 'false' do nothing with refreshes.", "name": "refresh", "required": false, "serverDefault": "false", @@ -168632,7 +168704,7 @@ ], "query": [ { - "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\nvisible to search, if 'false' do nothing with refreshes.", + "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\r\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\r\nvisible to search, if 'false' do nothing with refreshes.", "name": "refresh", "required": false, "serverDefault": "false", @@ -168867,7 +168939,7 @@ "body": { "kind": "no_body" }, - "description": "Retrieves information for one or more API keys.\nNOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Retrieves information for one or more API keys.\r\nNOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\r\nIf you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", "inherits": { "type": { "name": "RequestBase", @@ -168882,7 +168954,7 @@ "path": [], "query": [ { - "description": "An API key id.\nThis parameter cannot be used with any of `name`, `realm_name` or `username`.", + "description": "An API key id.\r\nThis parameter cannot be used with any of `name`, `realm_name` or `username`.", "name": "id", "required": false, "type": { @@ -168894,7 +168966,7 @@ } }, { - "description": "An API key name.\nThis parameter cannot be used with any of `id`, `realm_name` or `username`.\nIt supports prefix search with wildcard.", + "description": "An API key name.\r\nThis parameter cannot be used with any of `id`, `realm_name` or `username`.\r\nIt supports prefix search with wildcard.", "name": "name", "required": false, "type": { @@ -168906,7 +168978,7 @@ } }, { - "description": "A boolean flag that can be used to query API keys owned by the currently authenticated user.\nThe `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.", + "description": "A boolean flag that can be used to query API keys 
owned by the currently authenticated user.\r\nThe `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.", "name": "owner", "required": false, "serverDefault": false, @@ -168919,7 +168991,7 @@ } }, { - "description": "The name of an authentication realm.\nThis parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.", + "description": "The name of an authentication realm.\r\nThis parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.", "name": "realm_name", "required": false, "type": { @@ -168931,7 +169003,7 @@ } }, { - "description": "The username of a user.\nThis parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.", + "description": "The username of a user.\r\nThis parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.", "name": "username", "required": false, "type": { @@ -168949,7 +169021,7 @@ "since": "8.5.0" } }, - "description": "Return the snapshot of the owner user's role descriptors\nassociated with the API key. An API key's actual\npermission is the intersection of its assigned role\ndescriptors and the owner user's role descriptors.", + "description": "Return the snapshot of the owner user's role descriptors\r\nassociated with the API key. An API key's actual\r\npermission is the intersection of its assigned role\r\ndescriptors and the owner user's role descriptors.", "name": "with_limited_by", "required": false, "serverDefault": false, @@ -169148,7 +169220,7 @@ "body": { "kind": "no_body" }, - "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.", + "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\r\nThe get roles API cannot retrieve roles that are defined in roles files.", "inherits": { "type": { "name": "RequestBase", @@ -170434,7 +170506,7 @@ ], "query": [ { - "description": "List of filters for the `data` field of the profile document.\nTo return all content use `data=*`. To return a subset of content\nuse `data=` to retrieve content nested under the specified ``.\nBy default returns no `data` content.", + "description": "List of filters for the `data` field of the profile document.\r\nTo return all content use `data=*`. 
To return a subset of content\r\nuse `data=` to retrieve content nested under the specified ``.\r\nBy default returns no `data` content.", "name": "data", "required": false, "type": { @@ -170548,7 +170620,7 @@ } }, { - "description": "The role descriptors for this API key.\nThis parameter is optional.\nWhen it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token.\nIf you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the permissions of the user or access token.", + "description": "The role descriptors for this API key.\r\nThis parameter is optional.\r\nWhen it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token.\r\nIf you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the permissions of the user or access token.", "name": "role_descriptors", "required": false, "type": { @@ -170597,7 +170669,7 @@ } }, { - "description": "Arbitrary metadata that you want to associate with the API key.\nIt supports nested data structure.\nWithin the `metadata` object, keys beginning with `_` are reserved for system usage.", + "description": "Arbitrary metadata that you want to associate with the API key.\r\nIt supports nested data structure.\r\nWithin the `metadata` object, keys beginning with `_` are reserved for system usage.", "name": "metadata", "required": false, "type": { @@ -170643,7 +170715,7 @@ } }, { - "description": "The user’s access token.\nIf you specify the `access_token` grant type, this parameter is required.\nIt is not valid with other grant types.", + "description": "The user’s access token.\r\nIf you specify the `access_token` grant type, this parameter is required.\r\nIt is not valid with other grant types.", "name": "access_token", "required": false, "type": { @@ -170655,7 +170727,7 @@ } }, { - "description": "The user name that identifies the user.\nIf you specify the `password` grant type, this parameter is required.\nIt is not valid with other grant types.", + "description": "The user name that identifies the user.\r\nIf you specify the `password` grant type, this parameter is required.\r\nIt is not valid with other grant types.", "name": "username", "required": false, "type": { @@ -170667,7 +170739,7 @@ } }, { - "description": "The user’s password. If you specify the `password` grant type, this parameter is required.\nIt is not valid with other grant types.", + "description": "The user’s password. 
If you specify the `password` grant type, this parameter is required.\r\nIt is not valid with other grant types.", "name": "password", "required": false, "type": { @@ -170692,7 +170764,7 @@ } ] }, - "description": "Creates an API key on behalf of another user.\nThis API is similar to Create API keys, however it creates the API key for a user that is different than the user that runs the API.\nThe caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created.\nIt is not possible to use this API to create an API key without that user’s credentials.\nThe user, for whom the authentication credentials is provided, can optionally \"run as\" (impersonate) another user.\nIn this case, the API key will be created on behalf of the impersonated user.\n\nThis API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.\n\nA successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. You can specify expiration information when you create the API keys.", + "description": "Creates an API key on behalf of another user.\r\nThis API is similar to Create API keys, however it creates the API key for a user that is different than the user that runs the API.\r\nThe caller must have authentication credentials (either an access token, or a username and password) for the user on whose behalf the API key will be created.\r\nIt is not possible to use this API to create an API key without that user’s credentials.\r\nThe user, for whom the authentication credentials is provided, can optionally \"run as\" (impersonate) another user.\r\nIn this case, the API key will be created on behalf of the impersonated user.\r\n\r\nThis API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.\r\n\r\nA successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.\r\nIf applicable, it also returns expiration information for the API key in milliseconds.\r\n\r\nBy default, API keys never expire. 
You can specify expiration information when you create the API keys.", "inherits": { "type": { "name": "RequestBase", @@ -170898,7 +170970,7 @@ } }, { - "description": "This needs to be set to true (default is false) if using wildcards or regexps for patterns that cover restricted indices.\nImplicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false.\nIf restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of allow_restricted_indices.", + "description": "This needs to be set to true (default is false) if using wildcards or regexps for patterns that cover restricted indices.\r\nImplicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false.\r\nIf restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of allow_restricted_indices.", "name": "allow_restricted_indices", "required": false, "type": { @@ -171287,7 +171359,7 @@ "kind": "properties", "properties": [ { - "description": "The subset of the requested profile IDs of the users that\nhave all the requested privileges.", + "description": "The subset of the requested profile IDs of the users that\r\nhave all the requested privileges.", "name": "has_privilege_uids", "required": true, "type": { @@ -171302,7 +171374,7 @@ } }, { - "description": "The subset of the requested profile IDs for which an error\nwas encountered. It does not include the missing profile IDs\nor the profile IDs of the users that do not have all the\nrequested privileges. This field is absent if empty.", + "description": "The subset of the requested profile IDs for which an error\r\nwas encountered. It does not include the missing profile IDs\r\nor the profile IDs of the users that do not have all the\r\nrequested privileges. 
This field is absent if empty.", "name": "errors", "required": false, "type": { @@ -171341,7 +171413,7 @@ } }, { - "description": "A list of API key ids.\nThis parameter cannot be used with any of `name`, `realm_name`, or `username`.", + "description": "A list of API key ids.\r\nThis parameter cannot be used with any of `name`, `realm_name`, or `username`.", "name": "ids", "required": false, "type": { @@ -171356,7 +171428,7 @@ } }, { - "description": "An API key name.\nThis parameter cannot be used with any of `ids`, `realm_name` or `username`.", + "description": "An API key name.\r\nThis parameter cannot be used with any of `ids`, `realm_name` or `username`.", "name": "name", "required": false, "type": { @@ -171368,7 +171440,7 @@ } }, { - "description": "Can be used to query API keys owned by the currently authenticated user.\nThe `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.", + "description": "Can be used to query API keys owned by the currently authenticated user.\r\nThe `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.", "name": "owner", "required": false, "serverDefault": false, @@ -171381,7 +171453,7 @@ } }, { - "description": "The name of an authentication realm.\nThis parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.", + "description": "The name of an authentication realm.\r\nThis parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.", "name": "realm_name", "required": false, "type": { @@ -171393,7 +171465,7 @@ } }, { - "description": "The username of a user.\nThis parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.", + "description": "The username of a user.\r\nThis parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.", "name": "username", "required": false, "type": { @@ -171406,7 +171478,7 @@ } ] }, - "description": "Invalidates one or more API keys.\nThe `manage_api_key` privilege allows deleting any API keys.\nThe `manage_own_api_key` only allows deleting API keys that are owned by the user.\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\n- Set the parameter `owner=true`.\n- Or, set both `username` and `realm_name` to match the user’s identity.\n- Or, if the request is issued by an API key, i.e. an API key invalidates itself, specify its ID in the `ids` field.", + "description": "Invalidates one or more API keys.\r\nThe `manage_api_key` privilege allows deleting any API keys.\r\nThe `manage_own_api_key` only allows deleting API keys that are owned by the user.\r\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\r\n- Set the parameter `owner=true`.\r\n- Or, set both `username` and `realm_name` to match the user’s identity.\r\n- Or, if the request is issued by an API key, i.e. 
an API key invalidates itself, specify its ID in the `ids` field.", "inherits": { "type": { "name": "RequestBase", @@ -171866,7 +171938,7 @@ { "description": "A list of users that the owners of this role can impersonate.", "docId": "run-as-privilege", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/run-as-privilege.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/run-as-privilege.html\r", "name": "run_as", "required": false, "type": { @@ -171894,7 +171966,7 @@ } ] }, - "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.", + "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\r\nThe create or update roles API cannot update roles that are defined in roles files.", "inherits": { "type": { "name": "RequestBase", @@ -172305,7 +172377,7 @@ "kind": "properties", "properties": [ { - "description": "A query to filter which API keys to return.\nThe query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`.\nYou can query all public information associated with an API key.", + "description": "A query to filter which API keys to return.\r\nThe query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`.\r\nYou can query all public information associated with an API key.", "name": "query", "required": false, "type": { @@ -172317,7 +172389,7 @@ } }, { - "description": "Starting document offset.\nBy default, you cannot page through more than 10,000 hits using the from and size parameters.\nTo page through more hits, use the `search_after` parameter.", + "description": "Starting document offset.\r\nBy default, you cannot page through more than 10,000 hits using the from and size parameters.\r\nTo page through more hits, use the `search_after` parameter.", "name": "from", "required": false, "serverDefault": 0, @@ -172330,9 +172402,9 @@ } }, { - "description": "Other than `id`, all public fields of an API key are eligible for sorting.\nIn addition, sort can also be applied to the `_doc` field to sort by index order.", + "description": "Other than `id`, all public fields of an API key are eligible for sorting.\r\nIn addition, sort can also be applied to the `_doc` field to sort by index order.", "docId": "sort-search-results", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sort-search-results.html\r", "name": "sort", "required": false, "type": { @@ -172344,7 +172416,7 @@ } }, { - "description": "The number of hits to return.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "description": "The number of hits to return.\r\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\r\nTo page through more hits, use the `search_after` parameter.", "name": "size", "required": false, "serverDefault": 10, @@ -172391,7 +172463,7 @@ "since": "8.5.0" } }, - "description": "Return the snapshot of the owner user's role descriptors associated with the API key. 
\nAn API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors.", + "description": "Return the snapshot of the owner user's role descriptors associated with the API key. \r\nAn API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors.", "name": "with_limited_by", "required": false, "since": "8.5.0", @@ -172691,7 +172763,7 @@ } }, { - "description": "The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout.\nThis query should include a single parameter named SAMLRequest that contains a SAML logout request that is deflated and Base64 encoded.\nIf the SAML IdP has signed the logout request, the URL should include two extra parameters named SigAlg and Signature that contain the algorithm used for the signature and the signature value itself.\nIn order for Elasticsearch to be able to verify the IdP’s signature, the value of the query_string field must be an exact match to the string provided by the browser.\nThe client application must not attempt to parse or process the string in any way.", + "description": "The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout.\r\nThis query should include a single parameter named SAMLRequest that contains a SAML logout request that is deflated and Base64 encoded.\r\nIf the SAML IdP has signed the logout request, the URL should include two extra parameters named SigAlg and Signature that contain the algorithm used for the signature and the signature value itself.\r\nIn order for Elasticsearch to be able to verify the IdP’s signature, the value of the query_string field must be an exact match to the string provided by the browser.\r\nThe client application must not attempt to parse or process the string in any way.", "name": "query_string", "required": true, "type": { @@ -172786,7 +172858,7 @@ "kind": "properties", "properties": [ { - "description": "The access token that was returned as a response to calling the SAML authenticate API.\nAlternatively, the most recent token that was received after refreshing the original one by using a refresh_token.", + "description": "The access token that was returned as a response to calling the SAML authenticate API.\r\nAlternatively, the most recent token that was received after refreshing the original one by using a refresh_token.", "name": "token", "required": true, "type": { @@ -172798,7 +172870,7 @@ } }, { - "description": "The refresh token that was returned as a response to calling the SAML authenticate API.\nAlternatively, the most recent refresh token that was received after refreshing the original access token.", + "description": "The refresh token that was returned as a response to calling the SAML authenticate API.\r\nAlternatively, the most recent refresh token that was received after refreshing the original access token.", "name": "refresh_token", "required": false, "type": { @@ -172859,7 +172931,7 @@ "kind": "properties", "properties": [ { - "description": "The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch.\nThe realm is used to generate the authentication request. You must specify either this parameter or the realm parameter.", + "description": "The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch.\r\nThe realm is used to generate the authentication request. 
You must specify either this parameter or the realm parameter.", "name": "acs", "required": false, "type": { @@ -172871,7 +172943,7 @@ } }, { - "description": "The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request.\nYou must specify either this parameter or the acs parameter.", + "description": "The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request.\r\nYou must specify either this parameter or the acs parameter.", "name": "realm", "required": false, "type": { @@ -172883,7 +172955,7 @@ } }, { - "description": "A string that will be included in the redirect URL that this API returns as the RelayState query parameter.\nIf the Authentication Request is signed, this value is used as part of the signature computation.", + "description": "A string that will be included in the redirect URL that this API returns as the RelayState query parameter.\r\nIf the Authentication Request is signed, this value is used as part of the signature computation.", "name": "relay_state", "required": false, "type": { @@ -173041,7 +173113,7 @@ } }, { - "description": "A single key-value pair to match against the labels section\nof a profile. A profile is considered matching if it matches\nat least one of the strings.", + "description": "A single key-value pair to match against the labels section\r\nof a profile. A profile is considered matching if it matches\r\nat least one of the strings.", "name": "labels", "required": false, "type": { @@ -173089,7 +173161,7 @@ "kind": "properties", "properties": [ { - "description": "Query string used to match name-related fields in user profile documents.\nName-related fields are the user's `username`, `full_name`, and `email`.", + "description": "Query string used to match name-related fields in user profile documents.\r\nName-related fields are the user's `username`, `full_name`, and `email`.", "name": "name", "required": false, "type": { @@ -173114,7 +173186,7 @@ } }, { - "description": "List of filters for the `data` field of the profile document.\nTo return all content use `data=*`. To return a subset of content\nuse `data=` to retrieve content nested under the specified ``.\nBy default returns no `data` content.", + "description": "List of filters for the `data` field of the profile document.\r\nTo return all content use `data=*`. To return a subset of content\r\nuse `data=` to retrieve content nested under the specified ``.\r\nBy default returns no `data` content.", "name": "data", "required": false, "type": { @@ -173141,7 +173213,7 @@ } }, { - "description": "Extra search criteria to improve relevance of the suggestion result.\nProfiles matching the spcified hint are ranked higher in the response.\nProfiles not matching the hint don't exclude the profile from the response\nas long as the profile matches the `name` field query.", + "description": "Extra search criteria to improve relevance of the suggestion result.\r\nProfiles matching the spcified hint are ranked higher in the response.\r\nProfiles not matching the hint don't exclude the profile from the response\r\nas long as the profile matches the `name` field query.", "name": "hint", "required": false, "type": { @@ -173169,7 +173241,7 @@ "path": [], "query": [ { - "description": "List of filters for the `data` field of the profile document.\nTo return all content use `data=*`. 
To return a subset of content\nuse `data=` to retrieve content nested under the specified ``.\nBy default returns no `data` content.", + "description": "List of filters for the `data` field of the profile document.\r\nTo return all content use `data=*`. To return a subset of content\r\nuse `data=` to retrieve content nested under the specified ``.\r\nBy default returns no `data` content.", "name": "data", "required": false, "type": { @@ -173289,7 +173361,7 @@ { "description": "An array of role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for create role API. For more details, see create or update roles API.", "docId": "security-api-put-role", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-api-put-role.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-api-put-role.html\r", "name": "role_descriptors", "required": false, "type": { @@ -173325,7 +173397,7 @@ } ] }, - "description": "Updates attributes of an existing API key.\nUsers can only update API keys that they created or that were granted to them.\nUse this API to update API keys created by the create API Key or grant API Key APIs.\nIf you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead.\nIt’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key.\nThis API supports updates to an API key’s access scope and metadata.\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request.\nThe snapshot of the owner’s permissions is updated automatically on every call.\nIf you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope.\nThis change can occur if the owner user’s permissions have changed since the API key was created or last modified.\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\nIMPORTANT: It’s not possible to use an API key as the authentication credential for this API.\nTo update an API key, the owner user’s credentials are required.", + "description": "Updates attributes of an existing API key.\r\nUsers can only update API keys that they created or that were granted to them.\r\nUse this API to update API keys created by the create API Key or grant API Key APIs.\r\nIf you need to apply the same update to many API keys, you can use bulk update API Keys to reduce overhead.\r\nIt’s not possible to update expired API keys, or API keys that have been invalidated by invalidate API Key.\r\nThis API supports updates to an API key’s access scope and metadata.\r\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request, and a snapshot of the owner user’s permissions at the time of the request.\r\nThe snapshot of the owner’s permissions is updated automatically on every call.\r\nIf you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access 
scope.\r\nThis change can occur if the owner user’s permissions have changed since the API key was created or last modified.\r\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\r\nIMPORTANT: It’s not possible to use an API key as the authentication credential for this API.\r\nTo update an API key, the owner user’s credentials are required.", "inherits": { "type": { "name": "RequestBase", @@ -173359,7 +173431,7 @@ "kind": "properties", "properties": [ { - "description": "If `true`, the API key was updated.\nIf `false`, the API key didn’t change because no change was detected.", + "description": "If `true`, the API key was updated.\r\nIf `false`, the API key didn’t change because no change was detected.", "name": "updated", "required": true, "type": { @@ -173387,7 +173459,7 @@ "kind": "properties", "properties": [ { - "description": "Searchable data that you want to associate with the user profile. This\nfield supports a nested data structure.", + "description": "Searchable data that you want to associate with the user profile. This\r\nfield supports a nested data structure.", "name": "labels", "required": false, "type": { @@ -173406,7 +173478,7 @@ } }, { - "description": "Non-searchable data that you want to associate with the user profile.\nThis field supports a nested data structure.", + "description": "Non-searchable data that you want to associate with the user profile.\r\nThis field supports a nested data structure.", "name": "data", "required": false, "type": { @@ -173478,7 +173550,7 @@ } }, { - "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\nvisible to search, if 'false' do nothing with refreshes.", + "description": "If 'true', Elasticsearch refreshes the affected shards to make this operation\r\nvisible to search, if 'wait_for' then wait for a refresh to make this operation\r\nvisible to search, if 'false' do nothing with refreshes.", "name": "refresh", "required": false, "serverDefault": "false", @@ -173917,7 +173989,7 @@ "kind": "properties", "properties": [ { - "description": "Valid values are restart, remove, or replace.\nUse restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.\nBecause the node is expected to rejoin the cluster, data is not migrated off of the node.\nUse remove when you need to permanently remove a node from the cluster.\nThe node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node.\nCertain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.\nDuring a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.", + "description": "Valid values are restart, remove, or replace.\r\nUse restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.\r\nBecause the node is expected to rejoin the cluster, data is not migrated off of the node.\r\nUse remove when you need to permanently remove a node from the cluster.\r\nThe node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node.\r\nCertain allocation 
decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.\r\nDuring a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.", "name": "type", "required": true, "type": { @@ -173929,7 +174001,7 @@ } }, { - "description": "A human-readable reason that the node is being shut down.\nThis field provides information for other cluster operators; it does not affect the shut down process.", + "description": "A human-readable reason that the node is being shut down.\r\nThis field provides information for other cluster operators; it does not affect the shut down process.", "name": "reason", "required": true, "type": { @@ -173941,7 +174013,7 @@ } }, { - "description": "Only valid if type is restart.\nControls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes.\nThis works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting.\nIf you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.", + "description": "Only valid if type is restart.\r\nControls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes.\r\nThis works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting.\r\nIf you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used.", "name": "allocation_delay", "required": false, "type": { @@ -173953,7 +174025,7 @@ } }, { - "description": "Only valid if type is replace.\nSpecifies the name of the node that is replacing the node being shut down.\nShards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.\nDuring relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.", + "description": "Only valid if type is replace.\r\nSpecifies the name of the node that is replacing the node being shut down.\r\nShards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node.\r\nDuring relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules.", "name": "target_node_name", "required": false, "type": { @@ -174061,7 +174133,7 @@ } }, { - "description": "A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported.\nBy default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and clusters.", + "description": "A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported.\r\nBy default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and clusters.", "name": "indices", "required": false, "type": { @@ -174086,7 +174158,7 @@ } }, { - "description": "A list of feature states to be included in this snapshot. 
A list of features available for inclusion in the snapshot and their descriptions be can be retrieved using the get features API.\nEach feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false.", + "description": "A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions be can be retrieved using the get features API.\r\nEach feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false.", "name": "feature_states", "required": false, "type": { @@ -177205,7 +177277,7 @@ } }, { - "description": "Comma-separated list of snapshot names to retrieve. Also accepts wildcards (*).\n- To get information about all snapshots in a registered repository, use a wildcard (*) or _all.\n- To get information about any snapshots that are currently running, use _current.", + "description": "Comma-separated list of snapshot names to retrieve. Also accepts wildcards (*).\r\n- To get information about all snapshots in a registered repository, use a wildcard (*) or _all.\r\n- To get information about any snapshots that are currently running, use _current.", "name": "snapshot", "required": true, "type": { @@ -178353,7 +178425,7 @@ } }, { - "description": "Format for the response. You must specify a format using this parameter or the\nAccept HTTP header. If you specify both, the API uses this parameter.", + "description": "Format for the response. You must specify a format using this parameter or the\r\nAccept HTTP header. If you specify both, the API uses this parameter.", "name": "format", "required": false, "type": { @@ -178365,7 +178437,7 @@ } }, { - "description": "Retention period for the search and its results. Defaults\nto the `keep_alive` period for the original SQL search.", + "description": "Retention period for the search and its results. Defaults\r\nto the `keep_alive` period for the original SQL search.", "name": "keep_alive", "required": false, "type": { @@ -178377,7 +178449,7 @@ } }, { - "description": "Period to wait for complete results. Defaults to no timeout,\nmeaning the request waits for complete search results.", + "description": "Period to wait for complete results. Defaults to no timeout,\r\nmeaning the request waits for complete search results.", "name": "wait_for_completion_timeout", "required": false, "type": { @@ -178396,7 +178468,7 @@ "kind": "properties", "properties": [ { - "description": "Identifier for the search. This value is only returned for async and saved\nsynchronous searches. For CSV, TSV, and TXT responses, this value is returned\nin the `Async-ID` HTTP header.", + "description": "Identifier for the search. This value is only returned for async and saved\r\nsynchronous searches. 
For CSV, TSV, and TXT responses, this value is returned\r\nin the `Async-ID` HTTP header.", "name": "id", "required": true, "type": { @@ -178408,7 +178480,7 @@ } }, { - "description": "If `true`, the search is still running. If false, the search has finished.\nThis value is only returned for async and saved synchronous searches. For\nCSV, TSV, and TXT responses, this value is returned in the `Async-partial`\nHTTP header.", + "description": "If `true`, the search is still running. If false, the search has finished.\r\nThis value is only returned for async and saved synchronous searches. For\r\nCSV, TSV, and TXT responses, this value is returned in the `Async-partial`\r\nHTTP header.", "name": "is_running", "required": true, "type": { @@ -178420,7 +178492,7 @@ } }, { - "description": "If `true`, the response does not contain complete search results. If `is_partial`\nis `true` and `is_running` is `true`, the search is still running. If `is_partial`\nis `true` but `is_running` is `false`, the results are partial due to a failure or\ntimeout. This value is only returned for async and saved synchronous searches.\nFor CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header.", + "description": "If `true`, the response does not contain complete search results. If `is_partial`\r\nis `true` and `is_running` is `true`, the search is still running. If `is_partial`\r\nis `true` but `is_running` is `false`, the results are partial due to a failure or\r\ntimeout. This value is only returned for async and saved synchronous searches.\r\nFor CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header.", "name": "is_partial", "required": true, "type": { @@ -178447,7 +178519,7 @@ } }, { - "description": "Cursor for the next set of paginated results. For CSV, TSV, and\nTXT responses, this value is returned in the `Cursor` HTTP header.", + "description": "Cursor for the next set of paginated results. For CSV, TSV, and\r\nTXT responses, this value is returned in the `Cursor` HTTP header.", "name": "cursor", "required": false, "type": { @@ -178547,7 +178619,7 @@ } }, { - "description": "If `true`, the response does not contain complete search results. If `is_partial`\nis `true` and `is_running` is `true`, the search is still running. If `is_partial`\nis `true` but `is_running` is `false`, the results are partial due to a failure or\ntimeout.", + "description": "If `true`, the response does not contain complete search results. If `is_partial`\r\nis `true` and `is_running` is `true`, the search is still running. 
If `is_partial`\r\nis `true` but `is_running` is `false`, the results are partial due to a failure or\r\ntimeout.", "name": "is_partial", "required": true, "type": { @@ -178559,7 +178631,7 @@ } }, { - "description": "Timestamp, in milliseconds since the Unix epoch, when the search started.\nThe API only returns this property for running searches.", + "description": "Timestamp, in milliseconds since the Unix epoch, when the search started.\r\nThe API only returns this property for running searches.", "name": "start_time_in_millis", "required": true, "type": { @@ -178580,7 +178652,7 @@ } }, { - "description": "Timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete\nthe search and its results, even if the search is still running.", + "description": "Timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete\r\nthe search and its results, even if the search is still running.", "name": "expiration_time_in_millis", "required": true, "type": { @@ -178680,7 +178752,7 @@ { "description": "Optional Elasticsearch query DSL for additional filtering.", "docId": "sql-rest-filtering", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sql-rest-filtering.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sql-rest-filtering.html\r", "name": "filter", "required": false, "serverDefault": "none", @@ -178757,7 +178829,7 @@ } }, { - "description": "Defines one or more runtime fields in the search request. These fields take\nprecedence over mapped fields with the same name.", + "description": "Defines one or more runtime fields in the search request. These fields take\r\nprecedence over mapped fields with the same name.", "name": "runtime_mappings", "required": false, "type": { @@ -178857,7 +178929,7 @@ { "description": "a short version of the Accept header, e.g. json, yaml", "docId": "sql-rest-format", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sql-rest-format.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/sql-rest-format.html\r", "name": "format", "required": false, "type": { @@ -178876,7 +178948,7 @@ "kind": "properties", "properties": [ { - "description": "Identifier for the search. This value is only returned for async and saved\nsynchronous searches. For CSV, TSV, and TXT responses, this value is returned\nin the `Async-ID` HTTP header.", + "description": "Identifier for the search. This value is only returned for async and saved\r\nsynchronous searches. For CSV, TSV, and TXT responses, this value is returned\r\nin the `Async-ID` HTTP header.", "name": "id", "required": false, "type": { @@ -178888,7 +178960,7 @@ } }, { - "description": "If `true`, the search is still running. If false, the search has finished.\nThis value is only returned for async and saved synchronous searches. For\nCSV, TSV, and TXT responses, this value is returned in the `Async-partial`\nHTTP header.", + "description": "If `true`, the search is still running. If false, the search has finished.\r\nThis value is only returned for async and saved synchronous searches. For\r\nCSV, TSV, and TXT responses, this value is returned in the `Async-partial`\r\nHTTP header.", "name": "is_running", "required": false, "type": { @@ -178900,7 +178972,7 @@ } }, { - "description": "If `true`, the response does not contain complete search results. If `is_partial`\nis `true` and `is_running` is `true`, the search is still running. 
If `is_partial`\nis `true` but `is_running` is `false`, the results are partial due to a failure or\ntimeout. This value is only returned for async and saved synchronous searches.\nFor CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header.", + "description": "If `true`, the response does not contain complete search results. If `is_partial`\r\nis `true` and `is_running` is `true`, the search is still running. If `is_partial`\r\nis `true` but `is_running` is `false`, the results are partial due to a failure or\r\ntimeout. This value is only returned for async and saved synchronous searches.\r\nFor CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header.", "name": "is_partial", "required": false, "type": { @@ -178927,7 +178999,7 @@ } }, { - "description": "Cursor for the next set of paginated results. For CSV, TSV, and\nTXT responses, this value is returned in the `Cursor` HTTP header.", + "description": "Cursor for the next set of paginated results. For CSV, TSV, and\r\nTXT responses, this value is returned in the `Cursor` HTTP header.", "name": "cursor", "required": false, "type": { @@ -179728,7 +179800,7 @@ } }, { - "description": "Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if\n`group_by` was set to `parents`.", + "description": "Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if\r\n`group_by` was set to `parents`.", "name": "tasks", "required": false, "type": { @@ -180998,7 +181070,7 @@ }, "properties": [ { - "description": "The destination index for the transform. The mappings of the destination index are deduced based on the source\nfields when possible. If alternate mappings are required, use the create index API prior to starting the\ntransform.", + "description": "The destination index for the transform. The mappings of the destination index are deduced based on the source\r\nfields when possible. If alternate mappings are required, use the create index API prior to starting the\r\ntransform.", "name": "index", "required": false, "type": { @@ -181072,7 +181144,7 @@ "aliases": [ "aggs" ], - "description": "Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket\nscript, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation,\nmin, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted\naverage.", + "description": "Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket\r\nscript, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation,\r\nmin, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted\r\naverage.", "name": "aggregations", "required": false, "type": { @@ -181095,7 +181167,7 @@ } }, { - "description": "Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are\ncurrently supported: date histogram, geotile grid, histogram, terms.", + "description": "Defines how to group the data. More than one grouping can be defined per pivot. 
The following groupings are\r\ncurrently supported: date histogram, geotile grid, histogram, terms.", "name": "group_by", "required": false, "type": { @@ -181197,7 +181269,7 @@ } }, { - "description": "Specifies the maximum age of a document in the destination index. Documents that are older than the configured\nvalue are removed from the destination index.", + "description": "Specifies the maximum age of a document in the destination index. Documents that are older than the configured\r\nvalue are removed from the destination index.", "name": "max_age", "required": true, "type": { @@ -181245,7 +181317,7 @@ }, "properties": [ { - "description": "Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align\ncheckpoint ranges with the date histogram interval when date histogram is specified as a group source in the\ntransform config. As a result, less document updates in the destination index will be performed thus improving\noverall performance.", + "description": "Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align\r\ncheckpoint ranges with the date histogram interval when date histogram is specified as a group source in the\r\ntransform config. As a result, less document updates in the destination index will be performed thus improving\r\noverall performance.", "name": "align_checkpoints", "required": false, "serverDefault": true, @@ -181258,7 +181330,7 @@ } }, { - "description": "Defines if dates in the ouput should be written as ISO formatted string or as millis since epoch. epoch_millis was\nthe default for transforms created before version 7.11. For compatible output set this value to `true`.", + "description": "Defines if dates in the ouput should be written as ISO formatted string or as millis since epoch. epoch_millis was\r\nthe default for transforms created before version 7.11. For compatible output set this value to `true`.", "name": "dates_as_epoch_millis", "required": false, "serverDefault": false, @@ -181284,7 +181356,7 @@ } }, { - "description": "Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a\nwait time between search requests. The default value is null, which disables throttling.", + "description": "Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a\r\nwait time between search requests. The default value is null, which disables throttling.", "name": "docs_per_second", "required": false, "type": { @@ -181296,7 +181368,7 @@ } }, { - "description": "Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker\nexceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`.", + "description": "Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker\r\nexceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the\r\nmaximum is `65,536`.", "name": "max_page_search_size", "required": false, "serverDefault": 500, @@ -181315,7 +181387,7 @@ "since": "8.5.0" } }, - "description": "If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case\nof an error which means the transform never fails. 
Setting the number of retries other than infinite fails in\nvalidation.", + "description": "If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case\r\nof an error which means the transform never fails. Setting the number of retries other than infinite fails in\r\nvalidation.", "name": "unattended", "required": false, "serverDefault": false, @@ -181339,7 +181411,7 @@ }, "properties": [ { - "description": "The source indices for the transform. It can be a single index, an index pattern (for example, `\"my-index-*\"\"`), an\narray of indices (for example, `[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns (for\nexample, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use the syntax `\"remote_name:index_name\"`. If\nany indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role.", + "description": "The source indices for the transform. It can be a single index, an index pattern (for example, `\"my-index-*\"\"`), an\r\narray of indices (for example, `[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns (for\r\nexample, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use the syntax `\"remote_name:index_name\"`. If\r\nany indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role.", "name": "index", "required": true, "type": { @@ -181369,7 +181441,7 @@ "since": "7.12.0" } }, - "description": "Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data\nnodes, including remote nodes, must be 7.12 or later.", + "description": "Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data\r\nnodes, including remote nodes, must be 7.12 or later.", "name": "runtime_mappings", "required": false, "since": "7.12.0", @@ -181430,7 +181502,7 @@ } }, { - "description": "The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field\nthat contains the ingest timestamp. If you use a different field, you might need to set the delay such that it\naccounts for data transmission delays.", + "description": "The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field\r\nthat contains the ingest timestamp. If you use a different field, you might need to set the delay such that it\r\naccounts for data transmission delays.", "name": "field", "required": true, "type": { @@ -181479,7 +181551,7 @@ ], "query": [ { - "description": "If this value is false, the transform must be stopped before it can be deleted. If true, the transform is\ndeleted regardless of its current state.", + "description": "If this value is false, the transform must be stopped before it can be deleted. If true, the transform is\r\ndeleted regardless of its current state.", "name": "force", "required": false, "serverDefault": false, @@ -181546,7 +181618,7 @@ }, "path": [ { - "description": "Identifier for the transform. It can be a transform identifier or a\nwildcard expression. You can get information for all transforms by using\n`_all`, by specifying `*` as the ``, or by omitting the\n``.", + "description": "Identifier for the transform. It can be a transform identifier or a\r\nwildcard expression. 
You can get information for all transforms by using\r\n`_all`, by specifying `*` as the ``, or by omitting the\r\n``.", "name": "transform_id", "required": false, "type": { @@ -181560,7 +181632,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no transforms that match.\n2. Contains the _all string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nIf this parameter is false, the request returns a 404 status code when\nthere are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no transforms that match.\r\n2. Contains the _all string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nIf this parameter is false, the request returns a 404 status code when\r\nthere are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -181599,7 +181671,7 @@ } }, { - "description": "Excludes fields that were automatically added when creating the\ntransform. This allows the configuration to be in an acceptable format to\nbe retrieved and then added to another cluster.", + "description": "Excludes fields that were automatically added when creating the\r\ntransform. This allows the configuration to be in an acceptable format to\r\nbe retrieved and then added to another cluster.", "name": "exclude_generated", "required": false, "serverDefault": false, @@ -182025,7 +182097,7 @@ }, "path": [ { - "description": "Identifier for the transform. It can be a transform identifier or a\nwildcard expression. You can get information for all transforms by using\n`_all`, by specifying `*` as the ``, or by omitting the\n``.", + "description": "Identifier for the transform. It can be a transform identifier or a\r\nwildcard expression. You can get information for all transforms by using\r\n`_all`, by specifying `*` as the ``, or by omitting the\r\n``.", "name": "transform_id", "required": true, "type": { @@ -182039,7 +182111,7 @@ ], "query": [ { - "description": "Specifies what to do when the request:\n\n1. Contains wildcard expressions and there are no transforms that match.\n2. Contains the _all string or no identifiers and there are no matches.\n3. Contains wildcard expressions and there are only partial matches.\n\nIf this parameter is false, the request returns a 404 status code when\nthere are no matches or only partial matches.", + "description": "Specifies what to do when the request:\r\n\r\n1. Contains wildcard expressions and there are no transforms that match.\r\n2. Contains the _all string or no identifiers and there are no matches.\r\n3. Contains wildcard expressions and there are only partial matches.\r\n\r\nIf this parameter is false, the request returns a 404 status code when\r\nthere are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -182577,7 +182649,7 @@ } }, { - "description": "The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is 1s and the maximum is 1h.", + "description": "The interval between checks for changes in the source indices when the\r\ntransform is running continuously. 
Also determines the retry interval in\r\nthe event of transient failures while the transform is searching or\r\nindexing. The minimum value is 1s and the maximum is 1h.", "name": "frequency", "required": false, "serverDefault": "1m", @@ -182590,7 +182662,7 @@ } }, { - "description": "The pivot method transforms the data by aggregating and grouping it.\nThese objects define the group by fields and the aggregation to reduce\nthe data.", + "description": "The pivot method transforms the data by aggregating and grouping it.\r\nThese objects define the group by fields and the aggregation to reduce\r\nthe data.", "name": "pivot", "required": false, "type": { @@ -182638,7 +182710,7 @@ } }, { - "description": "Defines a retention policy for the transform. Data that meets the defined\ncriteria is deleted from the destination index.", + "description": "Defines a retention policy for the transform. Data that meets the defined\r\ncriteria is deleted from the destination index.", "name": "retention_policy", "required": false, "type": { @@ -182650,7 +182722,7 @@ } }, { - "description": "The latest method transforms the data by finding the latest document for\neach unique key.", + "description": "The latest method transforms the data by finding the latest document for\r\neach unique key.", "name": "latest", "required": false, "type": { @@ -182663,7 +182735,7 @@ } ] }, - "description": "Previews a transform.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Previews a transform.\r\n\r\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\r\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\r\ntypes of the source index and the transform aggregations.", "inherits": { "type": { "name": "RequestBase", @@ -182677,7 +182749,7 @@ }, "path": [ { - "description": "Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform\nconfiguration details in the request body.", + "description": "Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform\r\nconfiguration details in the request body.", "name": "transform_id", "required": false, "type": { @@ -182691,7 +182763,7 @@ ], "query": [ { - "description": "Period to wait for a response. If no response is received before the\ntimeout expires, the request fails and returns an error.", + "description": "Period to wait for a response. If no response is received before the\r\ntimeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -182782,7 +182854,7 @@ } }, { - "description": "The interval between checks for changes in the source indices when the transform is running continuously. Also\ndetermines the retry interval in the event of transient failures while the transform is searching or indexing.\nThe minimum value is `1s` and the maximum is `1h`.", + "description": "The interval between checks for changes in the source indices when the transform is running continuously. 
Also\r\ndetermines the retry interval in the event of transient failures while the transform is searching or indexing.\r\nThe minimum value is `1s` and the maximum is `1h`.", "name": "frequency", "required": false, "serverDefault": "1m", @@ -182819,7 +182891,7 @@ } }, { - "description": "The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields\nand the aggregation to reduce the data.", + "description": "The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields\r\nand the aggregation to reduce the data.", "name": "pivot", "required": false, "type": { @@ -182831,7 +182903,7 @@ } }, { - "description": "Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the\ndestination index.", + "description": "Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the\r\ndestination index.", "name": "retention_policy", "required": false, "type": { @@ -182880,7 +182952,7 @@ } ] }, - "description": "Creates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.", + "description": "Creates a transform.\r\n\r\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\r\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\r\nunique row per entity.\r\n\r\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\r\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\r\nthe pivot object. 
If you choose to use the latest method, the entities are defined by the `unique_key` field values\r\nin the latest object.\r\n\r\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\r\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\r\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\r\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\r\nattempts unauthorized operations.\r\n\r\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\r\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\r\nnot give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not\r\ngive users any privileges on `.data-frame-internal*` indices.", "inherits": { "type": { "name": "RequestBase", @@ -182894,7 +182966,7 @@ }, "path": [ { - "description": "Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9),\nhyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters.", + "description": "Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9),\r\nhyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters.", "name": "transform_id", "required": true, "type": { @@ -182908,7 +182980,7 @@ ], "query": [ { - "description": "When the transform is created, a series of validations occur to ensure its success. For example, there is a\ncheck for the existence of the source indices and a check that the destination index is not part of the source\nindex pattern. You can use this parameter to skip the checks, for example when the source index does not exist\nuntil after the transform is created. The validations are always run when you start the transform, however, with\nthe exception of privilege checks.", + "description": "When the transform is created, a series of validations occur to ensure its success. For example, there is a\r\ncheck for the existence of the source indices and a check that the destination index is not part of the source\r\nindex pattern. You can use this parameter to skip the checks, for example when the source index does not exist\r\nuntil after the transform is created. The validations are always run when you start the transform, however, with\r\nthe exception of privilege checks.", "name": "defer_validation", "required": false, "serverDefault": false, @@ -182961,7 +183033,7 @@ "body": { "kind": "no_body" }, - "description": "Resets a transform.\nBefore you can reset it, you must stop it; alternatively, use the `force` query parameter.\nIf the destination index was created by the transform, it is deleted.", + "description": "Resets a transform.\r\nBefore you can reset it, you must stop it; alternatively, use the `force` query parameter.\r\nIf the destination index was created by the transform, it is deleted.", "inherits": { "type": { "name": "RequestBase", @@ -182975,7 +183047,7 @@ }, "path": [ { - "description": "Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9),\nhyphens, and underscores. 
It has a 64 character limit and must start and end with alphanumeric characters.", + "description": "Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9),\r\nhyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters.", "name": "transform_id", "required": true, "type": { @@ -182989,7 +183061,7 @@ ], "query": [ { - "description": "If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform\nmust be stopped before it can be reset.", + "description": "If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform\r\nmust be stopped before it can be reset.", "name": "force", "required": false, "serverDefault": false, @@ -183029,7 +183101,7 @@ "body": { "kind": "no_body" }, - "description": "Schedules now a transform.\n\nIf you _schedule_now a transform, it will process the new data instantly,\nwithout waiting for the configured frequency interval. After _schedule_now API is called,\nthe transform will be processed again at now + frequency unless _schedule_now API\nis called again in the meantime.", + "description": "Schedules now a transform.\r\n\r\nIf you _schedule_now a transform, it will process the new data instantly,\r\nwithout waiting for the configured frequency interval. After _schedule_now API is called,\r\nthe transform will be processed again at now + frequency unless _schedule_now API\r\nis called again in the meantime.", "inherits": { "type": { "name": "RequestBase", @@ -183097,7 +183169,7 @@ "body": { "kind": "no_body" }, - "description": "Starts a transform.\n\nWhen you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\nset to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping\ndefinitions for the destination index from the source indices and the transform aggregations. If fields in the\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\nin a pivot transform.\n\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\ntime of creation and uses those same roles. If those roles do not have the required privileges on the source and\ndestination indices, the transform fails when it attempts unauthorized operations.", + "description": "Starts a transform.\r\n\r\nWhen you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\r\nset to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping\r\ndefinitions for the destination index from the source indices and the transform aggregations. 
If fields in the\r\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\r\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\r\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\r\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\r\nin a pivot transform.\r\n\r\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\r\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\r\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\r\ntime of creation and uses those same roles. If those roles do not have the required privileges on the source and\r\ndestination indices, the transform fails when it attempts unauthorized operations.", "inherits": { "type": { "name": "RequestBase", @@ -183191,7 +183263,7 @@ }, "path": [ { - "description": "Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression.\nTo stop all transforms, use `_all` or `*` as the identifier.", + "description": "Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression.\r\nTo stop all transforms, use `_all` or `*` as the identifier.", "name": "transform_id", "required": true, "type": { @@ -183205,7 +183277,7 @@ ], "query": [ { - "description": "Specifies what to do when the request: contains wildcard expressions and there are no transforms that match;\ncontains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there\nare only partial matches.\n\nIf it is true, the API returns a successful acknowledgement message when there are no matches. When there are\nonly partial matches, the API stops the appropriate transforms.\n\nIf it is false, the request returns a 404 status code when there are no matches or only partial matches.", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no transforms that match;\r\ncontains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there\r\nare only partial matches.\r\n\r\nIf it is true, the API returns a successful acknowledgement message when there are no matches. When there are\r\nonly partial matches, the API stops the appropriate transforms.\r\n\r\nIf it is false, the request returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, "serverDefault": true, @@ -183231,7 +183303,7 @@ } }, { - "description": "Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the\ntimeout expires, the request returns a timeout exception. However, the request continues processing and\neventually moves the transform to a STOPPED state.", + "description": "Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the\r\ntimeout expires, the request returns a timeout exception. 
However, the request continues processing and\r\neventually moves the transform to a STOPPED state.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -183244,7 +183316,7 @@ } }, { - "description": "If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false,\nthe transform stops as soon as possible.", + "description": "If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false,\r\nthe transform stops as soon as possible.", "name": "wait_for_checkpoint", "required": false, "serverDefault": false, @@ -183257,7 +183329,7 @@ } }, { - "description": "If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns\nimmediately and the indexer is stopped asynchronously in the background.", + "description": "If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns\r\nimmediately and the indexer is stopped asynchronously in the background.", "name": "wait_for_completion", "required": false, "serverDefault": false, @@ -183322,7 +183394,7 @@ } }, { - "description": "The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is 1s and the maximum is 1h.", + "description": "The interval between checks for changes in the source indices when the\r\ntransform is running continuously. Also determines the retry interval in\r\nthe event of transient failures while the transform is searching or\r\nindexing. The minimum value is 1s and the maximum is 1h.", "name": "frequency", "required": false, "serverDefault": "1m", @@ -183383,7 +183455,7 @@ } }, { - "description": "Defines a retention policy for the transform. Data that meets the defined\ncriteria is deleted from the destination index.", + "description": "Defines a retention policy for the transform. Data that meets the defined\r\ncriteria is deleted from the destination index.", "name": "retention_policy", "required": false, "type": { @@ -183408,7 +183480,7 @@ } ] }, - "description": "Updates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.", + "description": "Updates certain properties of a transform.\r\n\r\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\r\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\r\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. When\r\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\r\ntime of update and runs with those privileges.", "inherits": { "type": { "name": "RequestBase", @@ -183436,7 +183508,7 @@ ], "query": [ { - "description": "When true, deferrable validations are not run. 
This behavior may be\ndesired if the source index does not exist until after the transform is\ncreated.", + "description": "When true, deferrable validations are not run. This behavior may be\r\ndesired if the source index does not exist until after the transform is\r\ncreated.", "name": "defer_validation", "required": false, "type": { @@ -183448,7 +183520,7 @@ } }, { - "description": "Period to wait for a response. If no response is received before the\ntimeout expires, the request fails and returns an error.", + "description": "Period to wait for a response. If no response is received before the\r\ntimeout expires, the request fails and returns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -183637,7 +183709,7 @@ "body": { "kind": "no_body" }, - "description": "Upgrades all transforms.\nThis API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It\nalso cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not\naffect the source and destination indices. The upgrade also does not affect the roles that transforms use when\nElasticsearch security features are enabled; the role used to read source data and write to the destination index\nremains unchanged.", + "description": "Upgrades all transforms.\r\nThis API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It\r\nalso cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not\r\naffect the source and destination indices. The upgrade also does not affect the roles that transforms use when\r\nElasticsearch security features are enabled; the role used to read source data and write to the destination index\r\nremains unchanged.", "inherits": { "type": { "name": "RequestBase", @@ -183665,7 +183737,7 @@ } }, { - "description": "Period to wait for a response. If no response is received before the timeout expires, the request fails and\nreturns an error.", + "description": "Period to wait for a response. If no response is received before the timeout expires, the request fails and\r\nreturns an error.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -184491,7 +184563,7 @@ }, { "docId": "cron-expressions", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/api-conventions.html#api-cron-expressions", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/api-conventions.html#api-cron-expressions\r", "kind": "type_alias", "name": { "name": "CronExpression", @@ -187106,7 +187178,7 @@ } }, { - "description": "ID of the search template to use. If no source is specified,\nthis parameter is required.", + "description": "ID of the search template to use. If no source is specified,\r\nthis parameter is required.", "name": "id", "required": false, "type": { @@ -187148,7 +187220,7 @@ } }, { - "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. Also supports Mustache variables. If no id is specified, this\nparameter is required.", + "description": "An inline search template. Supports the same parameters as the search API's\r\nrequest body. Also supports Mustache variables. 
If no id is specified, this\r\nparameter is required.", "name": "source", "required": false, "type": { @@ -188506,7 +188578,7 @@ } ] }, - "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution.", + "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\r\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs. You can execute the watch without executing all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after execution.", "inherits": { "type": { "name": "RequestBase", @@ -193950,7 +194022,7 @@ "specLocation": "xpack/usage/types.ts#L466-L469" }, { - "description": "In some places in the specification an object consists of the union of a set of known properties\nand a set of runtime injected properties. Meaning that object should theoretically extend Dictionary but expose\na set of known keys and possibly. The object might already be part of an object graph and have a parent class.\nThis puts it into a bind that needs a client specific solution.\nWe therefore document the requirement to behave like a dictionary for unknown properties with this interface.", + "description": "In some places in the specification an object consists of the union of a set of known properties\r\nand a set of runtime injected properties. Meaning that object should theoretically extend Dictionary but expose\r\na set of known keys and possibly. The object might already be part of an object graph and have a parent class.\r\nThis puts it into a bind that needs a client specific solution.\r\nWe therefore document the requirement to behave like a dictionary for unknown properties with this interface.", "generics": [ { "name": "TKey", @@ -193970,7 +194042,7 @@ "specLocation": "_spec_utils/behaviors.ts#L29-L37" }, { - "description": "In some places in the specification an object consists of a static set of properties and a single additional property\nwith an arbitrary name but a statically defined type. This is typically used for configurations associated\nto a single field. Meaning that object should theoretically extend SingleKeyDictionary but expose\na set of known keys. And possibly the object might already be part of an object graph and have a parent class.\nThis puts it into a bind that needs a client specific solution.\nWe therefore document the requirement to accept a single unknown property with this interface.", + "description": "In some places in the specification an object consists of a static set of properties and a single additional property\r\nwith an arbitrary name but a statically defined type. This is typically used for configurations associated\r\nto a single field. Meaning that object should theoretically extend SingleKeyDictionary but expose\r\na set of known keys. 
And possibly the object might already be part of an object graph and have a parent class.\r\nThis puts it into a bind that needs a client specific solution.\r\nWe therefore document the requirement to accept a single unknown property with this interface.", "generics": [ { "name": "TKey", @@ -193990,7 +194062,7 @@ "specLocation": "_spec_utils/behaviors.ts#L39-L48" }, { - "description": "Implements a set of common query parameters all API's support.\nSince these can break the request structure these are listed explicitly as a behavior.\nIts up to individual clients to define support although `error_trace` and `pretty` are\nrecommended as a minimum.", + "description": "Implements a set of common query parameters all API's support.\r\nSince these can break the request structure these are listed explicitly as a behavior.\r\nIts up to individual clients to define support although `error_trace` and `pretty` are\r\nrecommended as a minimum.", "kind": "interface", "name": { "name": "CommonQueryParameters", @@ -193998,7 +194070,7 @@ }, "properties": [ { - "description": "When set to `true` Elasticsearch will include the full stack trace of errors\nwhen they occur.", + "description": "When set to `true` Elasticsearch will include the full stack trace of errors\r\nwhen they occur.", "name": "error_trace", "required": false, "serverDefault": false, @@ -194011,7 +194083,7 @@ } }, { - "description": "Comma-separated list of filters in dot notation which reduce the response\nreturned by Elasticsearch.", + "description": "Comma-separated list of filters in dot notation which reduce the response\r\nreturned by Elasticsearch.", "name": "filter_path", "required": false, "type": { @@ -194038,7 +194110,7 @@ } }, { - "description": "When set to `true` will return statistics in a format suitable for humans.\nFor example `\"exists_time\": \"1h\"` for humans and\n`\"eixsts_time_in_millis\": 3600000` for computers. When disabled the human\nreadable values will be omitted. This makes sense for responses being consumed\nonly by machines.", + "description": "When set to `true` will return statistics in a format suitable for humans.\r\nFor example `\"exists_time\": \"1h\"` for humans and\r\n`\"eixsts_time_in_millis\": 3600000` for computers. When disabled the human\r\nreadable values will be omitted. This makes sense for responses being consumed\r\nonly by machines.", "name": "human", "required": false, "serverDefault": false, @@ -194051,7 +194123,7 @@ } }, { - "description": "If set to `true` the returned JSON will be \"pretty-formatted\". Only use\nthis option for debugging only.", + "description": "If set to `true` the returned JSON will be \"pretty-formatted\". 
Only use\r\nthis option for debugging only.", "name": "pretty", "required": false, "serverDefault": false, @@ -194067,7 +194139,7 @@ "specLocation": "_spec_utils/behaviors.ts#L50-L84" }, { - "description": "Implements a set of common query parameters all Cat API's support.\nSince these can break the request structure these are listed explicitly as a behavior.", + "description": "Implements a set of common query parameters all Cat API's support.\r\nSince these can break the request structure these are listed explicitly as a behavior.", "kind": "interface", "name": { "name": "CommonCatQueryParameters", @@ -194075,7 +194147,7 @@ }, "properties": [ { - "description": "Specifies the format to return the columnar data in, can be set to\n`text`, `json`, `cbor`, `yaml`, or `smile`.", + "description": "Specifies the format to return the columnar data in, can be set to\r\n`text`, `json`, `cbor`, `yaml`, or `smile`.", "name": "format", "required": false, "serverDefault": "text", @@ -194100,7 +194172,7 @@ } }, { - "description": "When set to `true` will output available columns. This option\ncan't be combined with any other query string option.", + "description": "When set to `true` will output available columns. This option\r\ncan't be combined with any other query string option.", "name": "help", "required": false, "serverDefault": false, @@ -194113,7 +194185,7 @@ } }, { - "description": "If `true`, the request computes the list of selected nodes from the\nlocal cluster state. If `false` the list of selected nodes are computed\nfrom the cluster state of the master node. In both cases the coordinating\nnode will send requests for further information to each selected node.", + "description": "If `true`, the request computes the list of selected nodes from the\r\nlocal cluster state. If `false` the list of selected nodes are computed\r\nfrom the cluster state of the master node. In both cases the coordinating\r\nnode will send requests for further information to each selected node.", "name": "local", "required": false, "serverDefault": false, @@ -194139,7 +194211,7 @@ } }, { - "description": "List of columns that determine how the table should be sorted.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", + "description": "List of columns that determine how the table should be sorted.\r\nSorting defaults to ascending and can be changed by setting `:asc`\r\nor `:desc` as a suffix to the column name.", "name": "s", "required": false, "type": { @@ -194167,7 +194239,7 @@ "specLocation": "_spec_utils/behaviors.ts#L86-L132" }, { - "description": "A class that implements `OverloadOf` should have the exact same properties with the same types.\nIt can change if a property is required or not. There is no need to port the descriptions\nand js doc tags, the compiler will do that for you.", + "description": "A class that implements `OverloadOf` should have the exact same properties with the same types.\r\nIt can change if a property is required or not. 
There is no need to port the descriptions\r\nand js doc tags, the compiler will do that for you.", "generics": [ { "name": "TDefinition", diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 4a36e62dd4..0541521f2e 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -4162,7 +4162,7 @@ export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnaly export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' - preserve_original?: boolean + preserve_original?: SpecUtilsStringified } export type AnalysisCharFilter = string | AnalysisCharFilterDefinition @@ -4242,7 +4242,7 @@ export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { max_gram?: integer min_gram?: integer side?: AnalysisEdgeNGramSide - preserve_original?: boolean + preserve_original?: SpecUtilsStringified } export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { @@ -4481,14 +4481,14 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { type: 'multiplexer' filters: string[] - preserve_original?: boolean + preserve_original?: SpecUtilsStringified } export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { type: 'ngram' max_gram?: integer min_gram?: integer - preserve_original?: boolean + preserve_original?: SpecUtilsStringified } export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { @@ -4545,7 +4545,7 @@ export interface AnalysisPatternAnalyzer { export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_capture' patterns: string[] - preserve_original?: boolean + preserve_original?: SpecUtilsStringified } export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { @@ -4758,7 +4758,7 @@ export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilt generate_number_parts?: boolean generate_word_parts?: boolean ignore_keywords?: boolean - preserve_original?: boolean + preserve_original?: SpecUtilsStringified protected_words?: string[] protected_words_path?: string split_on_case_change?: boolean @@ -4775,7 +4775,7 @@ export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBas catenate_words?: boolean generate_number_parts?: boolean generate_word_parts?: boolean - preserve_original?: boolean + preserve_original?: SpecUtilsStringified protected_words?: string[] protected_words_path?: string split_on_case_change?: boolean @@ -9776,7 +9776,7 @@ export interface IndicesIndexSettingsAnalysis { export interface IndicesIndexSettingsLifecycle { name: Name - indexing_complete?: boolean + indexing_complete?: SpecUtilsStringified origination_date?: long parse_origination_date?: boolean step?: IndicesIndexSettingsLifecycleStep diff --git a/specification/_types/analysis/token_filters.ts b/specification/_types/analysis/token_filters.ts index eddc8c3652..5fe3067feb 100644 --- a/specification/_types/analysis/token_filters.ts +++ b/specification/_types/analysis/token_filters.ts @@ -81,7 +81,7 @@ export class EdgeNGramTokenFilter extends TokenFilterBase { max_gram?: integer min_gram?: integer side?: EdgeNGramSide - preserve_original?: boolean + preserve_original?: Stringified } export class ShingleTokenFilter extends TokenFilterBase { @@ -136,7 +136,7 @@ export class WordDelimiterTokenFilter extends TokenFilterBase { catenate_words?: boolean generate_number_parts?: 
boolean generate_word_parts?: boolean - preserve_original?: boolean + preserve_original?: Stringified protected_words?: string[] protected_words_path?: string split_on_case_change?: boolean @@ -155,7 +155,7 @@ export class WordDelimiterGraphTokenFilter extends TokenFilterBase { generate_number_parts?: boolean generate_word_parts?: boolean ignore_keywords?: boolean - preserve_original?: boolean + preserve_original?: Stringified protected_words?: string[] protected_words_path?: string split_on_case_change?: boolean @@ -167,7 +167,7 @@ export class WordDelimiterGraphTokenFilter extends TokenFilterBase { export class AsciiFoldingTokenFilter extends TokenFilterBase { type: 'asciifolding' - preserve_original?: boolean + preserve_original?: Stringified } export class CommonGramsTokenFilter extends TokenFilterBase { @@ -260,14 +260,14 @@ export class LowercaseTokenFilter extends TokenFilterBase { export class MultiplexerTokenFilter extends TokenFilterBase { type: 'multiplexer' filters: string[] - preserve_original?: boolean + preserve_original?: Stringified } export class NGramTokenFilter extends TokenFilterBase { type: 'ngram' max_gram?: integer min_gram?: integer - preserve_original?: boolean + preserve_original?: Stringified } export class NoriPartOfSpeechTokenFilter extends TokenFilterBase { @@ -278,7 +278,7 @@ export class NoriPartOfSpeechTokenFilter extends TokenFilterBase { export class PatternCaptureTokenFilter extends TokenFilterBase { type: 'pattern_capture' patterns: string[] - preserve_original?: boolean + preserve_original?: Stringified } export class PatternReplaceTokenFilter extends TokenFilterBase { diff --git a/specification/indices/_types/IndexSettings.ts b/specification/indices/_types/IndexSettings.ts index 4126f37387..fcf8793198 100644 --- a/specification/indices/_types/IndexSettings.ts +++ b/specification/indices/_types/IndexSettings.ts @@ -274,7 +274,7 @@ export class IndexSettingsLifecycle { * You can explicitly set it to skip rollover. * @server_default false */ - indexing_complete?: boolean + indexing_complete?: Stringified /** * If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting * if you create a new index that contains old data and want to use the original creation date to calculate the index