From 78d9059fc7490e9c9374e80e04507a88861bd89a Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Mon, 24 Jun 2024 11:20:27 +0100 Subject: [PATCH 01/57] proxy: update tokio-postgres to allow arbitrary config params (#8076) ## Problem Fixes https://github.com/neondatabase/neon/issues/1287 ## Summary of changes tokio-postgres now supports arbitrary server params through the `param(key, value)` method. Some keys are special so we explicitly filter them out. --- Cargo.lock | 8 +- libs/postgres_connection/src/lib.rs | 50 +++++----- proxy/src/compute.rs | 129 ++++++++++++++------------ proxy/src/serverless/backend.rs | 4 + proxy/src/serverless/sql_over_http.rs | 1 + test_runner/regress/test_proxy.py | 19 ++++ 6 files changed, 119 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 77bf01240273..70c837c14645 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4005,7 +4005,7 @@ dependencies = [ [[package]] name = "postgres" version = "0.19.4" -source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2" +source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#cff6927e4f58b1af6ecc2ee7279df1f2ff537295" dependencies = [ "bytes", "fallible-iterator", @@ -4018,7 +4018,7 @@ dependencies = [ [[package]] name = "postgres-protocol" version = "0.6.4" -source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2" +source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#cff6927e4f58b1af6ecc2ee7279df1f2ff537295" dependencies = [ "base64 0.20.0", "byteorder", @@ -4037,7 +4037,7 @@ dependencies = [ [[package]] name = "postgres-types" version = "0.2.4" -source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2" +source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#cff6927e4f58b1af6ecc2ee7279df1f2ff537295" dependencies = [ "bytes", "fallible-iterator", @@ -6210,7 +6210,7 @@ dependencies = [ [[package]] name = "tokio-postgres" version = "0.7.7" -source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2" +source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#cff6927e4f58b1af6ecc2ee7279df1f2ff537295" dependencies = [ "async-trait", "byteorder", diff --git a/libs/postgres_connection/src/lib.rs b/libs/postgres_connection/src/lib.rs index 9f57f3d50750..fdabcbacb245 100644 --- a/libs/postgres_connection/src/lib.rs +++ b/libs/postgres_connection/src/lib.rs @@ -144,20 +144,7 @@ impl PgConnectionConfig { // implement and this function is hardly a bottleneck. The function is only called around // establishing a new connection. 
#[allow(unstable_name_collisions)] - config.options( - &self - .options - .iter() - .map(|s| { - if s.contains(['\\', ' ']) { - Cow::Owned(s.replace('\\', "\\\\").replace(' ', "\\ ")) - } else { - Cow::Borrowed(s.as_str()) - } - }) - .intersperse(Cow::Borrowed(" ")) // TODO: use impl from std once it's stabilized - .collect::(), - ); + config.options(&encode_options(&self.options)); } config } @@ -178,6 +165,21 @@ impl PgConnectionConfig { } } +#[allow(unstable_name_collisions)] +fn encode_options(options: &[String]) -> String { + options + .iter() + .map(|s| { + if s.contains(['\\', ' ']) { + Cow::Owned(s.replace('\\', "\\\\").replace(' ', "\\ ")) + } else { + Cow::Borrowed(s.as_str()) + } + }) + .intersperse(Cow::Borrowed(" ")) // TODO: use impl from std once it's stabilized + .collect::() +} + impl fmt::Display for PgConnectionConfig { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // The password is intentionally hidden and not part of this display string. @@ -206,7 +208,7 @@ impl fmt::Debug for PgConnectionConfig { #[cfg(test)] mod tests_pg_connection_config { - use crate::PgConnectionConfig; + use crate::{encode_options, PgConnectionConfig}; use once_cell::sync::Lazy; use url::Host; @@ -255,18 +257,12 @@ mod tests_pg_connection_config { #[test] fn test_with_options() { - let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123).extend_options([ - "hello", - "world", - "with space", - "and \\ backslashes", + let options = encode_options(&[ + "hello".to_owned(), + "world".to_owned(), + "with space".to_owned(), + "and \\ backslashes".to_owned(), ]); - assert_eq!(cfg.host(), &*STUB_HOST); - assert_eq!(cfg.port(), 123); - assert_eq!(cfg.raw_address(), "stub.host.example:123"); - assert_eq!( - cfg.to_tokio_postgres_config().get_options(), - Some("hello world with\\ space and\\ \\\\\\ backslashes") - ); + assert_eq!(options, "hello world with\\ space and\\ \\\\\\ backslashes"); } } diff --git a/proxy/src/compute.rs b/proxy/src/compute.rs index feb09d563896..a50a96e5e844 100644 --- a/proxy/src/compute.rs +++ b/proxy/src/compute.rs @@ -103,12 +103,8 @@ impl ConnCfg { /// Reuse password or auth keys from the other config. pub fn reuse_password(&mut self, other: Self) { - if let Some(password) = other.get_password() { - self.password(password); - } - - if let Some(keys) = other.get_auth_keys() { - self.auth_keys(keys); + if let Some(password) = other.get_auth() { + self.auth(password); } } @@ -124,48 +120,64 @@ impl ConnCfg { /// Apply startup message params to the connection config. pub fn set_startup_params(&mut self, params: &StartupMessageParams) { - // Only set `user` if it's not present in the config. - // Link auth flow takes username from the console's response. - if let (None, Some(user)) = (self.get_user(), params.get("user")) { - self.user(user); - } - - // Only set `dbname` if it's not present in the config. - // Link auth flow takes dbname from the console's response. - if let (None, Some(dbname)) = (self.get_dbname(), params.get("database")) { - self.dbname(dbname); - } - - // Don't add `options` if they were only used for specifying a project. - // Connection pools don't support `options`, because they affect backend startup. - if let Some(options) = filtered_options(params) { - self.options(&options); - } - - if let Some(app_name) = params.get("application_name") { - self.application_name(app_name); - } - - // TODO: This is especially ugly... 
-        if let Some(replication) = params.get("replication") {
-            use tokio_postgres::config::ReplicationMode;
-            match replication {
-                "true" | "on" | "yes" | "1" => {
-                    self.replication_mode(ReplicationMode::Physical);
+        let mut client_encoding = false;
+        for (k, v) in params.iter() {
+            match k {
+                "user" => {
+                    // Only set `user` if it's not present in the config.
+                    // Link auth flow takes username from the console's response.
+                    if self.get_user().is_none() {
+                        self.user(v);
+                    }
                 }
                 "database" => {
-                    self.replication_mode(ReplicationMode::Logical);
+                    // Only set `dbname` if it's not present in the config.
+                    // Link auth flow takes dbname from the console's response.
+                    if self.get_dbname().is_none() {
+                        self.dbname(v);
+                    }
+                }
+                "options" => {
+                    // Don't add `options` if they were only used for specifying a project.
+                    // Connection pools don't support `options`, because they affect backend startup.
+                    if let Some(options) = filtered_options(v) {
+                        self.options(&options);
+                    }
+                }
+
+                // the special ones in tokio-postgres that we don't want being set by the user
+                "dbname" => {}
+                "password" => {}
+                "sslmode" => {}
+                "host" => {}
+                "port" => {}
+                "connect_timeout" => {}
+                "keepalives" => {}
+                "keepalives_idle" => {}
+                "keepalives_interval" => {}
+                "keepalives_retries" => {}
+                "target_session_attrs" => {}
+                "channel_binding" => {}
+                "max_backend_message_size" => {}
+
+                "client_encoding" => {
+                    client_encoding = true;
+                    // only error should be from bad null bytes,
+                    // but we've already checked for those.
+                    _ = self.param("client_encoding", v);
+                }
+
+                _ => {
+                    // only error should be from bad null bytes,
+                    // but we've already checked for those.
+                    _ = self.param(k, v);
                 }
-                _other => {}
             }
         }
-
-        // TODO: extend the list of the forwarded startup parameters.
-        // Currently, tokio-postgres doesn't allow us to pass
-        // arbitrary parameters, but the ones above are a good start.
-        //
-        // This and the reverse params problem can be better addressed
-        // in a bespoke connection machinery (a new library for that sake).
+        if !client_encoding {
+            // for compatibility since we removed it from tokio-postgres
+            self.param("client_encoding", "UTF8").unwrap();
+        }
     }
 }
@@ -338,10 +350,9 @@ impl ConnCfg {
 }
 
 /// Retrieve `options` from a startup message, dropping all proxy-secific flags.
-fn filtered_options(params: &StartupMessageParams) -> Option<String> {
+fn filtered_options(options: &str) -> Option<String> {
     #[allow(unstable_name_collisions)]
-    let options: String = params
-        .options_raw()?
+    let options: String = StartupMessageParams::parse_options_raw(options)
         .filter(|opt| parse_endpoint_param(opt).is_none() && neon_option(opt).is_none())
         .intersperse(" ") // TODO: use impl from std once it's stabilized
         .collect();
@@ -413,27 +424,23 @@ mod tests {
     #[test]
     fn test_filtered_options() {
         // Empty options is unlikely to be useful anyway.
-        let params = StartupMessageParams::new([("options", "")]);
-        assert_eq!(filtered_options(&params), None);
+        assert_eq!(filtered_options(""), None);
 
         // It's likely that clients will only use options to specify endpoint/project.
-        let params = StartupMessageParams::new([("options", "project=foo")]);
-        assert_eq!(filtered_options(&params), None);
+        let params = "project=foo";
+        assert_eq!(filtered_options(params), None);
 
         // Same, because unescaped whitespaces are no-op.
-        let params = StartupMessageParams::new([("options", " project=foo ")]);
-        assert_eq!(filtered_options(&params).as_deref(), None);
+        let params = " project=foo ";
+        assert_eq!(filtered_options(params), None);
 
-        let params = StartupMessageParams::new([("options", r"\ project=foo \ ")]);
-        assert_eq!(filtered_options(&params).as_deref(), Some(r"\ \ "));
+        let params = r"\ project=foo \ ";
+        assert_eq!(filtered_options(params).as_deref(), Some(r"\ \ "));
 
-        let params = StartupMessageParams::new([("options", "project = foo")]);
-        assert_eq!(filtered_options(&params).as_deref(), Some("project = foo"));
+        let params = "project = foo";
+        assert_eq!(filtered_options(params).as_deref(), Some("project = foo"));
 
-        let params = StartupMessageParams::new([(
-            "options",
-            "project = foo neon_endpoint_type:read_write neon_lsn:0/2",
-        )]);
-        assert_eq!(filtered_options(&params).as_deref(), Some("project = foo"));
+        let params = "project = foo neon_endpoint_type:read_write neon_lsn:0/2";
+        assert_eq!(filtered_options(params).as_deref(), Some("project = foo"));
     }
 }
diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs
index 86e64c0a386d..05d60612385c 100644
--- a/proxy/src/serverless/backend.rs
+++ b/proxy/src/serverless/backend.rs
@@ -231,6 +231,10 @@ impl ConnectMechanism for TokioMechanism {
             .dbname(&self.conn_info.dbname)
             .connect_timeout(timeout);
 
+        config
+            .param("client_encoding", "UTF8")
+            .expect("client encoding UTF8 is always valid");
+
         let pause = ctx.latency_timer.pause(crate::metrics::Waiting::Compute);
         let res = config.connect(tokio_postgres::NoTls).await;
         drop(pause);
diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs
index 7a99aeb75938..583ff75f7ca7 100644
--- a/proxy/src/serverless/sql_over_http.rs
+++ b/proxy/src/serverless/sql_over_http.rs
@@ -202,6 +202,7 @@ fn get_conn_info(
             options = Some(NeonOptions::parse_options_raw(&value));
         }
     }
+    ctx.set_db_options(params.freeze());
 
     let user_info = ComputeUserInfo {
         endpoint,
diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py
index f446f4f200d7..8ed44b109442 100644
--- a/test_runner/regress/test_proxy.py
+++ b/test_runner/regress/test_proxy.py
@@ -53,6 +53,25 @@ def test_proxy_select_1(static_proxy: NeonProxy):
     assert out[0][0] == 42
 
 
+def test_proxy_server_params(static_proxy: NeonProxy):
+    """
+    Test that server params are passing through to postgres
+    """
+
+    out = static_proxy.safe_psql(
+        "select to_json('0 seconds'::interval)", options="-c intervalstyle=iso_8601"
+    )
+    assert out[0][0] == "PT0S"
+    out = static_proxy.safe_psql(
+        "select to_json('0 seconds'::interval)", options="-c intervalstyle=sql_standard"
+    )
+    assert out[0][0] == "0"
+    out = static_proxy.safe_psql(
+        "select to_json('0 seconds'::interval)", options="-c intervalstyle=postgres"
+    )
+    assert out[0][0] == "00:00:00"
+
+
 def test_password_hack(static_proxy: NeonProxy):
     """
     Check the PasswordHack auth flow: an alternative to SCRAM auth for
From 5446e08891bd58a598aa427cb6208806154e3b41 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arpad=20M=C3=BCller?=
Date: Mon, 24 Jun 2024 12:29:54 +0200
Subject: [PATCH 02/57] Move remote_storage config related code into dedicated module (#8132)

Moves `RemoteStorageConfig` and related structs and functions into a
dedicated module. Also implements `Serialize` for the config structs
(requested in #8126).
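
For illustration, a minimal sketch of how the relocated module is used: parse a
config from TOML (mirroring the `parse` helper in the tests added in
`libs/remote_storage/src/config.rs`) and, with the new `Serialize` impl, write
it back out. The `example` wrapper and the `serde_json` dependency are
assumptions of this sketch only, not part of the change:

```rust
use remote_storage::RemoteStorageConfig;

fn example() -> anyhow::Result<()> {
    // Same TOML shape as the parse_localfs_config_with_timeout test.
    let input = "local_path = '.'\ntimeout = '5s'";
    let toml = input.parse::<toml_edit::Document>()?;
    let config = RemoteStorageConfig::from_toml(toml.as_item())?
        .expect("config is not empty");
    // The config structs now derive `Serialize`, so they can be re-emitted;
    // serde_json is assumed here purely as an example target format.
    println!("{}", serde_json::to_string(&config)?);
    Ok(())
}
```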
Follow-up of #8126 --- libs/remote_storage/src/azure_blob.rs | 2 +- libs/remote_storage/src/config.rs | 277 ++++++++++++++++++++++++++ libs/remote_storage/src/lib.rs | 254 +---------------------- libs/remote_storage/src/s3_bucket.rs | 4 +- 4 files changed, 285 insertions(+), 252 deletions(-) create mode 100644 libs/remote_storage/src/config.rs diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index dbd64fb5a631..8e590b17c486 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -34,7 +34,7 @@ use utils::backoff; use crate::metrics::{start_measuring_requests, AttemptOutcome, RequestKind}; use crate::{ - error::Cancelled, AzureConfig, ConcurrencyLimiter, Download, DownloadError, Listing, + config::AzureConfig, error::Cancelled, ConcurrencyLimiter, Download, DownloadError, Listing, ListingMode, RemotePath, RemoteStorage, StorageMetadata, TimeTravelError, TimeoutOrCancel, }; diff --git a/libs/remote_storage/src/config.rs b/libs/remote_storage/src/config.rs new file mode 100644 index 000000000000..8a8f6212e99b --- /dev/null +++ b/libs/remote_storage/src/config.rs @@ -0,0 +1,277 @@ +use std::{fmt::Debug, num::NonZeroUsize, str::FromStr, time::Duration}; + +use anyhow::bail; +use aws_sdk_s3::types::StorageClass; +use camino::Utf8PathBuf; + +use serde::{Deserialize, Serialize}; + +use crate::{ + DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT, + DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT, +}; + +/// External backup storage configuration, enough for creating a client for that storage. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct RemoteStorageConfig { + /// The storage connection configuration. + #[serde(flatten)] + pub storage: RemoteStorageKind, + /// A common timeout enforced for all requests after concurrency limiter permit has been + /// acquired. + #[serde( + with = "humantime_serde", + default = "default_timeout", + skip_serializing_if = "is_default_timeout" + )] + pub timeout: Duration, +} + +fn default_timeout() -> Duration { + RemoteStorageConfig::DEFAULT_TIMEOUT +} + +fn is_default_timeout(d: &Duration) -> bool { + *d == RemoteStorageConfig::DEFAULT_TIMEOUT +} + +/// A kind of a remote storage to connect to, with its connection configuration. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(untagged)] +pub enum RemoteStorageKind { + /// Storage based on local file system. + /// Specify a root folder to place all stored files into. + LocalFs { local_path: Utf8PathBuf }, + /// AWS S3 based storage, storing all files in the S3 bucket + /// specified by the config + AwsS3(S3Config), + /// Azure Blob based storage, storing all files in the container + /// specified by the config + AzureContainer(AzureConfig), +} + +/// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write). +#[derive(Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct S3Config { + /// Name of the bucket to connect to. + pub bucket_name: String, + /// The region where the bucket is located at. + pub bucket_region: String, + /// A "subfolder" in the bucket, to use the same bucket separately by multiple remote storage users at once. + pub prefix_in_bucket: Option, + /// A base URL to send S3 requests to. + /// By default, the endpoint is derived from a region name, assuming it's + /// an AWS S3 region name, erroring on wrong region name. 
+    /// Endpoint provides a way to support other S3 flavors and their regions.
+    ///
+    /// Example: `http://127.0.0.1:5000`
+    pub endpoint: Option<String>,
+    /// AWS S3 has various limits on its API calls, we need not to exceed those.
+    /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
+    #[serde(default = "default_remote_storage_s3_concurrency_limit")]
+    pub concurrency_limit: NonZeroUsize,
+    #[serde(default = "default_max_keys_per_list_response")]
+    pub max_keys_per_list_response: Option<i32>,
+    #[serde(
+        deserialize_with = "deserialize_storage_class",
+        serialize_with = "serialize_storage_class",
+        default
+    )]
+    pub upload_storage_class: Option<StorageClass>,
+}
+
+fn default_remote_storage_s3_concurrency_limit() -> NonZeroUsize {
+    DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT
+        .try_into()
+        .unwrap()
+}
+
+fn default_max_keys_per_list_response() -> Option<i32> {
+    DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
+}
+
+impl Debug for S3Config {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("S3Config")
+            .field("bucket_name", &self.bucket_name)
+            .field("bucket_region", &self.bucket_region)
+            .field("prefix_in_bucket", &self.prefix_in_bucket)
+            .field("concurrency_limit", &self.concurrency_limit)
+            .field(
+                "max_keys_per_list_response",
+                &self.max_keys_per_list_response,
+            )
+            .finish()
+    }
+}
+
+/// Azure bucket coordinates and access credentials to manage the bucket contents (read and write).
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct AzureConfig {
+    /// Name of the container to connect to.
+    pub container_name: String,
+    /// Name of the storage account the container is inside of
+    pub storage_account: Option<String>,
+    /// The region where the bucket is located at.
+    pub container_region: String,
+    /// A "subfolder" in the container, to use the same container separately by multiple remote storage users at once.
+    pub prefix_in_container: Option<String>,
+    /// Azure has various limits on its API calls, we need not to exceed those.
+    /// See [`DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT`] for more details.
+    #[serde(default = "default_remote_storage_azure_concurrency_limit")]
+    pub concurrency_limit: NonZeroUsize,
+    #[serde(default = "default_max_keys_per_list_response")]
+    pub max_keys_per_list_response: Option<i32>,
+}
+
+fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize {
+    NonZeroUsize::new(DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT).unwrap()
+}
+
+impl Debug for AzureConfig {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("AzureConfig")
+            .field("bucket_name", &self.container_name)
+            .field("storage_account", &self.storage_account)
+            .field("bucket_region", &self.container_region)
+            .field("prefix_in_container", &self.prefix_in_container)
+            .field("concurrency_limit", &self.concurrency_limit)
+            .field(
+                "max_keys_per_list_response",
+                &self.max_keys_per_list_response,
+            )
+            .finish()
+    }
+}
+
+fn deserialize_storage_class<'de, D: serde::Deserializer<'de>>(
+    deserializer: D,
+) -> Result<Option<StorageClass>, D::Error> {
+    Option::<String>::deserialize(deserializer).and_then(|s| {
+        if let Some(s) = s {
+            use serde::de::Error;
+            let storage_class = StorageClass::from_str(&s).expect("infallible");
+            #[allow(deprecated)]
+            if matches!(storage_class, StorageClass::Unknown(_)) {
+                return Err(D::Error::custom(format!(
+                    "Specified storage class unknown to SDK: '{s}'. 
Allowed values: {:?}",
+                    StorageClass::values()
+                )));
+            }
+            Ok(Some(storage_class))
+        } else {
+            Ok(None)
+        }
+    })
+}
+
+fn serialize_storage_class<S: serde::Serializer>(
+    val: &Option<StorageClass>,
+    serializer: S,
+) -> Result<S::Ok, S::Error> {
+    let val = val.as_ref().map(StorageClass::as_str);
+    Option::<&str>::serialize(&val, serializer)
+}
+
+impl RemoteStorageConfig {
+    pub const DEFAULT_TIMEOUT: Duration = std::time::Duration::from_secs(120);
+
+    pub fn from_toml(toml: &toml_edit::Item) -> anyhow::Result<Option<RemoteStorageConfig>> {
+        let document: toml_edit::Document = match toml {
+            toml_edit::Item::Table(toml) => toml.clone().into(),
+            toml_edit::Item::Value(toml_edit::Value::InlineTable(toml)) => {
+                toml.clone().into_table().into()
+            }
+            _ => bail!("toml not a table or inline table"),
+        };
+
+        if document.is_empty() {
+            return Ok(None);
+        }
+
+        Ok(Some(toml_edit::de::from_document(document)?))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn parse(input: &str) -> anyhow::Result<Option<RemoteStorageConfig>> {
+        let toml = input.parse::<toml_edit::Document>().unwrap();
+        RemoteStorageConfig::from_toml(toml.as_item())
+    }
+
+    #[test]
+    fn parse_localfs_config_with_timeout() {
+        let input = "local_path = '.'
+timeout = '5s'";
+
+        let config = parse(input).unwrap().expect("it exists");
+
+        assert_eq!(
+            config,
+            RemoteStorageConfig {
+                storage: RemoteStorageKind::LocalFs {
+                    local_path: Utf8PathBuf::from(".")
+                },
+                timeout: Duration::from_secs(5)
+            }
+        );
+    }
+
+    #[test]
+    fn test_s3_parsing() {
+        let toml = "\
+    bucket_name = 'foo-bar'
+    bucket_region = 'eu-central-1'
+    upload_storage_class = 'INTELLIGENT_TIERING'
+    timeout = '7s'
+    ";
+
+        let config = parse(toml).unwrap().expect("it exists");
+
+        assert_eq!(
+            config,
+            RemoteStorageConfig {
+                storage: RemoteStorageKind::AwsS3(S3Config {
+                    bucket_name: "foo-bar".into(),
+                    bucket_region: "eu-central-1".into(),
+                    prefix_in_bucket: None,
+                    endpoint: None,
+                    concurrency_limit: default_remote_storage_s3_concurrency_limit(),
+                    max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
+                    upload_storage_class: Some(StorageClass::IntelligentTiering),
+                }),
+                timeout: Duration::from_secs(7)
+            }
+        );
+    }
+
+    #[test]
+    fn test_azure_parsing() {
+        let toml = "\
+    container_name = 'foo-bar'
+    container_region = 'westeurope'
+    upload_storage_class = 'INTELLIGENT_TIERING'
+    timeout = '7s'
+    ";
+
+        let config = parse(toml).unwrap().expect("it exists");
+
+        assert_eq!(
+            config,
+            RemoteStorageConfig {
+                storage: RemoteStorageKind::AzureContainer(AzureConfig {
+                    container_name: "foo-bar".into(),
+                    storage_account: None,
+                    container_region: "westeurope".into(),
+                    prefix_in_container: None,
+                    concurrency_limit: default_remote_storage_azure_concurrency_limit(),
+                    max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
+                }),
+                timeout: Duration::from_secs(7)
+            }
+        );
+    }
+}
diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs
index e39ac581c758..d440c03a0e65 100644
--- a/libs/remote_storage/src/lib.rs
+++ b/libs/remote_storage/src/lib.rs
@@ -10,6 +10,7 @@
 #![deny(clippy::undocumented_unsafe_blocks)]
 
 mod azure_blob;
+mod config;
 mod error;
 mod local_fs;
 mod metrics;
@@ -18,17 +19,10 @@ mod simulate_failures;
 mod support;
 
 use std::{
-    collections::HashMap,
-    fmt::Debug,
-    num::{NonZeroU32, NonZeroUsize},
-    pin::Pin,
-    str::FromStr,
-    sync::Arc,
-    time::{Duration, SystemTime},
+    collections::HashMap, fmt::Debug, num::NonZeroU32, pin::Pin, sync::Arc, time::SystemTime,
 };
 
-use anyhow::{bail, Context};
-use aws_sdk_s3::types::StorageClass;
+use anyhow::Context;
 use camino::{Utf8Path, Utf8PathBuf};
 
 use bytes::Bytes;
@@ -44,6 +38,8 
@@ pub use self::{ }; use s3_bucket::RequestKind; +pub use crate::config::{AzureConfig, RemoteStorageConfig, RemoteStorageKind, S3Config}; + /// Azure SDK's ETag type is a simple String wrapper: we use this internally instead of repeating it here. pub use azure_core::Etag; @@ -525,168 +521,6 @@ impl From<[(&str, &str); N]> for StorageMetadata { } } -/// External backup storage configuration, enough for creating a client for that storage. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -pub struct RemoteStorageConfig { - /// The storage connection configuration. - #[serde(flatten)] - pub storage: RemoteStorageKind, - /// A common timeout enforced for all requests after concurrency limiter permit has been - /// acquired. - #[serde(with = "humantime_serde", default = "default_timeout")] - pub timeout: Duration, -} - -fn default_timeout() -> Duration { - RemoteStorageConfig::DEFAULT_TIMEOUT -} - -/// A kind of a remote storage to connect to, with its connection configuration. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(untagged)] -pub enum RemoteStorageKind { - /// Storage based on local file system. - /// Specify a root folder to place all stored files into. - LocalFs { local_path: Utf8PathBuf }, - /// AWS S3 based storage, storing all files in the S3 bucket - /// specified by the config - AwsS3(S3Config), - /// Azure Blob based storage, storing all files in the container - /// specified by the config - AzureContainer(AzureConfig), -} - -/// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write). -#[derive(Clone, PartialEq, Eq, serde::Deserialize)] -pub struct S3Config { - /// Name of the bucket to connect to. - pub bucket_name: String, - /// The region where the bucket is located at. - pub bucket_region: String, - /// A "subfolder" in the bucket, to use the same bucket separately by multiple remote storage users at once. - pub prefix_in_bucket: Option, - /// A base URL to send S3 requests to. - /// By default, the endpoint is derived from a region name, assuming it's - /// an AWS S3 region name, erroring on wrong region name. - /// Endpoint provides a way to support other S3 flavors and their regions. - /// - /// Example: `http://127.0.0.1:5000` - pub endpoint: Option, - /// AWS S3 has various limits on its API calls, we need not to exceed those. - /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details. - #[serde(default = "default_remote_storage_s3_concurrency_limit")] - pub concurrency_limit: NonZeroUsize, - #[serde(default = "default_max_keys_per_list_response")] - pub max_keys_per_list_response: Option, - #[serde(deserialize_with = "deserialize_storage_class", default)] - pub upload_storage_class: Option, -} - -fn default_remote_storage_s3_concurrency_limit() -> NonZeroUsize { - DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT - .try_into() - .unwrap() -} - -fn default_max_keys_per_list_response() -> Option { - DEFAULT_MAX_KEYS_PER_LIST_RESPONSE -} - -impl Debug for S3Config { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("S3Config") - .field("bucket_name", &self.bucket_name) - .field("bucket_region", &self.bucket_region) - .field("prefix_in_bucket", &self.prefix_in_bucket) - .field("concurrency_limit", &self.concurrency_limit) - .field( - "max_keys_per_list_response", - &self.max_keys_per_list_response, - ) - .finish() - } -} - -/// Azure bucket coordinates and access credentials to manage the bucket contents (read and write). 
-#[derive(Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] -pub struct AzureConfig { - /// Name of the container to connect to. - pub container_name: String, - /// Name of the storage account the container is inside of - pub storage_account: Option, - /// The region where the bucket is located at. - pub container_region: String, - /// A "subfolder" in the container, to use the same container separately by multiple remote storage users at once. - pub prefix_in_container: Option, - /// Azure has various limits on its API calls, we need not to exceed those. - /// See [`DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT`] for more details. - #[serde(default = "default_remote_storage_azure_concurrency_limit")] - pub concurrency_limit: NonZeroUsize, - #[serde(default = "default_max_keys_per_list_response")] - pub max_keys_per_list_response: Option, -} - -fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize { - NonZeroUsize::new(DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT).unwrap() -} - -impl Debug for AzureConfig { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AzureConfig") - .field("bucket_name", &self.container_name) - .field("storage_account", &self.storage_account) - .field("bucket_region", &self.container_region) - .field("prefix_in_container", &self.prefix_in_container) - .field("concurrency_limit", &self.concurrency_limit) - .field( - "max_keys_per_list_response", - &self.max_keys_per_list_response, - ) - .finish() - } -} - -fn deserialize_storage_class<'de, D: serde::Deserializer<'de>>( - deserializer: D, -) -> Result, D::Error> { - Option::::deserialize(deserializer).and_then(|s| { - if let Some(s) = s { - use serde::de::Error; - let storage_class = StorageClass::from_str(&s).expect("infallible"); - #[allow(deprecated)] - if matches!(storage_class, StorageClass::Unknown(_)) { - return Err(D::Error::custom(format!( - "Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}", - StorageClass::values() - ))); - } - Ok(Some(storage_class)) - } else { - Ok(None) - } - }) -} - -impl RemoteStorageConfig { - pub const DEFAULT_TIMEOUT: Duration = std::time::Duration::from_secs(120); - - pub fn from_toml(toml: &toml_edit::Item) -> anyhow::Result> { - let document: toml_edit::Document = match toml { - toml_edit::Item::Table(toml) => toml.clone().into(), - toml_edit::Item::Value(toml_edit::Value::InlineTable(toml)) => { - toml.clone().into_table().into() - } - _ => bail!("toml not a table or inline table"), - }; - - if document.is_empty() { - return Ok(None); - } - - Ok(Some(toml_edit::de::from_document(document)?)) - } -} - struct ConcurrencyLimiter { // Every request to S3 can be throttled or cancelled, if a certain number of requests per second is exceeded. // Same goes to IAM, which is queried before every S3 request, if enabled. IAM has even lower RPS threshold. @@ -733,11 +567,6 @@ impl ConcurrencyLimiter { mod tests { use super::*; - fn parse(input: &str) -> anyhow::Result> { - let toml = input.parse::().unwrap(); - RemoteStorageConfig::from_toml(toml.as_item()) - } - #[test] fn test_object_name() { let k = RemotePath::new(Utf8Path::new("a/b/c")).unwrap(); @@ -759,77 +588,4 @@ mod tests { let err = RemotePath::new(Utf8Path::new("/")).expect_err("Should fail on absolute paths"); assert_eq!(err.to_string(), "Path \"/\" is not relative"); } - - #[test] - fn parse_localfs_config_with_timeout() { - let input = "local_path = '.' 
-timeout = '5s'"; - - let config = parse(input).unwrap().expect("it exists"); - - assert_eq!( - config, - RemoteStorageConfig { - storage: RemoteStorageKind::LocalFs { - local_path: Utf8PathBuf::from(".") - }, - timeout: Duration::from_secs(5) - } - ); - } - - #[test] - fn test_s3_parsing() { - let toml = "\ - bucket_name = 'foo-bar' - bucket_region = 'eu-central-1' - upload_storage_class = 'INTELLIGENT_TIERING' - timeout = '7s' - "; - - let config = parse(toml).unwrap().expect("it exists"); - - assert_eq!( - config, - RemoteStorageConfig { - storage: RemoteStorageKind::AwsS3(S3Config { - bucket_name: "foo-bar".into(), - bucket_region: "eu-central-1".into(), - prefix_in_bucket: None, - endpoint: None, - concurrency_limit: default_remote_storage_s3_concurrency_limit(), - max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, - upload_storage_class: Some(StorageClass::IntelligentTiering), - }), - timeout: Duration::from_secs(7) - } - ); - } - - #[test] - fn test_azure_parsing() { - let toml = "\ - container_name = 'foo-bar' - container_region = 'westeurope' - upload_storage_class = 'INTELLIGENT_TIERING' - timeout = '7s' - "; - - let config = parse(toml).unwrap().expect("it exists"); - - assert_eq!( - config, - RemoteStorageConfig { - storage: RemoteStorageKind::AzureContainer(AzureConfig { - container_name: "foo-bar".into(), - storage_account: None, - container_region: "westeurope".into(), - prefix_in_container: None, - concurrency_limit: default_remote_storage_azure_concurrency_limit(), - max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, - }), - timeout: Duration::from_secs(7) - } - ); - } } diff --git a/libs/remote_storage/src/s3_bucket.rs b/libs/remote_storage/src/s3_bucket.rs index 76cf3eac80ef..ef1bd2c04730 100644 --- a/libs/remote_storage/src/s3_bucket.rs +++ b/libs/remote_storage/src/s3_bucket.rs @@ -46,12 +46,12 @@ use utils::backoff; use super::StorageMetadata; use crate::{ + config::S3Config, error::Cancelled, metrics::{start_counting_cancelled_wait, start_measuring_requests}, support::PermitCarrying, ConcurrencyLimiter, Download, DownloadError, Listing, ListingMode, RemotePath, RemoteStorage, - S3Config, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE, - REMOTE_STORAGE_PREFIX_SEPARATOR, + TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE, REMOTE_STORAGE_PREFIX_SEPARATOR, }; use crate::metrics::AttemptOutcome; From 188797f0486adb53b24edac39929e36bffdfe1b3 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 24 Jun 2024 11:41:11 +0100 Subject: [PATCH 03/57] pageserver: remove code that resumes tenant deletions after restarts (#8091) #8082 removed the legacy deletion path, but retained code for completing deletions that were started before a pageserver restart. This PR cleans up that remaining code, and removes all the pageserver code that dealt with tenant deletion markers and resuming tenant deletions. 
The release at https://github.com/neondatabase/neon/pull/8138 contains https://github.com/neondatabase/neon/pull/8082, so we can now merge this to `main` --- pageserver/src/config.rs | 12 +- pageserver/src/http/routes.rs | 9 +- pageserver/src/tenant.rs | 69 +--- pageserver/src/tenant/delete.rs | 426 ----------------------- pageserver/src/tenant/mgr.rs | 54 +-- pageserver/src/tenant/timeline/delete.rs | 5 - 6 files changed, 22 insertions(+), 553 deletions(-) delete mode 100644 pageserver/src/tenant/delete.rs diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index feb136384325..104234841c82 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -33,9 +33,7 @@ use utils::{ use crate::tenant::timeline::GetVectoredImpl; use crate::tenant::vectored_blob_io::MaxVectoredReadBytes; use crate::tenant::{config::TenantConfOpt, timeline::GetImpl}; -use crate::tenant::{ - TENANTS_SEGMENT_NAME, TENANT_DELETED_MARKER_FILE_NAME, TIMELINES_SEGMENT_NAME, -}; +use crate::tenant::{TENANTS_SEGMENT_NAME, TIMELINES_SEGMENT_NAME}; use crate::{disk_usage_eviction_task::DiskUsageEvictionTaskConfig, virtual_file::io_engine}; use crate::{tenant::config::TenantConf, virtual_file}; use crate::{ @@ -855,14 +853,6 @@ impl PageServerConf { ) } - pub(crate) fn tenant_deleted_mark_file_path( - &self, - tenant_shard_id: &TenantShardId, - ) -> Utf8PathBuf { - self.tenant_path(tenant_shard_id) - .join(TENANT_DELETED_MARKER_FILE_NAME) - } - pub fn traces_path(&self) -> Utf8PathBuf { self.workdir.join("traces") } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index b5713a8cb441..cfa507fed000 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -329,14 +329,11 @@ impl From for ApiError { } } -impl From for ApiError { - fn from(value: crate::tenant::delete::DeleteTenantError) -> Self { - use crate::tenant::delete::DeleteTenantError::*; +impl From for ApiError { + fn from(value: crate::tenant::mgr::DeleteTenantError) -> Self { + use crate::tenant::mgr::DeleteTenantError::*; match value { - Get(g) => ApiError::from(g), - Timeline(t) => ApiError::from(t), SlotError(e) => e.into(), - SlotUpsertError(e) => e.into(), Other(o) => ApiError::InternalServerError(o), Cancelled => ApiError::ShuttingDown, } diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index ace95af10ac3..6a748f61e7e1 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -55,11 +55,9 @@ use self::config::AttachedLocationConfig; use self::config::AttachmentMode; use self::config::LocationConf; use self::config::TenantConf; -use self::delete::DeleteTenantFlow; use self::metadata::TimelineMetadata; use self::mgr::GetActiveTenantError; use self::mgr::GetTenantError; -use self::mgr::TenantsMap; use self::remote_timeline_client::upload::upload_index_part; use self::remote_timeline_client::RemoteTimelineClient; use self::timeline::uninit::TimelineCreateGuard; @@ -137,7 +135,6 @@ pub mod remote_timeline_client; pub mod storage_layer; pub mod config; -pub mod delete; pub mod mgr; pub mod secondary; pub mod tasks; @@ -161,8 +158,6 @@ pub const TENANTS_SEGMENT_NAME: &str = "tenants"; /// Parts of the `.neon/tenants//timelines/` directory prefix. pub const TIMELINES_SEGMENT_NAME: &str = "timelines"; -pub const TENANT_DELETED_MARKER_FILE_NAME: &str = "deleted"; - /// References to shared objects that are passed into each tenant, such /// as the shared remote storage client and process initialization state. 
#[derive(Clone)] @@ -207,7 +202,6 @@ struct TimelinePreload { } pub(crate) struct TenantPreload { - deleting: bool, timelines: HashMap, } @@ -286,8 +280,6 @@ pub struct Tenant { /// background warmup. pub(crate) activate_now_sem: tokio::sync::Semaphore, - pub(crate) delete_progress: Arc>, - // Cancellation token fires when we have entered shutdown(). This is a parent of // Timelines' cancellation token. pub(crate) cancel: CancellationToken, @@ -654,7 +646,6 @@ impl Tenant { attached_conf: AttachedTenantConf, shard_identity: ShardIdentity, init_order: Option, - tenants: &'static std::sync::RwLock, mode: SpawnMode, ctx: &RequestContext, ) -> anyhow::Result> { @@ -828,52 +819,6 @@ impl Tenant { // Remote preload is complete. drop(remote_load_completion); - let pending_deletion = { - match DeleteTenantFlow::should_resume_deletion( - conf, - preload.as_ref().map(|p| p.deleting).unwrap_or(false), - &tenant_clone, - ) - .await - { - Ok(should_resume_deletion) => should_resume_deletion, - Err(err) => { - make_broken(&tenant_clone, anyhow::anyhow!(err), BrokenVerbosity::Error); - return Ok(()); - } - } - }; - - info!("pending_deletion {}", pending_deletion.is_some()); - - if let Some(deletion) = pending_deletion { - // as we are no longer loading, signal completion by dropping - // the completion while we resume deletion - drop(_completion); - let background_jobs_can_start = - init_order.as_ref().map(|x| &x.background_jobs_can_start); - if let Some(background) = background_jobs_can_start { - info!("waiting for backgound jobs barrier"); - background.clone().wait().await; - info!("ready for backgound jobs barrier"); - } - - let deleted = DeleteTenantFlow::resume_from_attach( - deletion, - &tenant_clone, - preload, - tenants, - &ctx, - ) - .await; - - if let Err(e) = deleted { - make_broken(&tenant_clone, anyhow::anyhow!(e), BrokenVerbosity::Error); - } - - return Ok(()); - } - // We will time the duration of the attach phase unless this is a creation (attach will do no work) let attached = { let _attach_timer = match mode { @@ -931,21 +876,13 @@ impl Tenant { ) .await?; - let deleting = other_keys.contains(TENANT_DELETED_MARKER_FILE_NAME); - info!( - "found {} timelines, deleting={}", - remote_timeline_ids.len(), - deleting - ); + info!("found {} timelines", remote_timeline_ids.len(),); for k in other_keys { - if k != TENANT_DELETED_MARKER_FILE_NAME { - warn!("Unexpected non timeline key {k}"); - } + warn!("Unexpected non timeline key {k}"); } Ok(TenantPreload { - deleting, timelines: Self::load_timeline_metadata( self, remote_timeline_ids, @@ -974,7 +911,6 @@ impl Tenant { let preload = match (preload, mode) { (Some(p), _) => p, (None, SpawnMode::Create) => TenantPreload { - deleting: false, timelines: HashMap::new(), }, (None, _) => { @@ -2628,7 +2564,6 @@ impl Tenant { cached_synthetic_tenant_size: Arc::new(AtomicU64::new(0)), eviction_task_tenant_state: tokio::sync::Mutex::new(EvictionTaskTenantState::default()), activate_now_sem: tokio::sync::Semaphore::new(0), - delete_progress: Arc::new(tokio::sync::Mutex::new(DeleteTenantFlow::default())), cancel: CancellationToken::default(), gate: Gate::default(), timeline_get_throttle: Arc::new(throttle::Throttle::new( diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs deleted file mode 100644 index d9da3157b7fb..000000000000 --- a/pageserver/src/tenant/delete.rs +++ /dev/null @@ -1,426 +0,0 @@ -use std::sync::Arc; - -use anyhow::Context; -use camino::{Utf8Path, Utf8PathBuf}; -use pageserver_api::{models::TenantState, 
shard::TenantShardId}; -use remote_storage::{GenericRemoteStorage, RemotePath, TimeoutOrCancel}; -use tokio::sync::OwnedMutexGuard; -use tokio_util::sync::CancellationToken; -use tracing::{error, Instrument}; - -use utils::{backoff, completion, crashsafe, fs_ext, id::TimelineId, pausable_failpoint}; - -use crate::{ - config::PageServerConf, - context::RequestContext, - task_mgr::{self}, - tenant::{ - mgr::{TenantSlot, TenantsMapRemoveResult}, - remote_timeline_client::remote_heatmap_path, - }, -}; - -use super::{ - mgr::{GetTenantError, TenantSlotError, TenantSlotUpsertError, TenantsMap}, - remote_timeline_client::{FAILED_REMOTE_OP_RETRIES, FAILED_UPLOAD_WARN_THRESHOLD}, - timeline::delete::DeleteTimelineFlow, - tree_sort_timelines, DeleteTimelineError, Tenant, TenantPreload, -}; - -#[derive(Debug, thiserror::Error)] -pub(crate) enum DeleteTenantError { - #[error("GetTenant {0}")] - Get(#[from] GetTenantError), - - #[error("Tenant map slot error {0}")] - SlotError(#[from] TenantSlotError), - - #[error("Tenant map slot upsert error {0}")] - SlotUpsertError(#[from] TenantSlotUpsertError), - - #[error("Timeline {0}")] - Timeline(#[from] DeleteTimelineError), - - #[error("Cancelled")] - Cancelled, - - #[error(transparent)] - Other(#[from] anyhow::Error), -} - -type DeletionGuard = tokio::sync::OwnedMutexGuard; - -fn remote_tenant_delete_mark_path( - conf: &PageServerConf, - tenant_shard_id: &TenantShardId, -) -> anyhow::Result { - let tenant_remote_path = conf - .tenant_path(tenant_shard_id) - .strip_prefix(&conf.workdir) - .context("Failed to strip workdir prefix") - .and_then(RemotePath::new) - .context("tenant path")?; - Ok(tenant_remote_path.join(Utf8Path::new("timelines/deleted"))) -} - -async fn schedule_ordered_timeline_deletions( - tenant: &Arc, -) -> Result>, TimelineId)>, DeleteTenantError> { - // Tenant is stopping at this point. We know it will be deleted. - // No new timelines should be created. - // Tree sort timelines to delete from leafs to the root. - // NOTE: by calling clone we release the mutex which creates a possibility for a race: pending deletion - // can complete and remove timeline from the map in between our call to clone - // and `DeleteTimelineFlow::run`, so `run` wont find timeline in `timelines` map. - // timelines.lock is currently synchronous so we cant hold it across await point. - // So just ignore NotFound error if we get it from `run`. - // Beware: in case it becomes async and we try to hold it here, `run` also locks it, which can create a deadlock. - let timelines = tenant.timelines.lock().unwrap().clone(); - let sorted = - tree_sort_timelines(timelines, |t| t.get_ancestor_timeline_id()).context("tree sort")?; - - let mut already_running_deletions = vec![]; - - for (timeline_id, _) in sorted.into_iter().rev() { - let span = tracing::info_span!("timeline_delete", %timeline_id); - let res = DeleteTimelineFlow::run(tenant, timeline_id, true) - .instrument(span) - .await; - if let Err(e) = res { - match e { - DeleteTimelineError::NotFound => { - // Timeline deletion finished after call to clone above but before call - // to `DeleteTimelineFlow::run` and removed timeline from the map. - continue; - } - DeleteTimelineError::AlreadyInProgress(guard) => { - already_running_deletions.push((guard, timeline_id)); - continue; - } - e => return Err(DeleteTenantError::Timeline(e)), - } - } - } - - Ok(already_running_deletions) -} - -async fn ensure_timelines_dir_empty(timelines_path: &Utf8Path) -> Result<(), DeleteTenantError> { - // Assert timelines dir is empty. 
- if !fs_ext::is_directory_empty(timelines_path).await? { - // Display first 10 items in directory - let list = fs_ext::list_dir(timelines_path).await.context("list_dir")?; - let list = &list.into_iter().take(10).collect::>(); - return Err(DeleteTenantError::Other(anyhow::anyhow!( - "Timelines directory is not empty after all timelines deletion: {list:?}" - ))); - } - - Ok(()) -} - -async fn remove_tenant_remote_delete_mark( - conf: &PageServerConf, - remote_storage: &GenericRemoteStorage, - tenant_shard_id: &TenantShardId, - cancel: &CancellationToken, -) -> Result<(), DeleteTenantError> { - let path = remote_tenant_delete_mark_path(conf, tenant_shard_id)?; - backoff::retry( - || async { remote_storage.delete(&path, cancel).await }, - TimeoutOrCancel::caused_by_cancel, - FAILED_UPLOAD_WARN_THRESHOLD, - FAILED_REMOTE_OP_RETRIES, - "remove_tenant_remote_delete_mark", - cancel, - ) - .await - .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel)) - .and_then(|x| x) - .context("remove_tenant_remote_delete_mark")?; - Ok(()) -} - -// Cleanup fs traces: tenant config, timelines dir local delete mark, tenant dir -async fn cleanup_remaining_fs_traces( - conf: &PageServerConf, - tenant_shard_id: &TenantShardId, -) -> Result<(), DeleteTenantError> { - let rm = |p: Utf8PathBuf, is_dir: bool| async move { - if is_dir { - tokio::fs::remove_dir(&p).await - } else { - tokio::fs::remove_file(&p).await - } - .or_else(fs_ext::ignore_not_found) - .with_context(|| format!("failed to delete {p}")) - }; - - rm(conf.tenant_config_path(tenant_shard_id), false).await?; - rm(conf.tenant_location_config_path(tenant_shard_id), false).await?; - - fail::fail_point!("tenant-delete-before-remove-timelines-dir", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-remove-timelines-dir" - ))? - }); - - rm(conf.timelines_path(tenant_shard_id), true).await?; - - fail::fail_point!("tenant-delete-before-remove-deleted-mark", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-remove-deleted-mark" - ))? - }); - - // Make sure previous deletions are ordered before mark removal. - // Otherwise there is no guarantee that they reach the disk before mark deletion. - // So its possible for mark to reach disk first and for other deletions - // to be reordered later and thus missed if a crash occurs. - // Note that we dont need to sync after mark file is removed - // because we can tolerate the case when mark file reappears on startup. - let tenant_path = &conf.tenant_path(tenant_shard_id); - if tenant_path.exists() { - crashsafe::fsync_async(&conf.tenant_path(tenant_shard_id)) - .await - .context("fsync_pre_mark_remove")?; - } - - rm(conf.tenant_deleted_mark_file_path(tenant_shard_id), false).await?; - - rm(conf.tenant_heatmap_path(tenant_shard_id), false).await?; - - fail::fail_point!("tenant-delete-before-remove-tenant-dir", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-remove-tenant-dir" - ))? 
- }); - - rm(conf.tenant_path(tenant_shard_id), true).await?; - - Ok(()) -} - -#[derive(Default)] -pub enum DeleteTenantFlow { - #[default] - NotStarted, - InProgress, - Finished, -} - -impl DeleteTenantFlow { - pub(crate) async fn should_resume_deletion( - conf: &'static PageServerConf, - remote_mark_exists: bool, - tenant: &Tenant, - ) -> Result, DeleteTenantError> { - let acquire = |t: &Tenant| { - Some( - Arc::clone(&t.delete_progress) - .try_lock_owned() - .expect("we're the only owner during init"), - ) - }; - - if remote_mark_exists { - return Ok(acquire(tenant)); - } - - // Check local mark first, if its there there is no need to go to s3 to check whether remote one exists. - if conf - .tenant_deleted_mark_file_path(&tenant.tenant_shard_id) - .exists() - { - Ok(acquire(tenant)) - } else { - Ok(None) - } - } - - pub(crate) async fn resume_from_attach( - guard: DeletionGuard, - tenant: &Arc, - preload: Option, - tenants: &'static std::sync::RwLock, - ctx: &RequestContext, - ) -> Result<(), DeleteTenantError> { - let (_, progress) = completion::channel(); - - tenant - .set_stopping(progress, false, true) - .await - .expect("cant be stopping or broken"); - - tenant - .attach(preload, super::SpawnMode::Eager, ctx) - .await - .context("attach")?; - - Self::background( - guard, - tenant.conf, - tenant.remote_storage.clone(), - tenants, - tenant, - ) - .await - } - - async fn background( - mut guard: OwnedMutexGuard, - conf: &PageServerConf, - remote_storage: GenericRemoteStorage, - tenants: &'static std::sync::RwLock, - tenant: &Arc, - ) -> Result<(), DeleteTenantError> { - // Tree sort timelines, schedule delete for them. Mention retries from the console side. - // Note that if deletion fails we dont mark timelines as broken, - // the whole tenant will become broken as by `Self::schedule_background` logic - let already_running_timeline_deletions = schedule_ordered_timeline_deletions(tenant) - .await - .context("schedule_ordered_timeline_deletions")?; - - fail::fail_point!("tenant-delete-before-polling-ongoing-deletions", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-polling-ongoing-deletions" - ))? - }); - - // Wait for deletions that were already running at the moment when tenant deletion was requested. - // When we can lock deletion guard it means that corresponding timeline deletion finished. 
- for (guard, timeline_id) in already_running_timeline_deletions { - let flow = guard.lock().await; - if !flow.is_finished() { - return Err(DeleteTenantError::Other(anyhow::anyhow!( - "already running timeline deletion failed: {timeline_id}" - ))); - } - } - - // Remove top-level tenant objects that don't belong to a timeline, such as heatmap - let heatmap_path = remote_heatmap_path(&tenant.tenant_shard_id()); - if let Some(Err(e)) = backoff::retry( - || async { - remote_storage - .delete(&heatmap_path, &task_mgr::shutdown_token()) - .await - }, - TimeoutOrCancel::caused_by_cancel, - FAILED_UPLOAD_WARN_THRESHOLD, - FAILED_REMOTE_OP_RETRIES, - "remove_remote_tenant_heatmap", - &task_mgr::shutdown_token(), - ) - .await - { - tracing::warn!("Failed to delete heatmap at {heatmap_path}: {e}"); - } - - let timelines_path = conf.timelines_path(&tenant.tenant_shard_id); - // May not exist if we fail in cleanup_remaining_fs_traces after removing it - if timelines_path.exists() { - // sanity check to guard against layout changes - ensure_timelines_dir_empty(&timelines_path) - .await - .context("timelines dir not empty")?; - } - - remove_tenant_remote_delete_mark( - conf, - &remote_storage, - &tenant.tenant_shard_id, - &task_mgr::shutdown_token(), - ) - .await?; - - pausable_failpoint!("tenant-delete-before-cleanup-remaining-fs-traces-pausable"); - fail::fail_point!("tenant-delete-before-cleanup-remaining-fs-traces", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-cleanup-remaining-fs-traces" - ))? - }); - - cleanup_remaining_fs_traces(conf, &tenant.tenant_shard_id) - .await - .context("cleanup_remaining_fs_traces")?; - - { - // This block is simply removing the TenantSlot for this tenant. It requires a loop because - // we might conflict with a TenantSlot::InProgress marker and need to wait for it. - // - // This complexity will go away when we simplify how deletion works: - // https://github.com/neondatabase/neon/issues/5080 - loop { - // Under the TenantMap lock, try to remove the tenant. We usually succeed, but if - // we encounter an InProgress marker, yield the barrier it contains and wait on it. - let barrier = { - let mut locked = tenants.write().unwrap(); - let removed = locked.remove(tenant.tenant_shard_id); - - // FIXME: we should not be modifying this from outside of mgr.rs. - // This will go away when we simplify deletion (https://github.com/neondatabase/neon/issues/5080) - - // Update stats - match &removed { - TenantsMapRemoveResult::Occupied(slot) => { - crate::metrics::TENANT_MANAGER.slot_removed(slot); - } - TenantsMapRemoveResult::InProgress(barrier) => { - crate::metrics::TENANT_MANAGER - .slot_removed(&TenantSlot::InProgress(barrier.clone())); - } - TenantsMapRemoveResult::Vacant => { - // Nothing changed in map, no metric update - } - } - - match removed { - TenantsMapRemoveResult::Occupied(TenantSlot::Attached(tenant)) => { - match tenant.current_state() { - TenantState::Stopping { .. } | TenantState::Broken { .. } => { - // Expected: we put the tenant into stopping state before we start deleting it - } - state => { - // Unexpected state - tracing::warn!( - "Tenant in unexpected state {state} after deletion" - ); - } - } - break; - } - TenantsMapRemoveResult::Occupied(TenantSlot::Secondary(_)) => { - // This is unexpected: this secondary tenants should not have been created, and we - // are not in a position to shut it down from here. 
- tracing::warn!("Tenant transitioned to secondary mode while deleting!"); - break; - } - TenantsMapRemoveResult::Occupied(TenantSlot::InProgress(_)) => { - unreachable!("TenantsMap::remove handles InProgress separately, should never return it here"); - } - TenantsMapRemoveResult::Vacant => { - tracing::warn!( - "Tenant removed from TenantsMap before deletion completed" - ); - break; - } - TenantsMapRemoveResult::InProgress(barrier) => { - // An InProgress entry was found, we must wait on its barrier - barrier - } - } - }; - - tracing::info!( - "Waiting for competing operation to complete before deleting state for tenant" - ); - barrier.wait().await; - } - } - - *guard = Self::Finished; - - Ok(()) - } -} diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 326086a3ccdf..4fcdf1405288 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -51,7 +51,6 @@ use utils::fs_ext::PathExt; use utils::generation::Generation; use utils::id::{TenantId, TimelineId}; -use super::delete::DeleteTenantError; use super::remote_timeline_client::remote_tenant_path; use super::secondary::SecondaryTenant; use super::timeline::detach_ancestor::PreparedTimelineDetach; @@ -109,12 +108,6 @@ pub(crate) enum TenantsMap { ShuttingDown(BTreeMap), } -pub(crate) enum TenantsMapRemoveResult { - Occupied(TenantSlot), - Vacant, - InProgress(utils::completion::Barrier), -} - /// When resolving a TenantId to a shard, we may be looking for the 0th /// shard, or we might be looking for whichever shard holds a particular page. #[derive(Copy, Clone)] @@ -191,26 +184,6 @@ impl TenantsMap { } } - /// Only for use from DeleteTenantFlow. This method directly removes a TenantSlot from the map. - /// - /// The normal way to remove a tenant is using a SlotGuard, which will gracefully remove the guarded - /// slot if the enclosed tenant is shutdown. - pub(crate) fn remove(&mut self, tenant_shard_id: TenantShardId) -> TenantsMapRemoveResult { - use std::collections::btree_map::Entry; - match self { - TenantsMap::Initializing => TenantsMapRemoveResult::Vacant, - TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => match m.entry(tenant_shard_id) { - Entry::Occupied(entry) => match entry.get() { - TenantSlot::InProgress(barrier) => { - TenantsMapRemoveResult::InProgress(barrier.clone()) - } - _ => TenantsMapRemoveResult::Occupied(entry.remove()), - }, - Entry::Vacant(_entry) => TenantsMapRemoveResult::Vacant, - }, - } - } - #[cfg(all(debug_assertions, not(test)))] pub(crate) fn len(&self) -> usize { match self { @@ -460,6 +433,18 @@ async fn init_load_tenant_configs( Ok(configs) } +#[derive(Debug, thiserror::Error)] +pub(crate) enum DeleteTenantError { + #[error("Tenant map slot error {0}")] + SlotError(#[from] TenantSlotError), + + #[error("Cancelled")] + Cancelled, + + #[error(transparent)] + Other(#[from] anyhow::Error), +} + /// Initialize repositories with locally available timelines. /// Timelines that are only partially available locally (remote storage has more data than this pageserver) /// are scheduled for download and added to the tenant once download is completed. 
@@ -629,7 +614,6 @@ pub async fn init_tenant_mgr( AttachedTenantConf::new(location_conf.tenant_conf, attached_conf), shard_identity, Some(init_order.clone()), - &TENANTS, SpawnMode::Lazy, &ctx, ) { @@ -685,7 +669,6 @@ fn tenant_spawn( location_conf: AttachedTenantConf, shard_identity: ShardIdentity, init_order: Option, - tenants: &'static std::sync::RwLock, mode: SpawnMode, ctx: &RequestContext, ) -> anyhow::Result> { @@ -712,7 +695,6 @@ fn tenant_spawn( location_conf, shard_identity, init_order, - tenants, mode, ctx, ) { @@ -1161,7 +1143,6 @@ impl TenantManager { attached_conf, shard_identity, None, - self.tenants, spawn_mode, ctx, )?; @@ -1283,7 +1264,6 @@ impl TenantManager { AttachedTenantConf::try_from(config)?, shard_identity, None, - self.tenants, SpawnMode::Eager, ctx, )?; @@ -1634,7 +1614,7 @@ impl TenantManager { for child_shard_id in &child_shards { let child_shard_id = *child_shard_id; let child_shard = { - let locked = TENANTS.read().unwrap(); + let locked = self.tenants.read().unwrap(); let peek_slot = tenant_map_peek_slot(&locked, &child_shard_id, TenantSlotPeekMode::Read)?; peek_slot.and_then(|s| s.get_attached()).cloned() @@ -1866,7 +1846,7 @@ impl TenantManager { deletion_queue_client: &DeletionQueueClient, ) -> Result<(), TenantStateError> { let tmp_path = self - .detach_tenant0(conf, &TENANTS, tenant_shard_id, deletion_queue_client) + .detach_tenant0(conf, tenant_shard_id, deletion_queue_client) .await?; spawn_background_purge(tmp_path); @@ -1876,7 +1856,6 @@ impl TenantManager { async fn detach_tenant0( &self, conf: &'static PageServerConf, - tenants: &std::sync::RwLock, tenant_shard_id: TenantShardId, deletion_queue_client: &DeletionQueueClient, ) -> Result { @@ -1890,7 +1869,7 @@ impl TenantManager { }; let removal_result = remove_tenant_from_memory( - tenants, + self.tenants, tenant_shard_id, tenant_dir_rename_operation(tenant_shard_id), ) @@ -1906,7 +1885,7 @@ impl TenantManager { pub(crate) fn list_tenants( &self, ) -> Result, TenantMapListError> { - let tenants = TENANTS.read().unwrap(); + let tenants = self.tenants.read().unwrap(); let m = match &*tenants { TenantsMap::Initializing => return Err(TenantMapListError::Initializing), TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m, @@ -2007,7 +1986,6 @@ impl TenantManager { AttachedTenantConf::try_from(config)?, shard_identity, None, - self.tenants, SpawnMode::Eager, ctx, )?; diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index 441298f3e9a2..6d747d424dde 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -255,7 +255,6 @@ impl DeleteTimelineFlow { } /// Shortcut to create Timeline in stopping state and spawn deletion task. - /// See corresponding parts of [`crate::tenant::delete::DeleteTenantFlow`] #[instrument(skip_all, fields(%timeline_id))] pub async fn resume_deletion( tenant: Arc, @@ -420,10 +419,6 @@ impl DeleteTimelineFlow { Ok(()) } - pub(crate) fn is_finished(&self) -> bool { - matches!(self, Self::Finished) - } - pub(crate) fn is_not_started(&self) -> bool { matches!(self, Self::NotStarted) } From de05f90735b3b54b6fa99b0b42817d03310ebf87 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 24 Jun 2024 11:53:43 +0100 Subject: [PATCH 04/57] pageserver: add more info-level logging in shard splits (#8137) ## Problem `test_sharding_autosplit` is occasionally failing on warnings about shard splits taking longer than expected (`Exclusive lock by ShardSplit was held for`...) 
It's not obvious which part is taking the time (I suspect remote storage uploads). Example: https://neon-github-public-dev.s3.amazonaws.com/reports/main/9618788427/index.html#testresult/b395294d5bdeb783/ ## Summary of changes - Since shard splits are infrequent events, we can afford to be very chatty: add a bunch of info-level logging throughout the process. --- pageserver/src/tenant.rs | 4 ++++ pageserver/src/tenant/mgr.rs | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 6a748f61e7e1..76dc52fa16b9 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -2151,6 +2151,7 @@ impl Tenant { // Upload an index from the parent: this is partly to provide freshness for the // child tenants that will copy it, and partly for general ease-of-debugging: there will // always be a parent shard index in the same generation as we wrote the child shard index. + tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index"); timeline .remote_client .schedule_index_upload_for_file_changes()?; @@ -2158,12 +2159,14 @@ impl Tenant { // Shut down the timeline's remote client: this means that the indices we write // for child shards will not be invalidated by the parent shard deleting layers. + tracing::info!(timeline_id=%timeline.timeline_id, "Shutting down remote storage client"); timeline.remote_client.shutdown().await; // Download methods can still be used after shutdown, as they don't flow through the remote client's // queue. In principal the RemoteTimelineClient could provide this without downloading it, but this // operation is rare, so it's simpler to just download it (and robustly guarantees that the index // we use here really is the remotely persistent one). + tracing::info!(timeline_id=%timeline.timeline_id, "Downloading index_part from parent"); let result = timeline.remote_client .download_index_file(&self.cancel) .instrument(info_span!("download_index_file", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline.timeline_id)) @@ -2176,6 +2179,7 @@ impl Tenant { }; for child_shard in child_shards { + tracing::info!(timeline_id=%timeline.timeline_id, "Uploading index_part for child {}", child_shard.to_index()); upload_index_part( &self.remote_storage, child_shard, diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 4fcdf1405288..1bc21d8b7805 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -1715,6 +1715,7 @@ impl TenantManager { let timelines = parent_shard.timelines.lock().unwrap().clone(); let parent_timelines = timelines.keys().cloned().collect::>(); for timeline in timelines.values() { + tracing::info!(timeline_id=%timeline.timeline_id, "Loading list of layers to hardlink"); let timeline_layers = timeline .layers .read() @@ -1754,7 +1755,12 @@ impl TenantManager { // Since we will do a large number of small filesystem metadata operations, batch them into // spawn_blocking calls rather than doing each one as a tokio::fs round-trip. + let span = tracing::Span::current(); let jh = tokio::task::spawn_blocking(move || -> anyhow::Result { + // Run this synchronous code in the same log context as the outer function that spawned it. 
+ let _span = span.enter(); + + tracing::info!("Creating {} directories", create_dirs.len()); for dir in &create_dirs { if let Err(e) = std::fs::create_dir_all(dir) { // Ignore AlreadyExists errors, drop out on all other errors @@ -1768,6 +1774,11 @@ impl TenantManager { } for child_prefix in child_prefixes { + tracing::info!( + "Hard-linking {} parent layers into child path {}", + parent_layers.len(), + child_prefix + ); for relative_layer in &parent_layers { let parent_path = parent_path.join(relative_layer); let child_path = child_prefix.join(relative_layer); @@ -1793,6 +1804,7 @@ impl TenantManager { // Durability is not required for correctness, but if we crashed during split and // then came restarted with empty timeline dirs, it would be very inefficient to // re-populate from remote storage. + tracing::info!("fsyncing {} directories", create_dirs.len()); for dir in create_dirs { if let Err(e) = crashsafe::fsync(&dir) { // Something removed a newly created timeline dir out from underneath us? Extremely From 47fdf93cf0d8c60434d1501a6047830b49d2f4b2 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 24 Jun 2024 14:54:54 +0100 Subject: [PATCH 05/57] tests: fix a flake in test_sharding_split_compaction (#8136) ## Problem This test could occasionally trigger a "removing local file ... because it has unexpected length log" when using the `compact-shard-ancestors-persistent` failpoint is in use, which is unexpected because that failpoint stops the process when the remote metadata is in sync with local files. It was because there are two shards on the same pageserver, and while the one being compacted explicitly stops at the failpoint, another shard was compacting in the background and failing at an unclean point. The test intends to disable background compaction, but was mistakenly revoking the value of `compaction_period` when it updated `pitr_interval`. Example failure: https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8123/9602976462/index.html#/testresult/7dd6165da7daef40 ## Summary of changes - Update `TENANT_CONF` in the test to use properly typed values, so that it is usable in pageserver APIs as well as via neon_local. - When updating tenant config with `pitr_interval`, retain the overrides from the start of the test, so that there won't be any background compaction going on during the test. --- test_runner/regress/test_sharding.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 56075c597564..62a9f422ee4d 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -190,19 +190,20 @@ def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder, failpoint: """ Test that after a split, we clean up parent layer data in the child shards via compaction. """ + TENANT_CONF = { # small checkpointing and compaction targets to ensure we generate many upload operations - "checkpoint_distance": f"{128 * 1024}", - "compaction_threshold": "1", - "compaction_target_size": f"{128 * 1024}", + "checkpoint_distance": 128 * 1024, + "compaction_threshold": 1, + "compaction_target_size": 128 * 1024, # no PITR horizon, we specify the horizon when we request on-demand GC "pitr_interval": "3600s", # disable background compaction and GC. We invoke it manually when we want it to happen. 
"gc_period": "0s", "compaction_period": "0s", # create image layers eagerly, so that GC can remove some layers - "image_creation_threshold": "1", - "image_layer_creation_check_threshold": "0", + "image_creation_threshold": 1, + "image_layer_creation_check_threshold": 0, } neon_env_builder.storage_controller_config = { @@ -261,7 +262,9 @@ def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder, failpoint: env.pageserver.start() # Cleanup part 2: once layers are outside the PITR window, they will be rewritten if they are partially redundant - env.storage_controller.pageserver_api().set_tenant_config(tenant_id, {"pitr_interval": "0s"}) + updated_conf = TENANT_CONF.copy() + updated_conf["pitr_interval"] = "0s" + env.storage_controller.pageserver_api().set_tenant_config(tenant_id, updated_conf) env.storage_controller.reconcile_until_idle() for shard in shards: From a4db2af1f0667514ee5cbcb545a2f131b1b3538e Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Mon, 24 Jun 2024 15:07:59 +0100 Subject: [PATCH 06/57] Truncate waltmp file on creation (#8133) Previously in safekeeper code, new segment file was opened without truncate option. I don't think there is a reason to do it, this commit replaces it with `File::create` to make it simpler and remove `clippy::suspicious_open_options` linter warning. --- safekeeper/src/wal_storage.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 0c1731937c79..2aead70ffd03 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -231,11 +231,7 @@ impl PhysicalStorage { // half initialized segment, first bake it under tmp filename and // then rename. let tmp_path = self.timeline_dir.join("waltmp"); - #[allow(clippy::suspicious_open_options)] - let mut file = OpenOptions::new() - .create(true) - .write(true) - .open(&tmp_path) + let mut file = File::create(&tmp_path) .await .with_context(|| format!("Failed to open tmp wal file {:?}", &tmp_path))?; From d8ffe662a9bb9eb7a7a4c1ae0cc2b9837072a487 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 24 Jun 2024 11:31:06 -0400 Subject: [PATCH 07/57] fix(pageserver): handle version number in draw timeline (#8102) We now have a `vX` number in the file name, i.e., `000000067F0000000400000B150100000000-000000067F0000000400000D350100000000__00000000014B7AC8-v1-00000001` The related pull request for new-style path was merged a month ago https://github.com/neondatabase/neon/pull/7660 ## Summary of changes Fixed the draw timeline dir command to handle it. 
--------- Signed-off-by: Alex Chi Z --- pageserver/ctl/src/draw_timeline_dir.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pageserver/ctl/src/draw_timeline_dir.rs b/pageserver/ctl/src/draw_timeline_dir.rs index 389519c65a3a..bc939f9688a3 100644 --- a/pageserver/ctl/src/draw_timeline_dir.rs +++ b/pageserver/ctl/src/draw_timeline_dir.rs @@ -83,10 +83,18 @@ fn parse_filename(name: &str) -> (Range, Range) { let keys: Vec<&str> = split[0].split('-').collect(); let mut lsns: Vec<&str> = split[1].split('-').collect(); + // The current format of the layer file name: 000000067F0000000400000B150100000000-000000067F0000000400000D350100000000__00000000014B7AC8-v1-00000001 + + // Handle generation number `-00000001` part if lsns.last().expect("should").len() == 8 { lsns.pop(); } + // Handle version number `-v1` part + if lsns.last().expect("should").starts_with('v') { + lsns.pop(); + } + if lsns.len() == 1 { lsns.push(lsns[0]); } From 9211de0df7cec5910566189c99ee2131462eda16 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 24 Jun 2024 11:50:31 -0400 Subject: [PATCH 08/57] test(pageserver): add delta records tests for gc-compaction (#8078) Part of https://github.com/neondatabase/neon/issues/8002 This pull request adds tests for bottom-most gc-compaction with delta records. Also fixed a bug in the compaction process that creates overlapping delta layers by force splitting at the original delta layer boundary. --------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 234 +++++++++++++++++-- pageserver/src/tenant/timeline/compaction.rs | 154 +++++++++--- 2 files changed, 339 insertions(+), 49 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 76dc52fa16b9..62f066862a16 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4007,6 +4007,7 @@ mod tests { use storage_layer::PersistentLayerKey; use tests::storage_layer::ValuesReconstructState; use tests::timeline::{GetVectoredError, ShutdownMode}; + use timeline::GcInfo; use utils::bin_ser::BeSer; use utils::id::TenantId; @@ -6684,49 +6685,48 @@ mod tests { // img layer at 0x10 let img_layer = (0..10) - .map(|id| (get_key(id), test_img(&format!("value {id}@0x10")))) + .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10")))) .collect_vec(); let delta1 = vec![ - // TODO: we should test a real delta record here, which requires us to add a variant of NeonWalRecord for testing purpose. 
( get_key(1), Lsn(0x20), - Value::Image(test_img("value 1@0x20")), + Value::Image(Bytes::from("value 1@0x20")), ), ( get_key(2), Lsn(0x30), - Value::Image(test_img("value 2@0x30")), + Value::Image(Bytes::from("value 2@0x30")), ), ( get_key(3), Lsn(0x40), - Value::Image(test_img("value 3@0x40")), + Value::Image(Bytes::from("value 3@0x40")), ), ]; let delta2 = vec![ ( get_key(5), Lsn(0x20), - Value::Image(test_img("value 5@0x20")), + Value::Image(Bytes::from("value 5@0x20")), ), ( get_key(6), Lsn(0x20), - Value::Image(test_img("value 6@0x20")), + Value::Image(Bytes::from("value 6@0x20")), ), ]; let delta3 = vec![ ( get_key(8), Lsn(0x40), - Value::Image(test_img("value 8@0x40")), + Value::Image(Bytes::from("value 8@0x40")), ), ( get_key(9), Lsn(0x40), - Value::Image(test_img("value 9@0x40")), + Value::Image(Bytes::from("value 9@0x40")), ), ]; @@ -6748,9 +6748,42 @@ mod tests { guard.cutoffs.horizon = Lsn(0x30); } + let expected_result = [ + Bytes::from_static(b"value 0@0x10"), + Bytes::from_static(b"value 1@0x20"), + Bytes::from_static(b"value 2@0x30"), + Bytes::from_static(b"value 3@0x40"), + Bytes::from_static(b"value 4@0x10"), + Bytes::from_static(b"value 5@0x20"), + Bytes::from_static(b"value 6@0x20"), + Bytes::from_static(b"value 7@0x10"), + Bytes::from_static(b"value 8@0x40"), + Bytes::from_static(b"value 9@0x40"), + ]; + + for (idx, expected) in expected_result.iter().enumerate() { + assert_eq!( + tline + .get(get_key(idx as u32), Lsn(0x50), &ctx) + .await + .unwrap(), + expected + ); + } + let cancel = CancellationToken::new(); tline.compact_with_gc(&cancel, &ctx).await.unwrap(); + for (idx, expected) in expected_result.iter().enumerate() { + assert_eq!( + tline + .get(get_key(idx as u32), Lsn(0x50), &ctx) + .await + .unwrap(), + expected + ); + } + // Check if the image layer at the GC horizon contains exactly what we want let image_at_gc_horizon = tline .inspect_image_layers(Lsn(0x30), &ctx) @@ -6761,14 +6794,22 @@ mod tests { .collect::>(); assert_eq!(image_at_gc_horizon.len(), 10); - let expected_lsn = [0x10, 0x20, 0x30, 0x10, 0x10, 0x20, 0x20, 0x10, 0x10, 0x10]; + let expected_result = [ + Bytes::from_static(b"value 0@0x10"), + Bytes::from_static(b"value 1@0x20"), + Bytes::from_static(b"value 2@0x30"), + Bytes::from_static(b"value 3@0x10"), + Bytes::from_static(b"value 4@0x10"), + Bytes::from_static(b"value 5@0x20"), + Bytes::from_static(b"value 6@0x20"), + Bytes::from_static(b"value 7@0x10"), + Bytes::from_static(b"value 8@0x10"), + Bytes::from_static(b"value 9@0x10"), + ]; for idx in 0..10 { assert_eq!( image_at_gc_horizon[idx], - ( - get_key(idx as u32), - test_img(&format!("value {idx}@{:#x}", expected_lsn[idx])) - ) + (get_key(idx as u32), expected_result[idx].clone()) ); } @@ -6801,7 +6842,7 @@ mod tests { }, // The delta layer that is cut in the middle PersistentLayerKey { - key_range: Key::MIN..get_key(9), + key_range: get_key(3)..get_key(4), lsn_range: Lsn(0x30)..Lsn(0x41), is_delta: true }, @@ -6886,6 +6927,9 @@ mod tests { tline.get(get_key(2), Lsn(0x50), &ctx).await?, Bytes::from_static(b"0x10,0x20,0x30") ); + + // Need to remove the limit of "Neon WAL redo requires base image". 
+ // assert_eq!(tline.get(get_key(3), Lsn(0x50), &ctx).await?, Bytes::new()); // assert_eq!(tline.get(get_key(4), Lsn(0x50), &ctx).await?, Bytes::new()); @@ -6980,4 +7024,164 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_simple_bottom_most_compaction_deltas() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_simple_bottom_most_compaction_deltas")?; + let (tenant, ctx) = harness.load().await; + + fn get_key(id: u32) -> Key { + // using aux key here b/c they are guaranteed to be inside `collect_keyspace`. + let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + + // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon. + // + // | D1 | | D3 | + // -| |-- gc horizon ----------------- + // | | | D2 | + // --------- img layer ------------------ + // + // What we should expact from this compaction is: + // | Part of D1 | | D3 | + // --------- img layer with D1+D2 at GC horizon------------------ + + // img layer at 0x10 + let img_layer = (0..10) + .map(|id| (get_key(id), Bytes::from(format!("value {id}@0x10")))) + .collect_vec(); + + let delta1 = vec![ + ( + get_key(1), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append("@0x20")), + ), + ( + get_key(2), + Lsn(0x30), + Value::WalRecord(NeonWalRecord::wal_append("@0x30")), + ), + ( + get_key(3), + Lsn(0x40), + Value::WalRecord(NeonWalRecord::wal_append("@0x40")), + ), + ]; + let delta2 = vec![ + ( + get_key(5), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append("@0x20")), + ), + ( + get_key(6), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append("@0x20")), + ), + ]; + let delta3 = vec![ + ( + get_key(8), + Lsn(0x40), + Value::WalRecord(NeonWalRecord::wal_append("@0x40")), + ), + ( + get_key(9), + Lsn(0x40), + Value::WalRecord(NeonWalRecord::wal_append("@0x40")), + ), + ]; + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + vec![delta1, delta2, delta3], // delta layers + vec![(Lsn(0x10), img_layer)], // image layers + Lsn(0x50), + ) + .await?; + { + // Update GC info + let mut guard = tline.gc_info.write().unwrap(); + *guard = GcInfo { + retain_lsns: vec![], + cutoffs: GcCutoffs { + pitr: Lsn(0x30), + horizon: Lsn(0x30), + }, + leases: Default::default(), + }; + } + + let expected_result = [ + Bytes::from_static(b"value 0@0x10"), + Bytes::from_static(b"value 1@0x10@0x20"), + Bytes::from_static(b"value 2@0x10@0x30"), + Bytes::from_static(b"value 3@0x10@0x40"), + Bytes::from_static(b"value 4@0x10"), + Bytes::from_static(b"value 5@0x10@0x20"), + Bytes::from_static(b"value 6@0x10@0x20"), + Bytes::from_static(b"value 7@0x10"), + Bytes::from_static(b"value 8@0x10@0x40"), + Bytes::from_static(b"value 9@0x10@0x40"), + ]; + + let expected_result_at_gc_horizon = [ + Bytes::from_static(b"value 0@0x10"), + Bytes::from_static(b"value 1@0x10@0x20"), + Bytes::from_static(b"value 2@0x10@0x30"), + Bytes::from_static(b"value 3@0x10"), + Bytes::from_static(b"value 4@0x10"), + Bytes::from_static(b"value 5@0x10@0x20"), + Bytes::from_static(b"value 6@0x10@0x20"), + Bytes::from_static(b"value 7@0x10"), + Bytes::from_static(b"value 8@0x10"), + Bytes::from_static(b"value 9@0x10"), + ]; + + for idx in 0..10 { + assert_eq!( + tline + .get(get_key(idx as u32), Lsn(0x50), &ctx) + .await + .unwrap(), + &expected_result[idx] + ); + assert_eq!( + tline + .get(get_key(idx as u32), Lsn(0x30), &ctx) + .await + .unwrap(), + 
&expected_result_at_gc_horizon[idx] + ); + } + + let cancel = CancellationToken::new(); + tline.compact_with_gc(&cancel, &ctx).await.unwrap(); + + for idx in 0..10 { + assert_eq!( + tline + .get(get_key(idx as u32), Lsn(0x50), &ctx) + .await + .unwrap(), + &expected_result[idx] + ); + assert_eq!( + tline + .get(get_key(idx as u32), Lsn(0x30), &ctx) + .await + .unwrap(), + &expected_result_at_gc_horizon[idx] + ); + } + + Ok(()) + } } diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 8a95029f33a6..de1263fadf96 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -965,6 +965,8 @@ impl Timeline { _cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<(), CompactionError> { + use std::collections::BTreeSet; + use crate::tenant::storage_layer::ValueReconstructState; // Step 0: pick all delta layers + image layers below/intersect with the GC horizon. // The layer selection has the following properties: @@ -986,20 +988,30 @@ impl Timeline { (selected_layers, gc_cutoff) }; // Step 1: (In the future) construct a k-merge iterator over all layers. For now, simply collect all keys + LSNs. + // Also, collect the layer information to decide when to split the new delta layers. let mut all_key_values = Vec::new(); + let mut delta_split_points = BTreeSet::new(); for layer in &layer_selection { all_key_values.extend(layer.load_key_values(ctx).await?); + let desc = layer.layer_desc(); + if desc.is_delta() { + // TODO: is it correct to only record split points for deltas intersecting with the GC horizon? (exclude those below/above the horizon) + // so that we can avoid having too many small delta layers. + let key_range = desc.get_key_range(); + delta_split_points.insert(key_range.start); + delta_split_points.insert(key_range.end); + } } // Key small to large, LSN low to high, if the same LSN has both image and delta due to the merge of delta layers and - // image layers, make image appear later than delta. + // image layers, make image appear before than delta. struct ValueWrapper<'a>(&'a crate::repository::Value); impl Ord for ValueWrapper<'_> { fn cmp(&self, other: &Self) -> std::cmp::Ordering { use crate::repository::Value; use std::cmp::Ordering; match (self.0, other.0) { - (Value::Image(_), Value::WalRecord(_)) => Ordering::Greater, - (Value::WalRecord(_), Value::Image(_)) => Ordering::Less, + (Value::Image(_), Value::WalRecord(_)) => Ordering::Less, + (Value::WalRecord(_), Value::Image(_)) => Ordering::Greater, _ => Ordering::Equal, } } @@ -1018,13 +1030,6 @@ impl Timeline { all_key_values.sort_by(|(k1, l1, v1), (k2, l2, v2)| { (k1, l1, ValueWrapper(v1)).cmp(&(k2, l2, ValueWrapper(v2))) }); - let max_lsn = all_key_values - .iter() - .map(|(_, lsn, _)| lsn) - .max() - .copied() - .unwrap() - + 1; // Step 2: Produce images+deltas. TODO: ensure newly-produced delta does not overlap with other deltas. // Data of the same key. let mut accumulated_values = Vec::new(); @@ -1043,7 +1048,19 @@ impl Timeline { // We have a list of deltas/images. We want to create image layers while collect garbages. for (key, lsn, val) in accumulated_values.iter().rev() { if *lsn > horizon { - keys_above_horizon.push((*key, *lsn, val.clone())); // TODO: ensure one LSN corresponds to either delta or image instead of both + if let Some((_, prev_lsn, _)) = keys_above_horizon.last_mut() { + if *prev_lsn == *lsn { + // The case that we have an LSN with both data from the delta layer and the image layer. 
As + // `ValueWrapper` ensures that an image is ordered before a delta at the same LSN, we simply + // drop this delta and keep the image. + // + // For example, we have delta layer key1@0x10, key1@0x20, and image layer key1@0x10, we will + // keep the image for key1@0x10 and the delta for key1@0x20. key1@0x10 delta will be simply + // dropped. + continue; + } + } + keys_above_horizon.push((*key, *lsn, val.clone())); } else if *lsn <= horizon { match val { crate::repository::Value::Image(image) => { @@ -1068,15 +1085,59 @@ impl Timeline { Ok((keys_above_horizon, img)) } - let mut delta_layer_writer = DeltaLayerWriter::new( - self.conf, - self.timeline_id, - self.tenant_shard_id, - all_key_values.first().unwrap().0, - gc_cutoff..max_lsn, // TODO: off by one? - ctx, - ) - .await?; + async fn flush_deltas( + deltas: &mut Vec<(Key, Lsn, crate::repository::Value)>, + last_key: Key, + delta_split_points: &[Key], + current_delta_split_point: &mut usize, + tline: &Arc, + gc_cutoff: Lsn, + ctx: &RequestContext, + ) -> anyhow::Result> { + // Check if we need to split the delta layer. We split at the original delta layer boundary to avoid + // overlapping layers. + // + // If we have a structure like this: + // + // | Delta 1 | | Delta 4 | + // |---------| Delta 2 |---------| + // | Delta 3 | | Delta 5 | + // + // And we choose to compact delta 2+3+5. We will get an overlapping delta layer with delta 1+4. + // A simple solution here is to split the delta layers using the original boundary, while this + // might produce a lot of small layers. This should be improved and fixed in the future. + let mut need_split = false; + while *current_delta_split_point < delta_split_points.len() + && last_key >= delta_split_points[*current_delta_split_point] + { + *current_delta_split_point += 1; + need_split = true; + } + if !need_split { + return Ok(None); + } + let deltas = std::mem::take(deltas); + if deltas.is_empty() { + return Ok(None); + } + let end_lsn = deltas.iter().map(|(_, lsn, _)| lsn).max().copied().unwrap() + 1; + let mut delta_layer_writer = DeltaLayerWriter::new( + tline.conf, + tline.timeline_id, + tline.tenant_shard_id, + deltas.first().unwrap().0, + gc_cutoff..end_lsn, + ctx, + ) + .await?; + let key_end = deltas.last().unwrap().0.next(); + for (key, lsn, val) in deltas { + delta_layer_writer.put_value(key, lsn, val, ctx).await?; + } + let delta_layer = delta_layer_writer.finish(key_end, tline, ctx).await?; + Ok(Some(delta_layer)) + } + let mut image_layer_writer = ImageLayerWriter::new( self.conf, self.timeline_id, @@ -1087,6 +1148,10 @@ impl Timeline { ) .await?; + let mut delta_values = Vec::new(); + let delta_split_points = delta_split_points.into_iter().collect_vec(); + let mut current_delta_split_point = 0; + let mut delta_layers = Vec::new(); for item @ (key, _, _) in &all_key_values { if &last_key == key { accumulated_values.push(item); @@ -1094,33 +1159,54 @@ impl Timeline { let (deltas, image) = flush_accumulated_states(self, last_key, &accumulated_values, gc_cutoff) .await?; + // Put the image into the image layer. Currently we have a single big layer for the compaction. 
image_layer_writer.put_image(last_key, image, ctx).await?; - for (key, lsn, val) in deltas { - delta_layer_writer.put_value(key, lsn, val, ctx).await?; - } + delta_values.extend(deltas); + delta_layers.extend( + flush_deltas( + &mut delta_values, + last_key, + &delta_split_points, + &mut current_delta_split_point, + self, + gc_cutoff, + ctx, + ) + .await?, + ); accumulated_values.clear(); accumulated_values.push(item); last_key = *key; } } + + // TODO: move this part to the loop body let (deltas, image) = flush_accumulated_states(self, last_key, &accumulated_values, gc_cutoff).await?; + // Put the image into the image layer. Currently we have a single big layer for the compaction. image_layer_writer.put_image(last_key, image, ctx).await?; - for (key, lsn, val) in deltas { - delta_layer_writer.put_value(key, lsn, val, ctx).await?; - } - accumulated_values.clear(); - // TODO: split layers - let delta_layer = delta_layer_writer.finish(last_key, self, ctx).await?; + delta_values.extend(deltas); + delta_layers.extend( + flush_deltas( + &mut delta_values, + last_key, + &delta_split_points, + &mut current_delta_split_point, + self, + gc_cutoff, + ctx, + ) + .await?, + ); + let image_layer = image_layer_writer.finish(self, ctx).await?; + let mut compact_to = Vec::new(); + compact_to.extend(delta_layers); + compact_to.push(image_layer); // Step 3: Place back to the layer map. { let mut guard = self.layers.write().await; - guard.finish_gc_compaction( - &layer_selection, - &[delta_layer.clone(), image_layer.clone()], - &self.metrics, - ) + guard.finish_gc_compaction(&layer_selection, &compact_to, &self.metrics) }; Ok(()) } From 3d760938e12e463343cb97e2dd0e2a916c4f3943 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 24 Jun 2024 17:57:16 +0100 Subject: [PATCH 09/57] storcon_cli: remove old tenant-scatter command (#8127) ## Problem This command was used in the very early days of sharding, before the storage controller had anti-affinity + scheduling optimization to spread out shards. ## Summary of changes - Remove `storcon_cli tenant-scatter` --- control_plane/storcon_cli/src/main.rs | 92 +-------------------------- 1 file changed, 2 insertions(+), 90 deletions(-) diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 7b48b75c21ea..775aedb60001 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -1,5 +1,5 @@ use futures::StreamExt; -use std::{collections::HashMap, str::FromStr, time::Duration}; +use std::{str::FromStr, time::Duration}; use clap::{Parser, Subcommand}; use pageserver_api::{ @@ -21,7 +21,7 @@ use utils::id::{NodeId, TenantId}; use pageserver_api::controller_api::{ NodeConfigureRequest, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy, - TenantLocateResponse, TenantShardMigrateRequest, TenantShardMigrateResponse, + TenantShardMigrateRequest, TenantShardMigrateResponse, }; #[derive(Subcommand, Debug)] @@ -110,12 +110,6 @@ enum Command { #[arg(long)] config: String, }, - /// Attempt to balance the locations for a tenant across pageservers. This is a client-side - /// alternative to the storage controller's scheduling optimization behavior. - TenantScatter { - #[arg(long)] - tenant_id: TenantId, - }, /// Print details about a particular tenant, including all its shards' states. 
TenantDescribe { #[arg(long)] @@ -498,88 +492,6 @@ async fn main() -> anyhow::Result<()> { }) .await?; } - Command::TenantScatter { tenant_id } => { - // Find the shards - let locate_response = storcon_client - .dispatch::<(), TenantLocateResponse>( - Method::GET, - format!("control/v1/tenant/{tenant_id}/locate"), - None, - ) - .await?; - let shards = locate_response.shards; - - let mut node_to_shards: HashMap> = HashMap::new(); - let shard_count = shards.len(); - for s in shards { - let entry = node_to_shards.entry(s.node_id).or_default(); - entry.push(s.shard_id); - } - - // Load list of available nodes - let nodes_resp = storcon_client - .dispatch::<(), Vec>( - Method::GET, - "control/v1/node".to_string(), - None, - ) - .await?; - - for node in nodes_resp { - if matches!(node.availability, NodeAvailabilityWrapper::Active) { - node_to_shards.entry(node.id).or_default(); - } - } - - let max_shard_per_node = shard_count / node_to_shards.len(); - - loop { - let mut migrate_shard = None; - for shards in node_to_shards.values_mut() { - if shards.len() > max_shard_per_node { - // Pick the emptiest - migrate_shard = Some(shards.pop().unwrap()); - } - } - let Some(migrate_shard) = migrate_shard else { - break; - }; - - // Pick the emptiest node to migrate to - let mut destinations = node_to_shards - .iter() - .map(|(k, v)| (k, v.len())) - .collect::>(); - destinations.sort_by_key(|i| i.1); - let (destination_node, destination_count) = *destinations.first().unwrap(); - if destination_count + 1 > max_shard_per_node { - // Even the emptiest destination doesn't have space: we're done - break; - } - let destination_node = *destination_node; - - node_to_shards - .get_mut(&destination_node) - .unwrap() - .push(migrate_shard); - - println!("Migrate {} -> {} ...", migrate_shard, destination_node); - - storcon_client - .dispatch::( - Method::PUT, - format!("control/v1/tenant/{migrate_shard}/migrate"), - Some(TenantShardMigrateRequest { - tenant_shard_id: migrate_shard, - node_id: destination_node, - }), - ) - .await?; - println!("Migrate {} -> {} OK", migrate_shard, destination_node); - } - - // Spread the shards across the nodes - } Command::TenantDescribe { tenant_id } => { let describe_response = storcon_client .dispatch::<(), TenantDescribeResponse>( From 1ea5d8b1327d2e93cbe11682f60a90e35d42d1ee Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 24 Jun 2024 18:03:53 +0100 Subject: [PATCH 10/57] tests: accomodate some messages that can fail tests (#8144) ## Problem - `test_storage_controller_many_tenants` can fail with warnings in the storage controller about tenant creation holding a lock for too long, because this test stresses the machine running the test with many concurrent timeline creations - `test_tenant_delete_smoke` can fail when synthetic remote storage errors show up ## Summary of changes - tolerate warnings about slow timeline creation in test_storage_controller_many_tenants - tolerate both possible errors during error_tolerant_delete --- .../performance/test_storage_controller_scale.py | 11 ++++++++++- test_runner/regress/test_tenant_delete.py | 8 ++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/test_runner/performance/test_storage_controller_scale.py b/test_runner/performance/test_storage_controller_scale.py index cb013ae8c3e3..a4c8c8ac421a 100644 --- a/test_runner/performance/test_storage_controller_scale.py +++ b/test_runner/performance/test_storage_controller_scale.py @@ -48,7 +48,16 @@ def test_storage_controller_many_tenants( # We will intentionally stress 
reconciler concurrrency, which triggers a warning when lots # of shards are hitting the delayed path. - env.storage_controller.allowed_errors.append(".*Many shards are waiting to reconcile") + env.storage_controller.allowed_errors.extend( + [ + # We will intentionally stress reconciler concurrrency, which triggers a warning when lots + # of shards are hitting the delayed path. + ".*Many shards are waiting to reconcile", + # We will create many timelines concurrently, so they might get slow enough to trip the warning + # that timeline creation is holding a lock too long. + ".*Shared lock by TimelineCreate.*was held.*", + ] + ) for ps in env.pageservers: # This can happen because when we do a loop over all pageservers and mark them offline/active, diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index a3316f2f4592..d3fba32a19e0 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -31,8 +31,12 @@ def error_tolerant_delete(ps_http, tenant_id): if e.status_code == 500: # This test uses failure injection, which can produce 500s as the pageserver expects # the object store to always be available, and the ListObjects during deletion is generally - # an infallible operation - assert "simulated failure of remote operation" in e.message + # an infallible operation. This can show up as a clear simulated error, or as a general + # error during delete_objects() + assert ( + "simulated failure of remote operation" in e.message + or "failed to delete" in e.message + ) else: raise else: From 219e78f885486698a67da6ad62ef9f6d001b118a Mon Sep 17 00:00:00 2001 From: Yuchen Liang <70461588+yliang412@users.noreply.github.com> Date: Mon, 24 Jun 2024 16:12:24 -0400 Subject: [PATCH 11/57] feat(pageserver): add an optional lease to the get_lsn_by_timestamp API (#8104) Part of #7497, closes #8072. ## Problem Currently the `get_lsn_by_timestamp` and branch creation pageserver APIs do not provide a pleasant client experience where the looked-up LSN might be GC-ed between the two API calls. This PR attempts to prevent common races between GC and branch creation by making use of LSN leases provided in #8084. A lease can be optionally granted to a looked-up LSN. With the lease, GC will not touch layers needed to reconstruct all pages at this LSN for the duration of the lease. Signed-off-by: Yuchen Liang --- pageserver/src/http/openapi_spec.yml | 11 +++++++ pageserver/src/http/routes.rs | 27 +++++++++++++++- test_runner/fixtures/pageserver/http.py | 6 ++-- test_runner/regress/test_lsn_mapping.py | 43 ++++++++++++++++++++----- 4 files changed, 76 insertions(+), 11 deletions(-) diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index 1bc8fe906645..e583992a58f9 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -236,6 +236,13 @@ paths: type: string format: date-time description: A timestamp to get the LSN + - name: with_lease + in: query + required: false + schema: + type: boolean + description: Whether to grant a lease to the corresponding LSN. Default to false. + responses: "200": description: OK @@ -1029,6 +1036,10 @@ components: kind: type: string enum: [past, present, future, nodata] + valid_until: + type: string + format: date-time + description: The expiration time of the granted lease. 
LsnLease: type: object diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index cfa507fed000..450f89820e5b 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -21,6 +21,7 @@ use pageserver_api::models::IngestAuxFilesRequest; use pageserver_api::models::ListAuxFilesRequest; use pageserver_api::models::LocationConfig; use pageserver_api::models::LocationConfigListResponse; +use pageserver_api::models::LsnLease; use pageserver_api::models::ShardParameters; use pageserver_api::models::TenantDetails; use pageserver_api::models::TenantLocationConfigResponse; @@ -728,6 +729,8 @@ async fn get_lsn_by_timestamp_handler( .map_err(ApiError::BadRequest)?; let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp); + let with_lease = parse_query_param(&request, "with_lease")?.unwrap_or(false); + let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); let timeline = @@ -736,10 +739,15 @@ async fn get_lsn_by_timestamp_handler( let result = timeline .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx) .await?; + #[derive(serde::Serialize, Debug)] struct Result { lsn: Lsn, kind: &'static str, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(flatten)] + lease: Option, } let (lsn, kind) = match result { LsnForTimestamp::Present(lsn) => (lsn, "present"), @@ -747,11 +755,28 @@ async fn get_lsn_by_timestamp_handler( LsnForTimestamp::Past(lsn) => (lsn, "past"), LsnForTimestamp::NoData(lsn) => (lsn, "nodata"), }; - let result = Result { lsn, kind }; + + let lease = if with_lease { + timeline + .make_lsn_lease(lsn, timeline.get_lsn_lease_length_for_ts(), &ctx) + .inspect_err(|_| { + warn!("fail to grant a lease to {}", lsn); + }) + .ok() + } else { + None + }; + + let result = Result { lsn, kind, lease }; + let valid_until = result + .lease + .as_ref() + .map(|l| humantime::format_rfc3339_millis(l.valid_until).to_string()); tracing::info!( lsn=?result.lsn, kind=%result.kind, timestamp=%timestamp_raw, + valid_until=?valid_until, "lsn_by_timestamp finished" ); json_response(StatusCode::OK, result) diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index ecc83a954657..64c7ddee6c8c 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -619,13 +619,15 @@ def timeline_get_lsn_by_timestamp( tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, timestamp: datetime, + with_lease: bool = False, **kwargs, ): log.info( - f"Requesting lsn by timestamp {timestamp}, tenant {tenant_id}, timeline {timeline_id}" + f"Requesting lsn by timestamp {timestamp}, tenant {tenant_id}, timeline {timeline_id}, {with_lease=}" ) + with_lease_query = f"{with_lease=}".lower() res = self.get( - f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp?timestamp={timestamp.isoformat()}Z", + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp?timestamp={timestamp.isoformat()}Z&{with_lease_query}", **kwargs, ) self.verbose_error(res) diff --git a/test_runner/regress/test_lsn_mapping.py b/test_runner/regress/test_lsn_mapping.py index 263730a82347..67e82f8d309f 100644 --- a/test_runner/regress/test_lsn_mapping.py +++ b/test_runner/regress/test_lsn_mapping.py @@ -12,10 +12,24 @@ from requests.exceptions import ReadTimeout -# -# Test pageserver get_lsn_by_timestamp API -# -def test_lsn_mapping(neon_env_builder: NeonEnvBuilder): +def 
assert_lsn_lease_granted(result, with_lease: bool): + """ + Asserts an LSN lease is granted when `with_lease` flag is turned on. + Always asserts no LSN lease is granted when `with_lease` flag is off. + """ + if with_lease: + assert result.get("valid_until") + else: + assert result.get("valid_until") is None + + +@pytest.mark.parametrize("with_lease", [True, False]) +def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool): + """ + Test pageserver get_lsn_by_timestamp API. + + :param with_lease: Whether to get a lease associated with returned LSN. + """ env = neon_env_builder.init_start() tenant_id, _ = env.neon_cli.create_tenant( @@ -67,23 +81,33 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder): # Check edge cases # Timestamp is in the future probe_timestamp = tbl[-1][1] + timedelta(hours=1) - result = client.timeline_get_lsn_by_timestamp(tenant_id, timeline_id, probe_timestamp) + result = client.timeline_get_lsn_by_timestamp( + tenant_id, timeline_id, probe_timestamp, with_lease=with_lease + ) assert result["kind"] == "future" + assert_lsn_lease_granted(result, with_lease) # make sure that we return a well advanced lsn here assert Lsn(result["lsn"]) > start_lsn # Timestamp is in the unreachable past probe_timestamp = tbl[0][1] - timedelta(hours=10) - result = client.timeline_get_lsn_by_timestamp(tenant_id, timeline_id, probe_timestamp) + result = client.timeline_get_lsn_by_timestamp( + tenant_id, timeline_id, probe_timestamp, with_lease=with_lease + ) assert result["kind"] == "past" + assert_lsn_lease_granted(result, with_lease) + # make sure that we return the minimum lsn here at the start of the range assert Lsn(result["lsn"]) < start_lsn # Probe a bunch of timestamps in the valid range for i in range(1, len(tbl), 100): probe_timestamp = tbl[i][1] - result = client.timeline_get_lsn_by_timestamp(tenant_id, timeline_id, probe_timestamp) + result = client.timeline_get_lsn_by_timestamp( + tenant_id, timeline_id, probe_timestamp, with_lease=with_lease + ) assert result["kind"] not in ["past", "nodata"] + assert_lsn_lease_granted(result, with_lease) lsn = result["lsn"] # Call get_lsn_by_timestamp to get the LSN # Launch a new read-only node at that LSN, and check that only the rows @@ -105,8 +129,11 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder): # Timestamp is in the unreachable past probe_timestamp = tbl[0][1] - timedelta(hours=10) - result = client.timeline_get_lsn_by_timestamp(tenant_id, timeline_id_child, probe_timestamp) + result = client.timeline_get_lsn_by_timestamp( + tenant_id, timeline_id_child, probe_timestamp, with_lease=with_lease + ) assert result["kind"] == "past" + assert_lsn_lease_granted(result, with_lease) # make sure that we return the minimum lsn here at the start of the range assert Lsn(result["lsn"]) >= last_flush_lsn From d502313841bf5f31d7aff629f93e09284d984fb3 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 25 Jun 2024 16:29:32 +0300 Subject: [PATCH 12/57] Fix MVCC bug with prepared xact with subxacts on standby (#8152) We did not recover the subtransaction IDs of prepared transactions when starting a hot standby from a shutdown checkpoint. As a result, such subtransactions were considered as aborted, rather than in-progress. That would lead to hint bits being set incorrectly, and the subtransactions suddenly becoming visible to old snapshots when the prepared transaction was committed. To fix, update pg_subtrans with prepared transactions's subxids when starting hot standby from a shutdown checkpoint. 
The snapshots taken from that state need to be marked as "suboverflowed", so that we also check the pg_subtrans. Discussion: https://www.postgresql.org/message-id/6b852e98-2d49-4ca1-9e95-db419a2696e0%40iki.fi NEON: cherry-picked from the upstream thread ahead of time, to unblock https://github.com/neondatabase/neon/pull/7288. I expect this to be committed to upstream in the next few days, superseding this. NOTE: I did not include the new regression test on v15 and v14 branches, because the test would need some adapting, and we don't run the perl tests on Neon anyway. --- vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 4c51945a6167..aa88bd536b48 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 4c51945a6167ca06c0169e7a4ca5a8e7ffa3faba +Subproject commit aa88bd536b48b22328aac748be0dcfff760135d0 diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index e22098d86d6c..2092a6dcee79 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit e22098d86d6c40276b6bd75c29133a33fb283ab6 +Subproject commit 2092a6dcee794bb0cb17471bd964690dd7c7355f diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index 9837db157837..3bf9219f6ef5 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit 9837db157837fcf43ef7348be0017d3a2238cd27 +Subproject commit 3bf9219f6ef5e943393e9430872e26184e92d1c6 diff --git a/vendor/revisions.json b/vendor/revisions.json index f945ea6d730e..d48f1defec36 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "9837db157837fcf43ef7348be0017d3a2238cd27"], - "v15": ["15.7", "e22098d86d6c40276b6bd75c29133a33fb283ab6"], - "v14": ["14.12", "4c51945a6167ca06c0169e7a4ca5a8e7ffa3faba"] + "v16": ["16.3", "3bf9219f6ef5e943393e9430872e26184e92d1c6"], + "v15": ["15.7", "2092a6dcee794bb0cb17471bd964690dd7c7355f"], + "v14": ["14.12", "aa88bd536b48b22328aac748be0dcfff760135d0"] } From 7026dde9eba4bb37f5ed0182c34ca95d27c014a6 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Tue, 25 Jun 2024 15:06:18 +0100 Subject: [PATCH 13/57] storcon: update db related dependencides (#8155) ## Problem Storage controller runs into memory corruption issue on the drain/fill code paths. ## Summary of changes Update db related depdencies in the unlikely case that the issue was fixed in diesel. 
--- Cargo.lock | 103 ++++++++++++++++++++++++++++---------- workspace_hack/Cargo.toml | 1 - 2 files changed, 76 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70c837c14645..5393538c5902 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1246,7 +1246,7 @@ dependencies = [ "tokio-postgres", "tokio-stream", "tokio-util", - "toml_edit", + "toml_edit 0.19.10", "tracing", "tracing-opentelemetry", "tracing-subscriber", @@ -1362,8 +1362,8 @@ dependencies = [ "tokio", "tokio-postgres", "tokio-util", - "toml", - "toml_edit", + "toml 0.7.4", + "toml_edit 0.19.10", "tracing", "url", "utils", @@ -1669,9 +1669,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.4" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" +checksum = "62d6dcd069e7b5fe49a302411f759d4cf1cf2c27fe798ef46fb8baefc053dd2b" dependencies = [ "bitflags 2.4.1", "byteorder", @@ -1684,11 +1684,12 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" +checksum = "59de76a222c2b8059f789cbe07afbfd8deb8c31dd0bc2a21f85e256c1def8259" dependencies = [ "diesel_table_macro_syntax", + "dsl_auto_type", "proc-macro2", "quote", "syn 2.0.52", @@ -1696,9 +1697,9 @@ dependencies = [ [[package]] name = "diesel_migrations" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6036b3f0120c5961381b570ee20a02432d7e2d27ea60de9578799cf9156914ac" +checksum = "8a73ce704bad4231f001bff3314d91dce4aba0770cee8b233991859abc15c1f6" dependencies = [ "diesel", "migrations_internals", @@ -1707,9 +1708,9 @@ dependencies = [ [[package]] name = "diesel_table_macro_syntax" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ "syn 2.0.52", ] @@ -1745,6 +1746,20 @@ dependencies = [ "const-random", ] +[[package]] +name = "dsl_auto_type" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0892a17df262a24294c382f0d5997571006e7a4348b4327557c4ff1cd4a8bccc" +dependencies = [ + "darling", + "either", + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "dyn-clone" version = "1.0.14" @@ -3084,19 +3099,19 @@ dependencies = [ [[package]] name = "migrations_internals" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" +checksum = "fd01039851e82f8799046eabbb354056283fb265c8ec0996af940f4e85a380ff" dependencies = [ "serde", - "toml", + "toml 0.8.14", ] [[package]] name = "migrations_macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce3325ac70e67bbab5bd837a31cae01f1a6db64e0e744a33cb03a543469ef08" +checksum = "ffb161cc72176cb37aa47f1fc520d3ef02263d67d661f44f05d05a079e1237fd" dependencies = [ "migrations_internals", "proc-macro2", @@ -3576,7 +3591,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "toml_edit", + "toml_edit 0.19.10", "utils", "workspace_hack", ] @@ -3659,7 
+3674,7 @@ dependencies = [ "tokio-stream", "tokio-tar", "tokio-util", - "toml_edit", + "toml_edit 0.19.10", "tracing", "twox-hash", "url", @@ -4665,7 +4680,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml_edit", + "toml_edit 0.19.10", "tracing", "utils", "workspace_hack", @@ -5164,7 +5179,7 @@ dependencies = [ "tokio-stream", "tokio-tar", "tokio-util", - "toml_edit", + "toml_edit 0.19.10", "tracing", "tracing-subscriber", "url", @@ -5443,9 +5458,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.2" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -6330,14 +6345,26 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.10", +] + +[[package]] +name = "toml" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.14", ] [[package]] name = "toml_datetime" -version = "0.6.2" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -6352,7 +6379,20 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.4.6", +] + +[[package]] +name = "toml_edit" +version = "0.22.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +dependencies = [ + "indexmap 2.0.1", + "serde", + "serde_spanned", + "toml_datetime", + "winnow 0.6.13", ] [[package]] @@ -7335,6 +7375,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -7424,7 +7473,7 @@ dependencies = [ "tokio-rustls 0.24.0", "tokio-util", "toml_datetime", - "toml_edit", + "toml_edit 0.19.10", "tonic", "tower", "tracing", diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index 139a5647c560..f43076171f21 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -115,7 +115,6 @@ syn-dff4ba8e3ae991db = { package = "syn", version = "1", features = ["extra-trai syn-f595c2ba2a3f28df = { package = "syn", version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time-macros = { version = "0.2", default-features = false, features = ["formatting", "parsing", "serde"] } toml_datetime = { version = "0.6", default-features = false, features = ["serde"] } -toml_edit = { version = "0.19", features = ["serde"] } zstd = { version = "0.13" } zstd-safe = { version = "7", default-features = false, features = ["arrays", "legacy", "std", "zdict_builder"] } zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] } From 947f6da75e10042b2fd66a4ff523c8a1d5da3aeb Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 25 Jun 2024 17:04:44 +0200 Subject: [PATCH 14/57] L0 flush: 
avoid short-lived allocation when checking key_range empty (#8154) We only use `keys` to check if it's empty so we can bail out early. No need to collect the keys for that. Found this while doing research for https://github.com/neondatabase/neon/issues/7418 --- pageserver/src/tenant/storage_layer/inmemory_layer.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index 1ecc56ce993f..6624fb7e6ba5 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -622,18 +622,16 @@ impl InMemoryLayer { let end_lsn = *self.end_lsn.get().unwrap(); - let keys: Vec<_> = if let Some(key_range) = key_range { + let key_count = if let Some(key_range) = key_range { inner .index .iter() .filter(|(k, _)| key_range.contains(k)) - .map(|(k, m)| (k.to_i128(), m)) - .collect() + .count() } else { - inner.index.iter().map(|(k, m)| (k.to_i128(), m)).collect() + inner.index.len() }; - - if keys.is_empty() { + if key_count == 0 { return Ok(None); } From 9b2f9419d9451514e6f11c79db7de3adaac2f0ba Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Tue, 25 Jun 2024 16:18:22 +0100 Subject: [PATCH 15/57] CI: upload docker cache only from main (#8157) ## Problem The Docker build cache gets invalidated by PRs ## Summary of changes - Upload cache only from the main branch --- .github/workflows/build-build-tools-image.yml | 2 +- .github/workflows/build_and_test.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index 6e90a80ab7ab..5a94dd8e6f2d 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -78,7 +78,7 @@ jobs: pull: true file: Dockerfile.build-tools cache-from: type=registry,ref=neondatabase/build-tools:cache-${{ matrix.arch }} - cache-to: type=registry,ref=neondatabase/build-tools:cache-${{ matrix.arch }},mode=max + cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=neondatabase/build-tools:cache-{0},mode=max', matrix.arch) || '' }} tags: neondatabase/build-tools:${{ inputs.image-tag }}-${{ matrix.arch }} - name: Remove custom docker config directory diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index e9adf28b9998..113b37ae518b 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -763,7 +763,7 @@ jobs: pull: true file: Dockerfile cache-from: type=registry,ref=neondatabase/neon:cache-${{ matrix.arch }} - cache-to: type=registry,ref=neondatabase/neon:cache-${{ matrix.arch }},mode=max + cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=neondatabase/neon:cache-{0},mode=max', matrix.arch) || '' }} tags: | neondatabase/neon:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} @@ -855,7 +855,7 @@ jobs: pull: true file: Dockerfile.compute-node cache-from: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }} - cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max + cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=neondatabase/compute-node-{0}:cache-{1},mode=max', matrix.version, matrix.arch) || '' }} tags: | neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch 
}} @@ -875,7 +875,7 @@ jobs: file: Dockerfile.compute-node target: neon-pg-ext-test cache-from: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }} - cache-to: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max + cache-to: ${{ github.ref_name == 'main' && format('type=registry,ref=neondatabase/neon-test-extensions-{0}:cache-{1},mode=max', matrix.version, matrix.arch) || '' }} tags: | neondatabase/neon-test-extensions-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}-${{ matrix.arch }} From 961fc0ba8f8355911a34151f40a105b29ba5002c Mon Sep 17 00:00:00 2001 From: Yuchen Liang <70461588+yliang412@users.noreply.github.com> Date: Tue, 25 Jun 2024 11:43:12 -0400 Subject: [PATCH 16/57] feat(pageserver): add metrics for number of valid leases after each refresh (#8147) Part of #7497, closes #8120. ## Summary of changes This PR adds a metric to track the number of valid leases after `GCInfo` gets refreshed each time. Besides this metric, we should also track disk space and synthetic size (after #8071 is closed) to make sure leases are used properly. Signed-off-by: Yuchen Liang --- pageserver/src/metrics.rs | 17 +++++++++++++++++ pageserver/src/tenant.rs | 5 +++++ test_runner/fixtures/metrics.py | 1 + 3 files changed, 23 insertions(+) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 5c8f350f7b2a..c6b160733167 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -545,6 +545,15 @@ static AUX_FILE_SIZE: Lazy = Lazy::new(|| { .expect("failed to define a metric") }); +static VALID_LSN_LEASE_COUNT: Lazy = Lazy::new(|| { + register_uint_gauge_vec!( + "pageserver_valid_lsn_lease_count", + "The number of valid leases after refreshing gc info.", + &["tenant_id", "shard_id", "timeline_id"], + ) + .expect("failed to define a metric") +}); + pub(crate) mod initial_logical_size { use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec}; use once_cell::sync::Lazy; @@ -2055,6 +2064,8 @@ pub(crate) struct TimelineMetrics { pub directory_entries_count_gauge: Lazy UIntGauge>>, pub evictions: IntCounter, pub evictions_with_low_residence_duration: std::sync::RwLock, + /// Number of valid LSN leases. 
+ pub valid_lsn_lease_count_gauge: UIntGauge, shutdown: std::sync::atomic::AtomicBool, } @@ -2153,6 +2164,10 @@ impl TimelineMetrics { let evictions_with_low_residence_duration = evictions_with_low_residence_duration_builder .build(&tenant_id, &shard_id, &timeline_id); + let valid_lsn_lease_count_gauge = VALID_LSN_LEASE_COUNT + .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id]) + .unwrap(); + TimelineMetrics { tenant_id, shard_id, @@ -2175,6 +2190,7 @@ impl TimelineMetrics { evictions_with_low_residence_duration: std::sync::RwLock::new( evictions_with_low_residence_duration, ), + valid_lsn_lease_count_gauge, shutdown: std::sync::atomic::AtomicBool::default(), } } @@ -2224,6 +2240,7 @@ impl TimelineMetrics { } let _ = EVICTIONS.remove_label_values(&[tenant_id, shard_id, timeline_id]); let _ = AUX_FILE_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]); + let _ = VALID_LSN_LEASE_COUNT.remove_label_values(&[tenant_id, shard_id, timeline_id]); self.evictions_with_low_residence_duration .write() diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 62f066862a16..4e03e09a9b8d 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -2960,6 +2960,11 @@ impl Tenant { let now = SystemTime::now(); target.leases.retain(|_, lease| !lease.is_expired(&now)); + timeline + .metrics + .valid_lsn_lease_count_gauge + .set(target.leases.len() as u64); + match gc_cutoffs.remove(&timeline.timeline_id) { Some(cutoffs) => { target.retain_lsns = branchpoints; diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py index e01bb6da5165..41fa8e679f28 100644 --- a/test_runner/fixtures/metrics.py +++ b/test_runner/fixtures/metrics.py @@ -149,6 +149,7 @@ def histogram(prefix_without_trailing_underscore: str) -> List[str]: "pageserver_evictions_total", "pageserver_evictions_with_low_residence_duration_total", "pageserver_aux_file_estimated_size", + "pageserver_valid_lsn_lease_count", *PAGESERVER_PER_TENANT_REMOTE_TIMELINE_CLIENT_METRICS, # "pageserver_directory_entries_count", -- only used if above a certain threshold # "pageserver_broken_tenants_count" -- used only for broken From 64a4461191e17521f54cd9c334b034fcf7a12a0b Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 25 Jun 2024 19:05:13 +0300 Subject: [PATCH 17/57] Fix submodule references to match the REL_*_STABLE_neon branches (#8159) No code changes, just point to the correct commit SHAs. 
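The invariant restored by this patch — each entry in `vendor/revisions.json` must pin the same commit SHA as the corresponding `vendor/postgres-v*` submodule recorded in the git tree — can be checked mechanically. The snippet below is an illustrative sketch only, not part of this change; it assumes it is run from the repository root with `git` on PATH, and that `revisions.json` keeps the `{"vNN": [pg_version, sha]}` shape shown in the diff.

```python
#!/usr/bin/env python3
# Illustrative helper (not part of this patch): check that the commit SHAs pinned
# in vendor/revisions.json match the submodule commits recorded in the git tree.
import json
import subprocess


def submodule_sha(path: str) -> str:
    # For a submodule, `git ls-tree HEAD <path>` prints: "160000 commit <sha>\t<path>"
    out = subprocess.check_output(["git", "ls-tree", "HEAD", path], text=True)
    return out.split()[2]


def main() -> int:
    with open("vendor/revisions.json") as f:
        revisions = json.load(f)

    ok = True
    for version, (_pg_version, pinned_sha) in revisions.items():
        actual = submodule_sha(f"vendor/postgres-{version}")
        if actual != pinned_sha:
            print(f"{version}: revisions.json pins {pinned_sha}, submodule points at {actual}")
            ok = False
    return 0 if ok else 1


if __name__ == "__main__":
    raise SystemExit(main())
```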
--- vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index aa88bd536b48..7845c122d51d 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit aa88bd536b48b22328aac748be0dcfff760135d0 +Subproject commit 7845c122d51d3ebb547a984a640ac0310a2fadce diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index 2092a6dcee79..2ff5ecc67c64 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit 2092a6dcee794bb0cb17471bd964690dd7c7355f +Subproject commit 2ff5ecc67c64e5fe44b7dde598e64e4538e0c373 diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index 3bf9219f6ef5..d55e0aca104a 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit 3bf9219f6ef5e943393e9430872e26184e92d1c6 +Subproject commit d55e0aca104af0b611cf5565f1033b2acd2dcc1c diff --git a/vendor/revisions.json b/vendor/revisions.json index d48f1defec36..e755cf2e9dfa 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "3bf9219f6ef5e943393e9430872e26184e92d1c6"], - "v15": ["15.7", "2092a6dcee794bb0cb17471bd964690dd7c7355f"], - "v14": ["14.12", "aa88bd536b48b22328aac748be0dcfff760135d0"] + "v16": ["16.3", "d55e0aca104af0b611cf5565f1033b2acd2dcc1c"], + "v15": ["15.7", "2ff5ecc67c64e5fe44b7dde598e64e4538e0c373"], + "v14": ["14.12", "7845c122d51d3ebb547a984a640ac0310a2fadce"] } From 07f21dd6b67e46d86ddc45eb69703f84b118fecb Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 25 Jun 2024 17:38:06 +0100 Subject: [PATCH 18/57] pageserver: remove attach/detach apis (#8134) ## Problem These APIs have been deprecated for some time, but were still used from test code. Closes: https://github.com/neondatabase/neon/issues/4282 ## Summary of changes - It is still convenient to do a "tenant_attach" from a test without having to write out a location_conf body, so those test methods have been retained with implementations that call through to their location_conf equivalent. --- libs/pageserver_api/src/models.rs | 37 --------- libs/utils/src/http/json.rs | 16 +--- pageserver/src/http/openapi_spec.yml | 13 +-- pageserver/src/http/routes.rs | 83 +------------------ storage_controller/src/service.rs | 7 ++ test_runner/fixtures/neon_fixtures.py | 2 - test_runner/fixtures/pageserver/http.py | 46 ++++------ .../regress/test_attach_tenant_config.py | 45 ++++------ test_runner/regress/test_remote_storage.py | 13 +-- test_runner/regress/test_tenant_detach.py | 48 ----------- test_runner/regress/test_timeline_size.py | 2 +- .../test_walredo_not_left_behind_on_detach.py | 2 +- 12 files changed, 54 insertions(+), 260 deletions(-) diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 3db75b7d0e39..b1e4525cc03c 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -607,31 +607,6 @@ impl TenantConfigRequest { } } -#[derive(Debug, Deserialize)] -pub struct TenantAttachRequest { - #[serde(default)] - pub config: TenantAttachConfig, - #[serde(default)] - pub generation: Option, -} - -/// Newtype to enforce deny_unknown_fields on TenantConfig for -/// its usage inside `TenantAttachRequest`. 
-#[derive(Debug, Serialize, Deserialize, Default)] -#[serde(deny_unknown_fields)] -pub struct TenantAttachConfig { - #[serde(flatten)] - allowing_unknown_fields: TenantConfig, -} - -impl std::ops::Deref for TenantAttachConfig { - type Target = TenantConfig; - - fn deref(&self) -> &Self::Target { - &self.allowing_unknown_fields - } -} - /// See [`TenantState::attachment_status`] and the OpenAPI docs for context. #[derive(Serialize, Deserialize, Clone)] #[serde(tag = "slug", content = "data", rename_all = "snake_case")] @@ -1554,18 +1529,6 @@ mod tests { "expect unknown field `unknown_field` error, got: {}", err ); - - let attach_request = json!({ - "config": { - "unknown_field": "unknown_value".to_string(), - }, - }); - let err = serde_json::from_value::(attach_request).unwrap_err(); - assert!( - err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err - ); } #[test] diff --git a/libs/utils/src/http/json.rs b/libs/utils/src/http/json.rs index 7ca62561feb2..6c25440b429d 100644 --- a/libs/utils/src/http/json.rs +++ b/libs/utils/src/http/json.rs @@ -8,22 +8,15 @@ use super::error::ApiError; pub async fn json_request Deserialize<'de>>( request: &mut Request, ) -> Result { - json_request_or_empty_body(request) - .await? - .context("missing request body") - .map_err(ApiError::BadRequest) -} - -/// Will be removed as part of -pub async fn json_request_or_empty_body Deserialize<'de>>( - request: &mut Request, -) -> Result, ApiError> { let body = hyper::body::aggregate(request.body_mut()) .await .context("Failed to read request body") .map_err(ApiError::BadRequest)?; + if body.remaining() == 0 { - return Ok(None); + return Err(ApiError::BadRequest(anyhow::anyhow!( + "missing request body" + ))); } let mut deser = serde_json::de::Deserializer::from_reader(body.reader()); @@ -31,7 +24,6 @@ pub async fn json_request_or_empty_body Deserialize<'de>>( serde_path_to_error::deserialize(&mut deser) // intentionally stringify because the debug version is not helpful in python logs .map_err(|e| anyhow::anyhow!("Failed to parse json request: {e}")) - .map(Some) .map_err(ApiError::BadRequest) } diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index e583992a58f9..58ff6e3f83cc 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -367,16 +367,7 @@ paths: $ref: "#/components/schemas/TenantLocationConfigResponse" "409": description: | - The tenant is already known to Pageserver in some way, - and hence this `/attach` call has been rejected. - - Some examples of how this can happen: - - tenant was created on this pageserver - - tenant attachment was started by an earlier call to `/attach`. - - Callers should poll the tenant status's `attachment_status` field, - like for status 202. See the longer description for `POST /attach` - for details. + The tenant is already being modified, perhaps by a concurrent call to this API content: application/json: schema: @@ -762,8 +753,6 @@ components: For example this can be caused by s3 being unreachable. The retry may be implemented with call to detach, though it would be better to not automate it and inspec failed state manually before proceeding with a retry. - - See the tenant `/attach` endpoint for more information. 
type: object required: - slug diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 450f89820e5b..d6ba9ee35e17 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -31,13 +31,11 @@ use pageserver_api::models::TenantShardLocation; use pageserver_api::models::TenantShardSplitRequest; use pageserver_api::models::TenantShardSplitResponse; use pageserver_api::models::TenantSorting; -use pageserver_api::models::TenantState; use pageserver_api::models::TopTenantShardItem; use pageserver_api::models::TopTenantShardsRequest; use pageserver_api::models::TopTenantShardsResponse; use pageserver_api::models::{ - DownloadRemoteLayersTaskSpawnRequest, LocationConfigMode, TenantAttachRequest, - TenantLocationConfigRequest, + DownloadRemoteLayersTaskSpawnRequest, LocationConfigMode, TenantLocationConfigRequest, }; use pageserver_api::shard::ShardCount; use pageserver_api::shard::TenantShardId; @@ -51,7 +49,6 @@ use utils::auth::JwtAuth; use utils::failpoint_support::failpoints_handler; use utils::http::endpoint::prometheus_metrics_handler; use utils::http::endpoint::request_span; -use utils::http::json::json_request_or_empty_body; use utils::http::request::{get_request_param, must_get_query_param, parse_query_param}; use crate::context::{DownloadBehavior, RequestContext}; @@ -821,58 +818,6 @@ async fn get_timestamp_of_lsn_handler( } } -async fn tenant_attach_handler( - mut request: Request, - _cancel: CancellationToken, -) -> Result, ApiError> { - let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; - check_permission(&request, Some(tenant_id))?; - - let maybe_body: Option = json_request_or_empty_body(&mut request).await?; - let tenant_conf = match &maybe_body { - Some(request) => TenantConfOpt::try_from(&*request.config).map_err(ApiError::BadRequest)?, - None => TenantConfOpt::default(), - }; - - let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn); - - info!("Handling tenant attach {tenant_id}"); - - let state = get_state(&request); - - let generation = get_request_generation(state, maybe_body.as_ref().and_then(|r| r.generation))?; - - let tenant_shard_id = TenantShardId::unsharded(tenant_id); - let shard_params = ShardParameters::default(); - let location_conf = LocationConf::attached_single(tenant_conf, generation, &shard_params); - - let tenant = state - .tenant_manager - .upsert_location(tenant_shard_id, location_conf, None, SpawnMode::Eager, &ctx) - .await?; - - let Some(tenant) = tenant else { - // This should never happen: indicates a bug in upsert_location - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "Upsert succeeded but didn't return tenant!" - ))); - }; - - // We might have successfully constructed a Tenant, but it could still - // end up in a broken state: - if let TenantState::Broken { - reason, - backtrace: _, - } = tenant.current_state() - { - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "Tenant state is Broken: {reason}" - ))); - } - - json_response(StatusCode::ACCEPTED, ()) -} - async fn timeline_delete_handler( request: Request, _cancel: CancellationToken, @@ -903,26 +848,6 @@ async fn timeline_delete_handler( json_response(StatusCode::ACCEPTED, ()) } -async fn tenant_detach_handler( - request: Request, - _cancel: CancellationToken, -) -> Result, ApiError> { - let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; - check_permission(&request, Some(tenant_id))?; - // This is a legacy API (`/location_conf` is the replacement). 
It only supports unsharded tenants - let tenant_shard_id = TenantShardId::unsharded(tenant_id); - - let state = get_state(&request); - let conf = state.conf; - state - .tenant_manager - .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client) - .instrument(info_span!("tenant_detach", %tenant_id, shard_id=%tenant_shard_id.shard_slug())) - .await?; - - json_response(StatusCode::OK, ()) -} - async fn tenant_reset_handler( request: Request, _cancel: CancellationToken, @@ -2711,12 +2636,6 @@ pub fn make_router( .post("/v1/tenant/:tenant_shard_id/timeline", |r| { api_handler(r, timeline_create_handler) }) - .post("/v1/tenant/:tenant_id/attach", |r| { - api_handler(r, tenant_attach_handler) - }) - .post("/v1/tenant/:tenant_id/detach", |r| { - api_handler(r, tenant_detach_handler) - }) .post("/v1/tenant/:tenant_shard_id/reset", |r| { api_handler(r, tenant_reset_handler) }) diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 388e0eadc8e9..e329f42dd610 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -1231,6 +1231,13 @@ impl Service { &self, attach_req: AttachHookRequest, ) -> anyhow::Result { + let _tenant_lock = trace_exclusive_lock( + &self.tenant_op_locks, + attach_req.tenant_shard_id.tenant_id, + TenantOperations::ShardSplit, + ) + .await; + // This is a test hook. To enable using it on tenants that were created directly with // the pageserver API (not via this service), we will auto-create any missing tenant // shards with default state. diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index b624c84fad42..84fb1f7cb47d 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2684,7 +2684,6 @@ def tenant_attach( self, tenant_id: TenantId, config: None | Dict[str, Any] = None, - config_null: bool = False, generation: Optional[int] = None, override_storage_controller_generation: bool = False, ): @@ -2702,7 +2701,6 @@ def tenant_attach( return client.tenant_attach( tenant_id, config, - config_null, generation=generation, ) diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 64c7ddee6c8c..2a7cbea20010 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -1,6 +1,5 @@ from __future__ import annotations -import json import time from collections import defaultdict from dataclasses import dataclass @@ -253,39 +252,30 @@ def tenant_attach( self, tenant_id: Union[TenantId, TenantShardId], config: None | Dict[str, Any] = None, - config_null: bool = False, generation: Optional[int] = None, ): - if config_null: - assert config is None - body: Any = None - else: - # null-config is prohibited by the API - config = config or {} - body = {"config": config} - if generation is not None: - body.update({"generation": generation}) + config = config or {} - res = self.post( - f"http://localhost:{self.port}/v1/tenant/{tenant_id}/attach", - data=json.dumps(body), - headers={"Content-Type": "application/json"}, + return self.tenant_location_conf( + tenant_id, + location_conf={ + "mode": "AttachedSingle", + "secondary_conf": None, + "tenant_conf": config, + "generation": generation, + }, ) - self.verbose_error(res) - - def tenant_detach(self, tenant_id: TenantId, detach_ignored=False, timeout_secs=None): - params = {} - if detach_ignored: - params["detach_ignored"] = "true" - - kwargs = {} - if timeout_secs is not None: - kwargs["timeout"] = timeout_secs - 
res = self.post( - f"http://localhost:{self.port}/v1/tenant/{tenant_id}/detach", params=params, **kwargs + def tenant_detach(self, tenant_id: TenantId): + return self.tenant_location_conf( + tenant_id, + location_conf={ + "mode": "Detached", + "secondary_conf": None, + "tenant_conf": {}, + "generation": None, + }, ) - self.verbose_error(res) def tenant_reset(self, tenant_id: Union[TenantId, TenantShardId], drop_cache: bool): params = {} diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index f4667a82dc33..e117c2140f5e 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -7,7 +7,7 @@ NeonEnv, NeonEnvBuilder, ) -from fixtures.pageserver.http import PageserverApiException, TenantConfig +from fixtures.pageserver.http import TenantConfig from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind from fixtures.utils import wait_until @@ -82,8 +82,8 @@ def test_null_body(negative_env: NegativeTests): tenant_id = negative_env.tenant_id ps_http = env.pageserver.http_client() - res = ps_http.post( - f"{ps_http.base_url}/v1/tenant/{tenant_id}/attach", + res = ps_http.put( + f"{ps_http.base_url}/v1/tenant/{tenant_id}/location_config", data=b"null", headers={"Content-Type": "application/json"}, ) @@ -99,35 +99,16 @@ def test_null_config(negative_env: NegativeTests): tenant_id = negative_env.tenant_id ps_http = env.pageserver.http_client() - res = ps_http.post( - f"{ps_http.base_url}/v1/tenant/{tenant_id}/attach", - data=b'{"config": null}', + res = ps_http.put( + f"{ps_http.base_url}/v1/tenant/{tenant_id}/location_config", + json={"mode": "AttachedSingle", "generation": 1, "tenant_conf": None}, headers={"Content-Type": "application/json"}, ) assert res.status_code == 400 -def test_config_with_unknown_keys_is_bad_request(negative_env: NegativeTests): - """ - If we send a config with unknown keys, the request should be rejected with status 400. 
- """ - - env = negative_env.neon_env - tenant_id = negative_env.tenant_id - - config_with_unknown_keys = { - "compaction_period": "1h", - "this_key_does_not_exist": "some value", - } - - with pytest.raises(PageserverApiException) as e: - env.pageserver.tenant_attach(tenant_id, config=config_with_unknown_keys) - assert e.type == PageserverApiException - assert e.value.status_code == 400 - - @pytest.mark.parametrize("content_type", [None, "application/json"]) -def test_no_config(positive_env: NeonEnv, content_type: Optional[str]): +def test_empty_config(positive_env: NeonEnv, content_type: Optional[str]): """ When the 'config' body attribute is omitted, the request should be accepted and the tenant should use the default configuration @@ -141,11 +122,13 @@ def test_no_config(positive_env: NeonEnv, content_type: Optional[str]): ps_http.tenant_detach(tenant_id) assert tenant_id not in [TenantId(t["id"]) for t in ps_http.tenant_list()] - body = {"generation": env.storage_controller.attach_hook_issue(tenant_id, env.pageserver.id)} - - ps_http.post( - f"{ps_http.base_url}/v1/tenant/{tenant_id}/attach", - json=body, + ps_http.put( + f"{ps_http.base_url}/v1/tenant/{tenant_id}/location_config", + json={ + "mode": "AttachedSingle", + "generation": env.storage_controller.attach_hook_issue(tenant_id, env.pageserver.id), + "tenant_conf": {}, + }, headers=None if content_type else {"Content-Type": "application/json"}, ).raise_for_status() diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py index 7f79bf5d5cab..b26bd3422f30 100644 --- a/test_runner/regress/test_remote_storage.py +++ b/test_runner/regress/test_remote_storage.py @@ -164,13 +164,14 @@ def test_remote_storage_backup_and_restore( "data": {"reason": "storage-sync-list-remote-timelines"}, } + # Even though the tenant is broken, subsequent calls to location_conf API will succeed, but + # the tenant will always end up in a broken state as a result of the failpoint. # Ensure that even though the tenant is broken, retrying the attachment fails - with pytest.raises(Exception, match="Tenant state is Broken"): - # Use same generation as in previous attempt - gen_state = env.storage_controller.inspect(tenant_id) - assert gen_state is not None - generation = gen_state[0] - env.pageserver.tenant_attach(tenant_id, generation=generation) + tenant_info = wait_until_tenant_state(pageserver_http, tenant_id, "Broken", 15) + gen_state = env.storage_controller.inspect(tenant_id) + assert gen_state is not None + generation = gen_state[0] + env.pageserver.tenant_attach(tenant_id, generation=generation) # Restart again, this implicitly clears the failpoint. # test_remote_failures=1 remains active, though, as it's in the pageserver config. 
diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 4c49e6fb856c..2056840558e6 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -275,16 +275,6 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): env.pageserver.allowed_errors.extend(PERMIT_PAGE_SERVICE_ERRORS) - # first check for non existing tenant - tenant_id = TenantId.generate() - with pytest.raises( - expected_exception=PageserverApiException, - match=f"NotFound: tenant {tenant_id}", - ) as excinfo: - pageserver_http.tenant_detach(tenant_id) - - assert excinfo.value.status_code == 404 - # create new nenant tenant_id, timeline_id = env.neon_cli.create_tenant() @@ -344,44 +334,6 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): pageserver_http.timeline_gc(tenant_id, timeline_id, 0) -# Creates a tenant, and detaches it with extra paremeter that forces ignored tenant detach. -# Tenant should be detached without issues. -def test_tenant_detach_regular_tenant(neon_simple_env: NeonEnv): - env = neon_simple_env - client = env.pageserver.http_client() - - # create a new tenant - tenant_id, _ = env.neon_cli.create_tenant() - - env.pageserver.allowed_errors.extend(PERMIT_PAGE_SERVICE_ERRORS) - - # assert tenant exists on disk - assert env.pageserver.tenant_dir(tenant_id).exists() - - endpoint = env.endpoints.create_start("main", tenant_id=tenant_id) - # we rely upon autocommit after each statement - endpoint.safe_psql_many( - queries=[ - "CREATE TABLE t(key int primary key, value text)", - "INSERT INTO t SELECT generate_series(1,100000), 'payload'", - ] - ) - - log.info("detaching regular tenant with detach ignored flag") - client.tenant_detach(tenant_id, True) - - log.info("regular tenant detached without error") - - # check that nothing is left on disk for deleted tenant - assert not env.pageserver.tenant_dir(tenant_id).exists() - - # assert the tenant does not exists in the Pageserver - tenants_after_detach = [tenant["id"] for tenant in client.tenant_list()] - assert ( - tenant_id not in tenants_after_detach - ), f"Ignored and then detached tenant {tenant_id} should not be present in pageserver's memory" - - def test_detach_while_attaching( neon_env_builder: NeonEnvBuilder, ): diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index 3110833563cd..f47356839c26 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -840,7 +840,7 @@ def all_active(): # Detaching a stuck tenant should proceed promptly # (reproducer for https://github.com/neondatabase/neon/pull/6430) - env.pageserver.http_client().tenant_detach(detach_tenant_id, timeout_secs=10) + env.pageserver.http_client().tenant_detach(detach_tenant_id) tenant_ids.remove(detach_tenant_id) # FIXME: currently the mechanism for cancelling attach is to set state to broken, which is reported spuriously at error level env.pageserver.allowed_errors.append( diff --git a/test_runner/regress/test_walredo_not_left_behind_on_detach.py b/test_runner/regress/test_walredo_not_left_behind_on_detach.py index ad37807dba21..375cfcb4feb0 100644 --- a/test_runner/regress/test_walredo_not_left_behind_on_detach.py +++ b/test_runner/regress/test_walredo_not_left_behind_on_detach.py @@ -37,7 +37,7 @@ def test_walredo_not_left_behind_on_detach(neon_env_builder: NeonEnvBuilder): expected_exception=PageserverApiException, match=f"NotFound: tenant {tenant_id}", ): - 
pageserver_http.tenant_detach(tenant_id) + pageserver_http.tenant_status(tenant_id) # create new nenant tenant_id, _ = env.neon_cli.create_tenant() From cd9a550d97f5863f8c123e66d08fed6360a8c771 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 25 Jun 2024 20:03:27 +0200 Subject: [PATCH 19/57] clippy-deny the `todo!()` macro (#4340) `todo!()` shouldn't slip into prod code --- .neon_clippy_args | 3 ++- libs/walproposer/src/walproposer.rs | 2 ++ trace/src/main.rs | 8 -------- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/.neon_clippy_args b/.neon_clippy_args index 25e09c61a67d..4db32cf35c07 100644 --- a/.neon_clippy_args +++ b/.neon_clippy_args @@ -1,4 +1,5 @@ # * `-A unknown_lints` – do not warn about unknown lint suppressions # that people with newer toolchains might use # * `-D warnings` - fail on any warnings (`cargo` returns non-zero exit status) -export CLIPPY_COMMON_ARGS="--locked --workspace --all-targets -- -A unknown_lints -D warnings" +# * `-D clippy::todo` - don't let `todo!()` slip into `main` +export CLIPPY_COMMON_ARGS="--locked --workspace --all-targets -- -A unknown_lints -D warnings -D clippy::todo" diff --git a/libs/walproposer/src/walproposer.rs b/libs/walproposer/src/walproposer.rs index f7b72b205f9d..37b1e0fa873b 100644 --- a/libs/walproposer/src/walproposer.rs +++ b/libs/walproposer/src/walproposer.rs @@ -1,3 +1,5 @@ +#![allow(clippy::todo)] + use std::ffi::CString; use crate::{ diff --git a/trace/src/main.rs b/trace/src/main.rs index 049f922b6fb2..79e1df988dae 100644 --- a/trace/src/main.rs +++ b/trace/src/main.rs @@ -38,12 +38,6 @@ enum Command { /// Print stats and anomalies about the traces Analyze, - - /// Draw the traces in svg format - Draw, - - /// Send the read requests to a pageserver - Replay, } // HACK This function will change and improve as we see what kind of analysis is useful. @@ -167,8 +161,6 @@ fn main() -> anyhow::Result<()> { analyze_trace(reader); } } - Command::Draw => todo!(), - Command::Replay => todo!(), } Ok(()) From 6c5d3b52634a3bb49f48eae24a9204cea67a7e77 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Tue, 25 Jun 2024 19:07:54 +0100 Subject: [PATCH 20/57] proxy fix wake compute console retry (#8141) ## Problem 1. Proxy is retrying errors from cplane that shouldn't be retried 2. ~~Proxy is not using the retry_after_ms value~~ ## Summary of changes 1. Correct the could_retry impl for ConsoleError. 2. ~~Update could_retry interface to support returning a fixed wait duration.~~ --- proxy/src/console/messages.rs | 111 ++++++++++++++++++----------- proxy/src/console/provider.rs | 48 ++++++++----- proxy/src/proxy/connect_compute.rs | 20 +++--- proxy/src/proxy/retry.rs | 54 ++++++++------ proxy/src/proxy/tests.rs | 25 +++++-- proxy/src/proxy/wake_compute.rs | 89 ++++++----------------- proxy/src/serverless/backend.rs | 13 ++-- 7 files changed, 193 insertions(+), 167 deletions(-) diff --git a/proxy/src/console/messages.rs b/proxy/src/console/messages.rs index 3b7d681a41d1..d28d13ba692b 100644 --- a/proxy/src/console/messages.rs +++ b/proxy/src/console/messages.rs @@ -5,7 +5,7 @@ use std::fmt::{self, Display}; use crate::auth::IpPattern; use crate::intern::{BranchIdInt, EndpointIdInt, ProjectIdInt}; -use crate::proxy::retry::ShouldRetry; +use crate::proxy::retry::CouldRetry; /// Generic error response with human-readable description. /// Note that we can't always present it to user as is. 
@@ -64,45 +64,47 @@ impl Display for ConsoleError { } } -impl ShouldRetry for ConsoleError { +impl CouldRetry for ConsoleError { fn could_retry(&self) -> bool { - if self.status.is_none() || self.status.as_ref().unwrap().details.retry_info.is_none() { - // retry some temporary failures because the compute was in a bad state - // (bad request can be returned when the endpoint was in transition) - return match &self { - ConsoleError { - http_status_code: http::StatusCode::BAD_REQUEST, - .. - } => true, - // don't retry when quotas are exceeded - ConsoleError { - http_status_code: http::StatusCode::UNPROCESSABLE_ENTITY, - ref error, - .. - } => !error.contains("compute time quota of non-primary branches is exceeded"), - // locked can be returned when the endpoint was in transition - // or when quotas are exceeded. don't retry when quotas are exceeded - ConsoleError { - http_status_code: http::StatusCode::LOCKED, - ref error, - .. - } => { - !error.contains("quota exceeded") - && !error.contains("the limit for current plan reached") - } - _ => false, - }; + // If the error message does not have a status, + // the error is unknown and probably should not retry automatically + let Some(status) = &self.status else { + return false; + }; + + // retry if the retry info is set. + if status.details.retry_info.is_some() { + return true; } - // retry if the response has a retry delay - if let Some(retry_info) = self - .status - .as_ref() - .and_then(|s| s.details.retry_info.as_ref()) - { - retry_info.retry_delay_ms > 0 - } else { - false + // if no retry info set, attempt to use the error code to guess the retry state. + let reason = status + .details + .error_info + .map_or(Reason::Unknown, |e| e.reason); + match reason { + // not a transitive error + Reason::RoleProtected => false, + // on retry, it will still not be found + Reason::ResourceNotFound + | Reason::ProjectNotFound + | Reason::EndpointNotFound + | Reason::BranchNotFound => false, + // we were asked to go away + Reason::RateLimitExceeded + | Reason::NonDefaultBranchComputeTimeExceeded + | Reason::ActiveTimeQuotaExceeded + | Reason::ComputeTimeQuotaExceeded + | Reason::WrittenDataQuotaExceeded + | Reason::DataTransferQuotaExceeded + | Reason::LogicalSizeQuotaExceeded => false, + // transitive error. control plane is currently busy + // but might be ready soon + Reason::RunningOperations => true, + Reason::ConcurrencyLimitReached => true, + Reason::LockAlreadyTaken => true, + // unknown error. better not retry it. + Reason::Unknown => false, } } } @@ -121,7 +123,7 @@ pub struct Details { pub user_facing_message: Option, } -#[derive(Debug, Deserialize)] +#[derive(Copy, Clone, Debug, Deserialize)] pub struct ErrorInfo { pub reason: Reason, // Schema could also have `metadata` field, but it's not structured. Skip it for now. @@ -129,30 +131,59 @@ pub struct ErrorInfo { #[derive(Clone, Copy, Debug, Deserialize, Default)] pub enum Reason { + /// RoleProtected indicates that the role is protected and the attempted operation is not permitted on protected roles. #[serde(rename = "ROLE_PROTECTED")] RoleProtected, + /// ResourceNotFound indicates that a resource (project, endpoint, branch, etc.) wasn't found, + /// usually due to the provided ID not being correct or because the subject doesn't have enough permissions to + /// access the requested resource. + /// Prefer a more specific reason if possible, e.g., ProjectNotFound, EndpointNotFound, etc. 
#[serde(rename = "RESOURCE_NOT_FOUND")] ResourceNotFound, + /// ProjectNotFound indicates that the project wasn't found, usually due to the provided ID not being correct, + /// or that the subject doesn't have enough permissions to access the requested project. #[serde(rename = "PROJECT_NOT_FOUND")] ProjectNotFound, + /// EndpointNotFound indicates that the endpoint wasn't found, usually due to the provided ID not being correct, + /// or that the subject doesn't have enough permissions to access the requested endpoint. #[serde(rename = "ENDPOINT_NOT_FOUND")] EndpointNotFound, + /// BranchNotFound indicates that the branch wasn't found, usually due to the provided ID not being correct, + /// or that the subject doesn't have enough permissions to access the requested branch. #[serde(rename = "BRANCH_NOT_FOUND")] BranchNotFound, + /// RateLimitExceeded indicates that the rate limit for the operation has been exceeded. #[serde(rename = "RATE_LIMIT_EXCEEDED")] RateLimitExceeded, + /// NonDefaultBranchComputeTimeExceeded indicates that the compute time quota of non-default branches has been + /// exceeded. #[serde(rename = "NON_PRIMARY_BRANCH_COMPUTE_TIME_EXCEEDED")] - NonPrimaryBranchComputeTimeExceeded, + NonDefaultBranchComputeTimeExceeded, + /// ActiveTimeQuotaExceeded indicates that the active time quota was exceeded. #[serde(rename = "ACTIVE_TIME_QUOTA_EXCEEDED")] ActiveTimeQuotaExceeded, + /// ComputeTimeQuotaExceeded indicates that the compute time quota was exceeded. #[serde(rename = "COMPUTE_TIME_QUOTA_EXCEEDED")] ComputeTimeQuotaExceeded, + /// WrittenDataQuotaExceeded indicates that the written data quota was exceeded. #[serde(rename = "WRITTEN_DATA_QUOTA_EXCEEDED")] WrittenDataQuotaExceeded, + /// DataTransferQuotaExceeded indicates that the data transfer quota was exceeded. #[serde(rename = "DATA_TRANSFER_QUOTA_EXCEEDED")] DataTransferQuotaExceeded, + /// LogicalSizeQuotaExceeded indicates that the logical size quota was exceeded. #[serde(rename = "LOGICAL_SIZE_QUOTA_EXCEEDED")] LogicalSizeQuotaExceeded, + /// RunningOperations indicates that the project already has some running operations + /// and scheduling of new ones is prohibited. + #[serde(rename = "RUNNING_OPERATIONS")] + RunningOperations, + /// ConcurrencyLimitReached indicates that the concurrency limit for an action was reached. + #[serde(rename = "CONCURRENCY_LIMIT_REACHED")] + ConcurrencyLimitReached, + /// LockAlreadyTaken indicates that the we attempted to take a lock that was already taken. 
+ #[serde(rename = "LOCK_ALREADY_TAKEN")] + LockAlreadyTaken, #[default] #[serde(other)] Unknown, @@ -170,7 +201,7 @@ impl Reason { } } -#[derive(Debug, Deserialize)] +#[derive(Copy, Clone, Debug, Deserialize)] pub struct RetryInfo { pub retry_delay_ms: u64, } diff --git a/proxy/src/console/provider.rs b/proxy/src/console/provider.rs index 915c2ee7a64e..bec55a83435f 100644 --- a/proxy/src/console/provider.rs +++ b/proxy/src/console/provider.rs @@ -25,9 +25,9 @@ use tracing::info; pub mod errors { use crate::{ - console::messages::{self, ConsoleError}, + console::messages::{self, ConsoleError, Reason}, error::{io_error, ReportableError, UserFacingError}, - proxy::retry::ShouldRetry, + proxy::retry::CouldRetry, }; use thiserror::Error; @@ -76,21 +76,22 @@ pub mod errors { ApiError::Console(e) => { use crate::error::ErrorKind::*; match e.get_reason() { - crate::console::messages::Reason::RoleProtected => User, - crate::console::messages::Reason::ResourceNotFound => User, - crate::console::messages::Reason::ProjectNotFound => User, - crate::console::messages::Reason::EndpointNotFound => User, - crate::console::messages::Reason::BranchNotFound => User, - crate::console::messages::Reason::RateLimitExceeded => ServiceRateLimit, - crate::console::messages::Reason::NonPrimaryBranchComputeTimeExceeded => { - User - } - crate::console::messages::Reason::ActiveTimeQuotaExceeded => User, - crate::console::messages::Reason::ComputeTimeQuotaExceeded => User, - crate::console::messages::Reason::WrittenDataQuotaExceeded => User, - crate::console::messages::Reason::DataTransferQuotaExceeded => User, - crate::console::messages::Reason::LogicalSizeQuotaExceeded => User, - crate::console::messages::Reason::Unknown => match &e { + Reason::RoleProtected => User, + Reason::ResourceNotFound => User, + Reason::ProjectNotFound => User, + Reason::EndpointNotFound => User, + Reason::BranchNotFound => User, + Reason::RateLimitExceeded => ServiceRateLimit, + Reason::NonDefaultBranchComputeTimeExceeded => User, + Reason::ActiveTimeQuotaExceeded => User, + Reason::ComputeTimeQuotaExceeded => User, + Reason::WrittenDataQuotaExceeded => User, + Reason::DataTransferQuotaExceeded => User, + Reason::LogicalSizeQuotaExceeded => User, + Reason::ConcurrencyLimitReached => ControlPlane, + Reason::LockAlreadyTaken => ControlPlane, + Reason::RunningOperations => ControlPlane, + Reason::Unknown => match &e { ConsoleError { http_status_code: http::StatusCode::NOT_FOUND | http::StatusCode::NOT_ACCEPTABLE, @@ -128,7 +129,7 @@ pub mod errors { } } - impl ShouldRetry for ApiError { + impl CouldRetry for ApiError { fn could_retry(&self) -> bool { match self { // retry some transport errors @@ -239,6 +240,17 @@ pub mod errors { } } } + + impl CouldRetry for WakeComputeError { + fn could_retry(&self) -> bool { + match self { + WakeComputeError::BadComputeAddress(_) => false, + WakeComputeError::ApiError(e) => e.could_retry(), + WakeComputeError::TooManyConnections => false, + WakeComputeError::TooManyConnectionAttempts(_) => false, + } + } + } } /// Auth secret which is managed by the cloud. 
diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs index 409d45b39a34..82180aaee3cf 100644 --- a/proxy/src/proxy/connect_compute.rs +++ b/proxy/src/proxy/connect_compute.rs @@ -7,7 +7,7 @@ use crate::{ error::ReportableError, metrics::{ConnectOutcome, ConnectionFailureKind, Metrics, RetriesMetricGroup, RetryType}, proxy::{ - retry::{retry_after, ShouldRetry}, + retry::{retry_after, should_retry, CouldRetry}, wake_compute::wake_compute, }, Host, @@ -17,6 +17,8 @@ use pq_proto::StartupMessageParams; use tokio::time; use tracing::{error, info, warn}; +use super::retry::ShouldRetryWakeCompute; + const CONNECT_TIMEOUT: time::Duration = time::Duration::from_secs(2); /// If we couldn't connect, a cached connection info might be to blame @@ -104,7 +106,7 @@ pub async fn connect_to_compute( connect_to_compute_retry_config: RetryConfig, ) -> Result where - M::ConnectError: ShouldRetry + std::fmt::Debug, + M::ConnectError: CouldRetry + ShouldRetryWakeCompute + std::fmt::Debug, M::Error: From, { let mut num_retries = 0; @@ -139,10 +141,10 @@ where error!(error = ?err, "could not connect to compute node"); - let node_info = if !node_info.cached() || !err.should_retry_database_address() { + let node_info = if !node_info.cached() || !err.should_retry_wake_compute() { // If we just recieved this from cplane and dodn't get it from cache, we shouldn't retry. // Do not need to retrieve a new node_info, just return the old one. - if !err.should_retry(num_retries, connect_to_compute_retry_config) { + if should_retry(&err, num_retries, connect_to_compute_retry_config) { Metrics::get().proxy.retries_metric.observe( RetriesMetricGroup { outcome: ConnectOutcome::Failed, @@ -188,9 +190,8 @@ where return Ok(res); } Err(e) => { - let retriable = e.should_retry(num_retries, connect_to_compute_retry_config); - if !retriable { - error!(error = ?e, num_retries, retriable, "couldn't connect to compute node"); + if !should_retry(&e, num_retries, connect_to_compute_retry_config) { + error!(error = ?e, num_retries, retriable = false, "couldn't connect to compute node"); Metrics::get().proxy.retries_metric.observe( RetriesMetricGroup { outcome: ConnectOutcome::Failed, @@ -200,9 +201,10 @@ where ); return Err(e.into()); } - warn!(error = ?e, num_retries, retriable, "couldn't connect to compute node"); + + warn!(error = ?e, num_retries, retriable = true, "couldn't connect to compute node"); } - } + }; let wait_duration = retry_after(num_retries, connect_to_compute_retry_config); num_retries += 1; diff --git a/proxy/src/proxy/retry.rs b/proxy/src/proxy/retry.rs index 8dec1f1137a2..644b183a9183 100644 --- a/proxy/src/proxy/retry.rs +++ b/proxy/src/proxy/retry.rs @@ -2,20 +2,22 @@ use crate::{compute, config::RetryConfig}; use std::{error::Error, io}; use tokio::time; -pub trait ShouldRetry { +pub trait CouldRetry { + /// Returns true if the error could be retried fn could_retry(&self) -> bool; - fn should_retry(&self, num_retries: u32, config: RetryConfig) -> bool { - match self { - _ if num_retries >= config.max_retries => false, - err => err.could_retry(), - } - } - fn should_retry_database_address(&self) -> bool { - true - } } -impl ShouldRetry for io::Error { +pub trait ShouldRetryWakeCompute { + /// Returns true if we need to invalidate the cache for this node. + /// If false, we can continue retrying with the current node cache. 
+ fn should_retry_wake_compute(&self) -> bool; +} + +pub fn should_retry(err: &impl CouldRetry, num_retries: u32, config: RetryConfig) -> bool { + num_retries < config.max_retries && err.could_retry() +} + +impl CouldRetry for io::Error { fn could_retry(&self) -> bool { use std::io::ErrorKind; matches!( @@ -25,7 +27,7 @@ impl ShouldRetry for io::Error { } } -impl ShouldRetry for tokio_postgres::error::DbError { +impl CouldRetry for tokio_postgres::error::DbError { fn could_retry(&self) -> bool { use tokio_postgres::error::SqlState; matches!( @@ -36,7 +38,9 @@ impl ShouldRetry for tokio_postgres::error::DbError { | &SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, ) } - fn should_retry_database_address(&self) -> bool { +} +impl ShouldRetryWakeCompute for tokio_postgres::error::DbError { + fn should_retry_wake_compute(&self) -> bool { use tokio_postgres::error::SqlState; // Here are errors that happens after the user successfully authenticated to the database. // TODO: there are pgbouncer errors that should be retried, but they are not listed here. @@ -53,7 +57,7 @@ impl ShouldRetry for tokio_postgres::error::DbError { } } -impl ShouldRetry for tokio_postgres::Error { +impl CouldRetry for tokio_postgres::Error { fn could_retry(&self) -> bool { if let Some(io_err) = self.source().and_then(|x| x.downcast_ref()) { io::Error::could_retry(io_err) @@ -63,29 +67,33 @@ impl ShouldRetry for tokio_postgres::Error { false } } - fn should_retry_database_address(&self) -> bool { - if let Some(io_err) = self.source().and_then(|x| x.downcast_ref()) { - io::Error::should_retry_database_address(io_err) - } else if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) { - tokio_postgres::error::DbError::should_retry_database_address(db_err) +} +impl ShouldRetryWakeCompute for tokio_postgres::Error { + fn should_retry_wake_compute(&self) -> bool { + if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) { + tokio_postgres::error::DbError::should_retry_wake_compute(db_err) } else { + // likely an IO error. Possible the compute has shutdown and the + // cache is stale. 
true } } } -impl ShouldRetry for compute::ConnectionError { +impl CouldRetry for compute::ConnectionError { fn could_retry(&self) -> bool { match self { compute::ConnectionError::Postgres(err) => err.could_retry(), compute::ConnectionError::CouldNotConnect(err) => err.could_retry(), + compute::ConnectionError::WakeComputeError(err) => err.could_retry(), _ => false, } } - fn should_retry_database_address(&self) -> bool { +} +impl ShouldRetryWakeCompute for compute::ConnectionError { + fn should_retry_wake_compute(&self) -> bool { match self { - compute::ConnectionError::Postgres(err) => err.should_retry_database_address(), - compute::ConnectionError::CouldNotConnect(err) => err.should_retry_database_address(), + compute::ConnectionError::Postgres(err) => err.should_retry_wake_compute(), // the cache entry was not checked for validity compute::ConnectionError::TooManyConnectionAttempts(_) => false, _ => true, diff --git a/proxy/src/proxy/tests.rs b/proxy/src/proxy/tests.rs index 96683511fec5..8119f39fae6b 100644 --- a/proxy/src/proxy/tests.rs +++ b/proxy/src/proxy/tests.rs @@ -5,21 +5,21 @@ mod mitm; use std::time::Duration; use super::connect_compute::ConnectMechanism; -use super::retry::ShouldRetry; +use super::retry::CouldRetry; use super::*; use crate::auth::backend::{ ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo, MaybeOwned, TestBackend, }; use crate::config::{CertResolver, RetryConfig}; use crate::console::caches::NodeInfoCache; -use crate::console::messages::{ConsoleError, MetricsAuxInfo}; +use crate::console::messages::{ConsoleError, Details, MetricsAuxInfo, Status}; use crate::console::provider::{CachedAllowedIps, CachedRoleSecret, ConsoleBackend}; use crate::console::{self, CachedNodeInfo, NodeInfo}; use crate::error::ErrorKind; -use crate::proxy::retry::retry_after; use crate::{http, sasl, scram, BranchId, EndpointId, ProjectId}; use anyhow::{bail, Context}; use async_trait::async_trait; +use retry::{retry_after, ShouldRetryWakeCompute}; use rstest::rstest; use rustls::pki_types; use tokio_postgres::config::SslMode; @@ -438,11 +438,16 @@ impl std::fmt::Display for TestConnectError { impl std::error::Error for TestConnectError {} -impl ShouldRetry for TestConnectError { +impl CouldRetry for TestConnectError { fn could_retry(&self) -> bool { self.retryable } } +impl ShouldRetryWakeCompute for TestConnectError { + fn should_retry_wake_compute(&self) -> bool { + true + } +} #[async_trait] impl ConnectMechanism for TestConnectMechanism { @@ -485,7 +490,7 @@ impl TestBackend for TestConnectMechanism { ConnectAction::Wake => Ok(helper_create_cached_node_info(self.cache)), ConnectAction::WakeFail => { let err = console::errors::ApiError::Console(ConsoleError { - http_status_code: http::StatusCode::FORBIDDEN, + http_status_code: http::StatusCode::BAD_REQUEST, error: "TEST".into(), status: None, }); @@ -496,7 +501,15 @@ impl TestBackend for TestConnectMechanism { let err = console::errors::ApiError::Console(ConsoleError { http_status_code: http::StatusCode::BAD_REQUEST, error: "TEST".into(), - status: None, + status: Some(Status { + code: "error".into(), + message: "error".into(), + details: Details { + error_info: None, + retry_info: Some(console::messages::RetryInfo { retry_delay_ms: 1 }), + user_facing_message: None, + }, + }), }); assert!(err.could_retry()); Err(console::errors::WakeComputeError::ApiError(err)) diff --git a/proxy/src/proxy/wake_compute.rs b/proxy/src/proxy/wake_compute.rs index c166cf4389dc..fef349aac04e 100644 --- a/proxy/src/proxy/wake_compute.rs +++ 
b/proxy/src/proxy/wake_compute.rs @@ -1,18 +1,16 @@ use crate::config::RetryConfig; -use crate::console::messages::ConsoleError; +use crate::console::messages::{ConsoleError, Reason}; use crate::console::{errors::WakeComputeError, provider::CachedNodeInfo}; use crate::context::RequestMonitoring; use crate::metrics::{ ConnectOutcome, ConnectionFailuresBreakdownGroup, Metrics, RetriesMetricGroup, RetryType, WakeupFailureKind, }; -use crate::proxy::retry::retry_after; +use crate::proxy::retry::{retry_after, should_retry}; use hyper1::StatusCode; -use std::ops::ControlFlow; use tracing::{error, info, warn}; use super::connect_compute::ComputeConnectBackend; -use super::retry::ShouldRetry; pub async fn wake_compute( num_retries: &mut u32, @@ -22,9 +20,8 @@ pub async fn wake_compute( ) -> Result { let retry_type = RetryType::WakeCompute; loop { - let wake_res = api.wake_compute(ctx).await; - match handle_try_wake(wake_res, *num_retries, config) { - Err(e) => { + match api.wake_compute(ctx).await { + Err(e) if !should_retry(&e, *num_retries, config) => { error!(error = ?e, num_retries, retriable = false, "couldn't wake compute node"); report_error(&e, false); Metrics::get().proxy.retries_metric.observe( @@ -36,11 +33,11 @@ pub async fn wake_compute( ); return Err(e); } - Ok(ControlFlow::Continue(e)) => { + Err(e) => { warn!(error = ?e, num_retries, retriable = true, "couldn't wake compute node"); report_error(&e, true); } - Ok(ControlFlow::Break(n)) => { + Ok(n) => { Metrics::get().proxy.retries_metric.observe( RetriesMetricGroup { outcome: ConnectOutcome::Success, @@ -63,70 +60,28 @@ pub async fn wake_compute( } } -/// Attempts to wake up the compute node. -/// * Returns Ok(Continue(e)) if there was an error waking but retries are acceptable -/// * Returns Ok(Break(node)) if the wakeup succeeded -/// * Returns Err(e) if there was an error -pub fn handle_try_wake( - result: Result, - num_retries: u32, - config: RetryConfig, -) -> Result, WakeComputeError> { - match result { - Err(err) => match &err { - WakeComputeError::ApiError(api) if api.should_retry(num_retries, config) => { - Ok(ControlFlow::Continue(err)) - } - _ => Err(err), - }, - // Ready to try again. 
- Ok(new) => Ok(ControlFlow::Break(new)), - } -} - fn report_error(e: &WakeComputeError, retry: bool) { use crate::console::errors::ApiError; let kind = match e { WakeComputeError::BadComputeAddress(_) => WakeupFailureKind::BadComputeAddress, WakeComputeError::ApiError(ApiError::Transport(_)) => WakeupFailureKind::ApiTransportError, WakeComputeError::ApiError(ApiError::Console(e)) => match e.get_reason() { - crate::console::messages::Reason::RoleProtected => { - WakeupFailureKind::ApiConsoleBadRequest - } - crate::console::messages::Reason::ResourceNotFound => { - WakeupFailureKind::ApiConsoleBadRequest - } - crate::console::messages::Reason::ProjectNotFound => { - WakeupFailureKind::ApiConsoleBadRequest - } - crate::console::messages::Reason::EndpointNotFound => { - WakeupFailureKind::ApiConsoleBadRequest - } - crate::console::messages::Reason::BranchNotFound => { - WakeupFailureKind::ApiConsoleBadRequest - } - crate::console::messages::Reason::RateLimitExceeded => { - WakeupFailureKind::ApiConsoleLocked - } - crate::console::messages::Reason::NonPrimaryBranchComputeTimeExceeded => { - WakeupFailureKind::QuotaExceeded - } - crate::console::messages::Reason::ActiveTimeQuotaExceeded => { - WakeupFailureKind::QuotaExceeded - } - crate::console::messages::Reason::ComputeTimeQuotaExceeded => { - WakeupFailureKind::QuotaExceeded - } - crate::console::messages::Reason::WrittenDataQuotaExceeded => { - WakeupFailureKind::QuotaExceeded - } - crate::console::messages::Reason::DataTransferQuotaExceeded => { - WakeupFailureKind::QuotaExceeded - } - crate::console::messages::Reason::LogicalSizeQuotaExceeded => { - WakeupFailureKind::QuotaExceeded - } - crate::console::messages::Reason::Unknown => match e { + Reason::RoleProtected => WakeupFailureKind::ApiConsoleBadRequest, + Reason::ResourceNotFound => WakeupFailureKind::ApiConsoleBadRequest, + Reason::ProjectNotFound => WakeupFailureKind::ApiConsoleBadRequest, + Reason::EndpointNotFound => WakeupFailureKind::ApiConsoleBadRequest, + Reason::BranchNotFound => WakeupFailureKind::ApiConsoleBadRequest, + Reason::RateLimitExceeded => WakeupFailureKind::ApiConsoleLocked, + Reason::NonDefaultBranchComputeTimeExceeded => WakeupFailureKind::QuotaExceeded, + Reason::ActiveTimeQuotaExceeded => WakeupFailureKind::QuotaExceeded, + Reason::ComputeTimeQuotaExceeded => WakeupFailureKind::QuotaExceeded, + Reason::WrittenDataQuotaExceeded => WakeupFailureKind::QuotaExceeded, + Reason::DataTransferQuotaExceeded => WakeupFailureKind::QuotaExceeded, + Reason::LogicalSizeQuotaExceeded => WakeupFailureKind::QuotaExceeded, + Reason::ConcurrencyLimitReached => WakeupFailureKind::ApiConsoleLocked, + Reason::LockAlreadyTaken => WakeupFailureKind::ApiConsoleLocked, + Reason::RunningOperations => WakeupFailureKind::ApiConsoleLocked, + Reason::Unknown => match e { ConsoleError { http_status_code: StatusCode::LOCKED, ref error, diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index 05d60612385c..6c34d48338b3 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -16,7 +16,10 @@ use crate::{ context::RequestMonitoring, error::{ErrorKind, ReportableError, UserFacingError}, intern::EndpointIdInt, - proxy::{connect_compute::ConnectMechanism, retry::ShouldRetry}, + proxy::{ + connect_compute::ConnectMechanism, + retry::{CouldRetry, ShouldRetryWakeCompute}, + }, rate_limiter::EndpointRateLimiter, Host, }; @@ -179,7 +182,7 @@ impl UserFacingError for HttpConnError { } } -impl ShouldRetry for HttpConnError { +impl CouldRetry for 
HttpConnError { fn could_retry(&self) -> bool { match self { HttpConnError::ConnectionError(e) => e.could_retry(), @@ -190,9 +193,11 @@ impl ShouldRetry for HttpConnError { HttpConnError::TooManyConnectionAttempts(_) => false, } } - fn should_retry_database_address(&self) -> bool { +} +impl ShouldRetryWakeCompute for HttpConnError { + fn should_retry_wake_compute(&self) -> bool { match self { - HttpConnError::ConnectionError(e) => e.should_retry_database_address(), + HttpConnError::ConnectionError(e) => e.should_retry_wake_compute(), // we never checked cache validity HttpConnError::TooManyConnectionAttempts(_) => false, _ => true, From 76864e6a2a67f1ae0480bffafbad7114d77c1826 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 25 Jun 2024 16:49:29 -0400 Subject: [PATCH 21/57] feat(pageserver): add image layer iterator (#8006) part of https://github.com/neondatabase/neon/issues/8002 ## Summary of changes This pull request adds the image layer iterator. It buffers a fixed amount of key-value pairs in memory, and give developer an iterator abstraction over the image layer. Once the buffer is exhausted, it will issue 1 I/O to fetch the next batch. Due to the Rust lifetime mysteries, the `get_stream_from` API has been refactored to `into_stream` and consumes `self`. Delta layer iterator implementation will be similar, therefore I'll add it after this pull request gets merged. --------- Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/key.rs | 7 +- pageserver/src/tenant/block_io.rs | 1 + pageserver/src/tenant/disk_btree.rs | 32 ++- .../src/tenant/storage_layer/delta_layer.rs | 28 +-- .../src/tenant/storage_layer/image_layer.rs | 209 +++++++++++++++++- pageserver/src/tenant/storage_layer/layer.rs | 14 +- pageserver/src/tenant/vectored_blob_io.rs | 110 ++++++++- 7 files changed, 363 insertions(+), 38 deletions(-) diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index 997c1cc43ad7..cd430bfab7d4 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -160,8 +160,9 @@ impl Key { key } - /// Convert a 18B slice to a key. This function should not be used for metadata keys because field2 is handled differently. - /// Use [`Key::from_i128`] instead if you want to handle 16B keys (i.e., metadata keys). + /// Convert a 18B slice to a key. This function should not be used for 16B metadata keys because `field2` is handled differently. + /// Use [`Key::from_i128`] instead if you want to handle 16B keys (i.e., metadata keys). There are some restrictions on `field2`, + /// and therefore not all 18B slices are valid page server keys. pub fn from_slice(b: &[u8]) -> Self { Key { field1: b[0], @@ -173,7 +174,7 @@ impl Key { } } - /// Convert a key to a 18B slice. This function should not be used for metadata keys because field2 is handled differently. + /// Convert a key to a 18B slice. This function should not be used for getting a 16B metadata key because `field2` is handled differently. /// Use [`Key::to_i128`] instead if you want to get a 16B key (i.e., metadata keys). pub fn write_to_byte_slice(&self, buf: &mut [u8]) { buf[0] = self.field1; diff --git a/pageserver/src/tenant/block_io.rs b/pageserver/src/tenant/block_io.rs index 92928116c1f6..b406d5033243 100644 --- a/pageserver/src/tenant/block_io.rs +++ b/pageserver/src/tenant/block_io.rs @@ -160,6 +160,7 @@ impl<'a> BlockCursor<'a> { /// /// The file is assumed to be immutable. This doesn't provide any functions /// for modifying the file, nor for invalidating the cache if it is modified. 
+#[derive(Clone)] pub struct FileBlockReader<'a> { pub file: &'a VirtualFile, diff --git a/pageserver/src/tenant/disk_btree.rs b/pageserver/src/tenant/disk_btree.rs index 119df3e6c408..b76498b60859 100644 --- a/pageserver/src/tenant/disk_btree.rs +++ b/pageserver/src/tenant/disk_btree.rs @@ -212,6 +212,7 @@ impl<'a, const L: usize> OnDiskNode<'a, L> { /// /// Public reader object, to search the tree. /// +#[derive(Clone)] pub struct DiskBtreeReader where R: BlockReader, @@ -259,27 +260,38 @@ where Ok(result) } - pub fn iter<'a>( - &'a self, - start_key: &'a [u8; L], - ctx: &'a RequestContext, - ) -> DiskBtreeIterator<'a> { + pub fn iter<'a>(self, start_key: &'a [u8; L], ctx: &'a RequestContext) -> DiskBtreeIterator<'a> + where + R: 'a, + { DiskBtreeIterator { - stream: Box::pin(self.get_stream_from(start_key, ctx)), + stream: Box::pin(self.into_stream(start_key, ctx)), } } /// Return a stream which yields all key, value pairs from the index /// starting from the first key greater or equal to `start_key`. /// - /// Note that this is a copy of [`Self::visit`]. + /// Note 1: that this is a copy of [`Self::visit`]. /// TODO: Once the sequential read path is removed this will become /// the only index traversal method. - pub fn get_stream_from<'a>( - &'a self, + /// + /// Note 2: this function used to take `&self` but it now consumes `self`. This is due to + /// the lifetime constraints of the reader and the stream / iterator it creates. Using `&self` + /// requires the reader to be present when the stream is used, and this creates a lifetime + /// dependency between the reader and the stream. Now if we want to create an iterator that + /// holds the stream, someone will need to keep a reference to the reader, which is inconvenient + /// to use from the image/delta layer APIs. + /// + /// Feel free to add the `&self` variant back if it's necessary. + pub fn into_stream<'a>( + self, start_key: &'a [u8; L], ctx: &'a RequestContext, - ) -> impl Stream, u64), DiskBtreeError>> + 'a { + ) -> impl Stream, u64), DiskBtreeError>> + 'a + where + R: 'a, + { try_stream! 
{ let mut stack = Vec::new(); stack.push((self.root_blk, None)); diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 5e01ecd71de0..ab3ef4980fbe 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -941,7 +941,7 @@ impl DeltaLayerInner { ); let mut result = Vec::new(); let mut stream = - Box::pin(self.stream_index_forwards(&index_reader, &[0; DELTA_KEY_SIZE], ctx)); + Box::pin(self.stream_index_forwards(index_reader, &[0; DELTA_KEY_SIZE], ctx)); let block_reader = FileBlockReader::new(&self.file, self.file_id); let cursor = block_reader.block_cursor(); let mut buf = Vec::new(); @@ -976,7 +976,7 @@ impl DeltaLayerInner { ctx: &RequestContext, ) -> anyhow::Result> where - Reader: BlockReader, + Reader: BlockReader + Clone, { let ctx = RequestContextBuilder::extend(ctx) .page_content_kind(PageContentKind::DeltaLayerBtreeNode) @@ -986,7 +986,7 @@ impl DeltaLayerInner { let mut range_end_handled = false; let start_key = DeltaKey::from_key_lsn(&range.start, lsn_range.start); - let index_stream = index_reader.get_stream_from(&start_key.0, &ctx); + let index_stream = index_reader.clone().into_stream(&start_key.0, &ctx); let mut index_stream = std::pin::pin!(index_stream); while let Some(index_entry) = index_stream.next().await { @@ -1241,7 +1241,7 @@ impl DeltaLayerInner { block_reader, ); - let stream = self.stream_index_forwards(&tree_reader, &[0u8; DELTA_KEY_SIZE], ctx); + let stream = self.stream_index_forwards(tree_reader, &[0u8; DELTA_KEY_SIZE], ctx); let stream = stream.map_ok(|(key, lsn, pos)| Item::Actual(key, lsn, pos)); // put in a sentinel value for getting the end offset for last item, and not having to // repeat the whole read part @@ -1300,7 +1300,7 @@ impl DeltaLayerInner { offsets.start.pos(), offsets.end.pos(), meta, - max_read_size, + Some(max_read_size), )) } } else { @@ -1459,17 +1459,17 @@ impl DeltaLayerInner { fn stream_index_forwards<'a, R>( &'a self, - reader: &'a DiskBtreeReader, + reader: DiskBtreeReader, start: &'a [u8; DELTA_KEY_SIZE], ctx: &'a RequestContext, ) -> impl futures::stream::Stream< Item = Result<(Key, Lsn, BlobRef), crate::tenant::disk_btree::DiskBtreeError>, > + 'a where - R: BlockReader, + R: BlockReader + 'a, { use futures::stream::TryStreamExt; - let stream = reader.get_stream_from(start, ctx); + let stream = reader.into_stream(start, ctx); stream.map_ok(|(key, value)| { let key = DeltaKey::from_slice(&key); let (key, lsn) = (key.key(), key.lsn()); @@ -1857,7 +1857,7 @@ mod test { .finish(entries_meta.key_range.end, &timeline, &ctx) .await?; - let inner = resident.as_delta(&ctx).await?; + let inner = resident.get_as_delta(&ctx).await?; let file_size = inner.file.metadata().await?.len(); tracing::info!( @@ -2044,11 +2044,11 @@ mod test { let copied_layer = writer.finish(Key::MAX, &branch, ctx).await.unwrap(); - copied_layer.as_delta(ctx).await.unwrap(); + copied_layer.get_as_delta(ctx).await.unwrap(); assert_keys_and_values_eq( - new_layer.as_delta(ctx).await.unwrap(), - copied_layer.as_delta(ctx).await.unwrap(), + new_layer.get_as_delta(ctx).await.unwrap(), + copied_layer.get_as_delta(ctx).await.unwrap(), truncate_at, ctx, ) @@ -2073,7 +2073,7 @@ mod test { source.index_root_blk, &source_reader, ); - let source_stream = source.stream_index_forwards(&source_tree, &start_key, ctx); + let source_stream = source.stream_index_forwards(source_tree, &start_key, ctx); let source_stream = source_stream.filter(|res| match 
res { Ok((_, lsn, _)) => ready(lsn < &truncated_at), _ => ready(true), @@ -2086,7 +2086,7 @@ mod test { truncated.index_root_blk, &truncated_reader, ); - let truncated_stream = truncated.stream_index_forwards(&truncated_tree, &start_key, ctx); + let truncated_stream = truncated.stream_index_forwards(truncated_tree, &start_key, ctx); let mut truncated_stream = std::pin::pin!(truncated_stream); let mut scratch_left = Vec::new(); diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 06e2f0938437..99bce1890d47 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -495,7 +495,7 @@ impl ImageLayerInner { let tree_reader = DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, &block_reader); let mut result = Vec::new(); - let mut stream = Box::pin(tree_reader.get_stream_from(&[0; KEY_SIZE], ctx)); + let mut stream = Box::pin(tree_reader.into_stream(&[0; KEY_SIZE], ctx)); let block_reader = FileBlockReader::new(&self.file, self.file_id); let cursor = block_reader.block_cursor(); while let Some(item) = stream.next().await { @@ -544,7 +544,7 @@ impl ImageLayerInner { let mut search_key: [u8; KEY_SIZE] = [0u8; KEY_SIZE]; range.start.write_to_byte_slice(&mut search_key); - let index_stream = tree_reader.get_stream_from(&search_key, &ctx); + let index_stream = tree_reader.clone().into_stream(&search_key, &ctx); let mut index_stream = std::pin::pin!(index_stream); while let Some(index_entry) = index_stream.next().await { @@ -689,6 +689,24 @@ impl ImageLayerInner { }; } } + + #[cfg(test)] + pub(crate) fn iter<'a>(&'a self, ctx: &'a RequestContext) -> ImageLayerIterator<'a> { + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let tree_reader = + DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, block_reader); + ImageLayerIterator { + image_layer: self, + ctx, + index_iter: tree_reader.iter(&[0; KEY_SIZE], ctx), + key_values_batch: std::collections::VecDeque::new(), + is_end: false, + planner: crate::tenant::vectored_blob_io::StreamingVectoredReadPlanner::new( + 1024 * 8192, // The default value. Unit tests might use a different value. 1024 * 8K = 8MB buffer. + 1024, // The default value. Unit tests might use a different value + ), + } + } } /// A builder object for constructing a new image layer. @@ -943,11 +961,77 @@ impl Drop for ImageLayerWriter { } } +#[cfg(test)] +pub struct ImageLayerIterator<'a> { + image_layer: &'a ImageLayerInner, + ctx: &'a RequestContext, + planner: crate::tenant::vectored_blob_io::StreamingVectoredReadPlanner, + index_iter: crate::tenant::disk_btree::DiskBtreeIterator<'a>, + key_values_batch: std::collections::VecDeque<(Key, Lsn, Value)>, + is_end: bool, +} + +#[cfg(test)] +impl<'a> ImageLayerIterator<'a> { + /// Retrieve a batch of key-value pairs into the iterator buffer. 
+ async fn next_batch(&mut self) -> anyhow::Result<()> { + assert!(self.key_values_batch.is_empty()); + assert!(!self.is_end); + + let plan = loop { + if let Some(res) = self.index_iter.next().await { + let (raw_key, offset) = res?; + if let Some(batch_plan) = self.planner.handle( + Key::from_slice(&raw_key[..KEY_SIZE]), + self.image_layer.lsn, + offset, + BlobFlag::None, + ) { + break batch_plan; + } + } else { + self.is_end = true; + let payload_end = self.image_layer.index_start_blk as u64 * PAGE_SZ as u64; + break self.planner.handle_range_end(payload_end); + } + }; + let vectored_blob_reader = VectoredBlobReader::new(&self.image_layer.file); + let mut next_batch = std::collections::VecDeque::new(); + let buf_size = plan.size(); + let buf = BytesMut::with_capacity(buf_size); + let blobs_buf = vectored_blob_reader + .read_blobs(&plan, buf, self.ctx) + .await?; + let frozen_buf: Bytes = blobs_buf.buf.freeze(); + for meta in blobs_buf.blobs.iter() { + let img_buf = frozen_buf.slice(meta.start..meta.end); + next_batch.push_back((meta.meta.key, self.image_layer.lsn, Value::Image(img_buf))); + } + self.key_values_batch = next_batch; + Ok(()) + } + + pub async fn next(&mut self) -> anyhow::Result> { + if self.key_values_batch.is_empty() { + if self.is_end { + return Ok(None); + } + self.next_batch().await?; + } + Ok(Some( + self.key_values_batch + .pop_front() + .expect("should not be empty"), + )) + } +} + #[cfg(test)] mod test { - use std::time::Duration; + use std::{sync::Arc, time::Duration}; use bytes::Bytes; + use itertools::Itertools; use pageserver_api::{ key::Key, shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}, @@ -959,11 +1043,19 @@ mod test { }; use crate::{ - tenant::{config::TenantConf, harness::TenantHarness}, + context::RequestContext, + repository::Value, + tenant::{ + config::TenantConf, + harness::{TenantHarness, TIMELINE_ID}, + storage_layer::ResidentLayer, + vectored_blob_io::StreamingVectoredReadPlanner, + Tenant, Timeline, + }, DEFAULT_PG_VERSION, }; - use super::ImageLayerWriter; + use super::{ImageLayerIterator, ImageLayerWriter}; #[tokio::test] async fn image_layer_rewrite() { @@ -1134,4 +1226,111 @@ mod test { } } } + + async fn produce_image_layer( + tenant: &Tenant, + tline: &Arc, + mut images: Vec<(Key, Bytes)>, + lsn: Lsn, + ctx: &RequestContext, + ) -> anyhow::Result { + images.sort(); + let (key_start, _) = images.first().unwrap(); + let (key_last, _) = images.last().unwrap(); + let key_end = key_last.next(); + let key_range = *key_start..key_end; + let mut writer = ImageLayerWriter::new( + tenant.conf, + tline.timeline_id, + tenant.tenant_shard_id, + &key_range, + lsn, + ctx, + ) + .await?; + + for (key, img) in images { + writer.put_image(key, img, ctx).await?; + } + let img_layer = writer.finish(tline, ctx).await?; + + Ok::<_, anyhow::Error>(img_layer) + } + + async fn assert_img_iter_equal( + img_iter: &mut ImageLayerIterator<'_>, + expect: &[(Key, Bytes)], + expect_lsn: Lsn, + ) { + let mut expect_iter = expect.iter(); + loop { + let o1 = img_iter.next().await.unwrap(); + let o2 = expect_iter.next(); + match (o1, o2) { + (None, None) => break, + (Some((k1, l1, v1)), Some((k2, i2))) => { + let Value::Image(i1) = v1 else { + panic!("expect Value::Image") + }; + assert_eq!(&k1, k2); + assert_eq!(l1, expect_lsn); + assert_eq!(&i1, i2); + } + (o1, o2) => panic!("iterators length mismatch: {:?}, {:?}", o1, o2), + } + } + } + + #[tokio::test] + async fn image_layer_iterator() { + let harness = 
TenantHarness::create("image_layer_iterator").unwrap(); + let (tenant, ctx) = harness.load().await; + + let tline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + fn get_key(id: u32) -> Key { + let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + const N: usize = 1000; + let test_imgs = (0..N) + .map(|idx| (get_key(idx as u32), Bytes::from(format!("img{idx:05}")))) + .collect_vec(); + let resident_layer = + produce_image_layer(&tenant, &tline, test_imgs.clone(), Lsn(0x10), &ctx) + .await + .unwrap(); + let img_layer = resident_layer.get_as_image(&ctx).await.unwrap(); + for max_read_size in [1, 1024] { + for batch_size in [1, 2, 4, 8, 3, 7, 13] { + println!("running with batch_size={batch_size} max_read_size={max_read_size}"); + // Test if the batch size is correctly determined + let mut iter = img_layer.iter(&ctx); + iter.planner = StreamingVectoredReadPlanner::new(max_read_size, batch_size); + let mut num_items = 0; + for _ in 0..3 { + iter.next_batch().await.unwrap(); + num_items += iter.key_values_batch.len(); + if max_read_size == 1 { + // every key should be a batch b/c the value is larger than max_read_size + assert_eq!(iter.key_values_batch.len(), 1); + } else { + assert_eq!(iter.key_values_batch.len(), batch_size); + } + if num_items >= N { + break; + } + iter.key_values_batch.clear(); + } + // Test if the result is correct + let mut iter = img_layer.iter(&ctx); + iter.planner = StreamingVectoredReadPlanner::new(max_read_size, batch_size); + assert_img_iter_equal(&mut iter, &test_imgs, Lsn(0x10)).await; + } + } + } } diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 32acb3f0cd0f..d856909f2eda 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -1905,7 +1905,7 @@ impl ResidentLayer { } #[cfg(test)] - pub(crate) async fn as_delta( + pub(crate) async fn get_as_delta( &self, ctx: &RequestContext, ) -> anyhow::Result<&delta_layer::DeltaLayerInner> { @@ -1915,6 +1915,18 @@ impl ResidentLayer { Image(_) => Err(anyhow::anyhow!("image layer")), } } + + #[cfg(test)] + pub(crate) async fn get_as_image( + &self, + ctx: &RequestContext, + ) -> anyhow::Result<&image_layer::ImageLayerInner> { + use LayerKind::*; + match self.downloaded.get(&self.owner.0, ctx).await? 
{ + Image(ref d) => Ok(d), + Delta(_) => Err(anyhow::anyhow!("delta layer")), + } + } } impl AsLayerDesc for ResidentLayer { diff --git a/pageserver/src/tenant/vectored_blob_io.rs b/pageserver/src/tenant/vectored_blob_io.rs index 6e825760e382..1241a1390209 100644 --- a/pageserver/src/tenant/vectored_blob_io.rs +++ b/pageserver/src/tenant/vectored_blob_io.rs @@ -77,7 +77,7 @@ pub(crate) struct VectoredReadBuilder { start: u64, end: u64, blobs_at: VecMap, - max_read_size: usize, + max_read_size: Option, } impl VectoredReadBuilder { @@ -90,7 +90,7 @@ impl VectoredReadBuilder { start_offset: u64, end_offset: u64, meta: BlobMeta, - max_read_size: usize, + max_read_size: Option, ) -> Self { let mut blobs_at = VecMap::default(); blobs_at @@ -111,7 +111,13 @@ impl VectoredReadBuilder { pub(crate) fn extend(&mut self, start: u64, end: u64, meta: BlobMeta) -> VectoredReadExtended { tracing::trace!(start, end, "trying to extend"); let size = (end - start) as usize; - if self.end == start && self.size() + size <= self.max_read_size { + if self.end == start && { + if let Some(max_read_size) = self.max_read_size { + self.size() + size <= max_read_size + } else { + true + } + } { self.end = end; self.blobs_at .append(start, meta) @@ -157,7 +163,7 @@ pub struct VectoredReadPlanner { // Arguments for previous blob passed into [`VectoredReadPlanner::handle`] prev: Option<(Key, Lsn, u64, BlobFlag)>, - max_read_size: usize, + max_read_size: Option, } impl VectoredReadPlanner { @@ -165,7 +171,20 @@ impl VectoredReadPlanner { Self { blobs: BTreeMap::new(), prev: None, - max_read_size, + max_read_size: Some(max_read_size), + } + } + + /// This function should *only* be used if the caller has a way to control the limit. e.g., in [`StreamingVectoredReadPlanner`], + /// it uses the vectored read planner to avoid duplicated logic on handling blob start/end, while expecting the vectored + /// read planner to give a single read to a continuous range of bytes in the image layer. Therefore, it does not need the + /// code path to split reads into chunks of `max_read_size`, and controls the read size itself. + #[cfg(test)] + pub(crate) fn new_caller_controlled_max_limit() -> Self { + Self { + blobs: BTreeMap::new(), + prev: None, + max_read_size: None, } } @@ -354,6 +373,87 @@ impl<'a> VectoredBlobReader<'a> { } } +/// Read planner used in [`crate::tenant::storage_layer::image_layer::ImageLayerIterator`]. It provides a streaming API for +/// getting read blobs. It returns a batch when `handle` gets called and when the current key would exceed the read_size and +/// max_cnt constraints. Underlying it uses [`VectoredReadPlanner`]. +#[cfg(test)] +pub struct StreamingVectoredReadPlanner { + planner: VectoredReadPlanner, + /// Max read size per batch + max_read_size: u64, + /// Max item count per batch + max_cnt: usize, + /// The first offset of this batch + this_batch_first_offset: Option, + /// Size of the current batch + cnt: usize, +} + +#[cfg(test)] +impl StreamingVectoredReadPlanner { + pub fn new(max_read_size: u64, max_cnt: usize) -> Self { + assert!(max_cnt > 0); + assert!(max_read_size > 0); + Self { + // We want to have exactly one read syscall (plus several others for index lookup) for each `next_batch` call. + // Therefore, we enforce `self.max_read_size` by ourselves instead of using the VectoredReadPlanner's capability, + // to avoid splitting into two I/Os. 
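With `max_read_size` now an `Option`, the `extend` check above treats `None` as "no byte budget at all". The helper below is a hedged sketch of that condition in isolation (the name `fits_in_budget` is not in the patch); it is behaviour-equivalent to the nested `if let` block and shows the slightly shorter `Option::map_or` form.

```
/// Equivalent to the budget check in `VectoredReadBuilder::extend`:
/// `None` means "no byte limit", `Some(max)` caps the combined read size.
fn fits_in_budget(current_size: usize, extra: usize, max_read_size: Option<usize>) -> bool {
    max_read_size.map_or(true, |max| current_size + extra <= max)
}

fn main() {
    assert!(fits_in_budget(100, 28, None));       // unlimited: always fits
    assert!(fits_in_budget(100, 28, Some(128)));  // exactly at the cap
    assert!(!fits_in_budget(100, 29, Some(128))); // one byte over the cap
    println!("all checks passed");
}
```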
+ planner: VectoredReadPlanner::new_caller_controlled_max_limit(), + max_cnt, + max_read_size, + this_batch_first_offset: None, + cnt: 0, + } + } + + fn emit(&mut self, this_batch_first_offset: u64) -> VectoredRead { + let planner = std::mem::replace( + &mut self.planner, + VectoredReadPlanner::new_caller_controlled_max_limit(), + ); + self.this_batch_first_offset = Some(this_batch_first_offset); + self.cnt = 1; + let mut batch = planner.finish(); + assert_eq!(batch.len(), 1, "should have exactly one read batch"); + batch.pop().unwrap() + } + + pub fn handle( + &mut self, + key: Key, + lsn: Lsn, + offset: u64, + flag: BlobFlag, + ) -> Option { + if let Some(begin_offset) = self.this_batch_first_offset { + // Each batch will have at least one item b/c `self.this_batch_first_offset` is set + // after one item gets processed + if offset - begin_offset > self.max_read_size { + self.planner.handle_range_end(offset); // End the current batch with the offset + let batch = self.emit(offset); // Produce a batch + self.planner.handle(key, lsn, offset, flag); // Add this key to the next batch + return Some(batch); + } + } else { + self.this_batch_first_offset = Some(offset) + } + if self.cnt >= self.max_cnt { + self.planner.handle_range_end(offset); // End the current batch with the offset + let batch = self.emit(offset); // Produce a batch + self.planner.handle(key, lsn, offset, flag); // Add this key to the next batch + return Some(batch); + } + self.planner.handle(key, lsn, offset, flag); // Add this key to the current batch + self.cnt += 1; + None + } + + pub fn handle_range_end(&mut self, offset: u64) -> VectoredRead { + self.planner.handle_range_end(offset); + self.emit(offset) + } +} + #[cfg(test)] mod tests { use super::*; From 9b98823d615c991422b6edd3ec3197192f763cf2 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 25 Jun 2024 19:00:14 -0400 Subject: [PATCH 22/57] bottom-most-compaction: use in test_gc_feedback + fix bugs (#8103) Adds manual compaction trigger; add gc compaction to test_gc_feedback Part of https://github.com/neondatabase/neon/issues/8002 ``` test_gc_feedback[debug-pg15].logical_size: 50 Mb test_gc_feedback[debug-pg15].physical_size: 2269 Mb test_gc_feedback[debug-pg15].physical/logical ratio: 44.5302 test_gc_feedback[debug-pg15].max_total_num_of_deltas: 7 test_gc_feedback[debug-pg15].max_num_of_deltas_above_image: 2 test_gc_feedback[debug-pg15].logical_size_after_bottom_most_compaction: 50 Mb test_gc_feedback[debug-pg15].physical_size_after_bottom_most_compaction: 287 Mb test_gc_feedback[debug-pg15].physical/logical ratio after bottom_most_compaction: 5.6312 test_gc_feedback[debug-pg15].max_total_num_of_deltas_after_bottom_most_compaction: 4 test_gc_feedback[debug-pg15].max_num_of_deltas_above_image_after_bottom_most_compaction: 1 ``` ## Summary of changes * Add the manual compaction trigger * Use in test_gc_feedback * Add a guard to avoid running it with retain_lsns * Fix: Do `schedule_compaction_update` after compaction * Fix: Supply deltas in the correct order to reconstruct value --------- Signed-off-by: Alex Chi Z --- pageserver/src/http/routes.rs | 8 ++++ .../src/tenant/storage_layer/delta_layer.rs | 1 - .../src/tenant/storage_layer/image_layer.rs | 1 - pageserver/src/tenant/storage_layer/layer.rs | 2 - pageserver/src/tenant/timeline.rs | 2 +- pageserver/src/tenant/timeline/compaction.rs | 41 ++++++++++++---- .../src/tenant/timeline/layer_manager.rs | 1 - test_runner/fixtures/pageserver/http.py | 3 ++ test_runner/performance/test_gc_feedback.py | 48 
++++++++++++++++++- 9 files changed, 92 insertions(+), 15 deletions(-) diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index d6ba9ee35e17..41d096d7bbb6 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1652,6 +1652,14 @@ async fn timeline_compact_handler( if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? { flags |= CompactFlags::ForceImageLayerCreation; } + if Some(true) == parse_query_param::<_, bool>(&request, "enhanced_gc_bottom_most_compaction")? { + if !cfg!(feature = "testing") { + return Err(ApiError::InternalServerError(anyhow!( + "enhanced_gc_bottom_most_compaction is only available in testing mode" + ))); + } + flags |= CompactFlags::EnhancedGcBottomMostCompaction; + } let wait_until_uploaded = parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false); diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index ab3ef4980fbe..bf5d9249ebb5 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -928,7 +928,6 @@ impl DeltaLayerInner { } /// Load all key-values in the delta layer, should be replaced by an iterator-based interface in the future. - #[cfg(test)] pub(super) async fn load_key_values( &self, ctx: &RequestContext, diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 99bce1890d47..50aacbd9ad46 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -486,7 +486,6 @@ impl ImageLayerInner { } /// Load all key-values in the delta layer, should be replaced by an iterator-based interface in the future. - #[cfg(test)] pub(super) async fn load_key_values( &self, ctx: &RequestContext, diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index d856909f2eda..7eb42d81869b 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -389,7 +389,6 @@ impl Layer { } /// Get all key/values in the layer. Should be replaced with an iterator-based API in the future. - #[cfg(test)] pub(crate) async fn load_key_values( &self, ctx: &RequestContext, @@ -1774,7 +1773,6 @@ impl DownloadedLayer { } } - #[cfg(test)] async fn load_key_values( &self, owner: &Arc, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 5398ad399c84..1175b750179d 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -686,6 +686,7 @@ pub enum GetLogicalSizePriority { pub(crate) enum CompactFlags { ForceRepartition, ForceImageLayerCreation, + EnhancedGcBottomMostCompaction, } impl std::fmt::Debug for Timeline { @@ -1096,7 +1097,6 @@ impl Timeline { /// scan iterator interface. We could optimize this interface later to avoid some checks in the vectored /// get path to maintain and split the probing and to-be-probe keyspace. We also need to ensure that /// the scan operation will not cause OOM in the future. 
- #[allow(dead_code)] pub(crate) async fn scan( &self, keyspace: KeySpace, diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index de1263fadf96..efaa6144af95 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -47,10 +47,14 @@ impl Timeline { /// TODO: cancellation pub(crate) async fn compact_legacy( self: &Arc, - _cancel: &CancellationToken, + cancel: &CancellationToken, flags: EnumSet, ctx: &RequestContext, ) -> Result<(), CompactionError> { + if flags.contains(CompactFlags::EnhancedGcBottomMostCompaction) { + return self.compact_with_gc(cancel, ctx).await; + } + // High level strategy for compaction / image creation: // // 1. First, calculate the desired "partitioning" of the @@ -959,15 +963,20 @@ impl Timeline { /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon, /// and create delta layers with all deltas >= gc horizon. - #[cfg(test)] pub(crate) async fn compact_with_gc( self: &Arc, _cancel: &CancellationToken, ctx: &RequestContext, ) -> Result<(), CompactionError> { + use crate::tenant::storage_layer::ValueReconstructState; use std::collections::BTreeSet; - use crate::tenant::storage_layer::ValueReconstructState; + info!("running enhanced gc bottom-most compaction"); + + scopeguard::defer! { + info!("done enhanced gc bottom-most compaction"); + }; + // Step 0: pick all delta layers + image layers below/intersect with the GC horizon. // The layer selection has the following properties: // 1. If a layer is in the selection, all layers below it are in the selection. @@ -976,6 +985,11 @@ impl Timeline { let guard = self.layers.read().await; let layers = guard.layer_map(); let gc_info = self.gc_info.read().unwrap(); + if !gc_info.retain_lsns.is_empty() || !gc_info.leases.is_empty() { + return Err(CompactionError::Other(anyhow!( + "enhanced legacy compaction currently does not support retain_lsns (branches)" + ))); + } let gc_cutoff = Lsn::min(gc_info.cutoffs.horizon, gc_info.cutoffs.pitr); let mut selected_layers = Vec::new(); // TODO: consider retain_lsns @@ -987,6 +1001,11 @@ impl Timeline { } (selected_layers, gc_cutoff) }; + info!( + "picked {} layers for compaction with gc_cutoff={}", + layer_selection.len(), + gc_cutoff + ); // Step 1: (In the future) construct a k-merge iterator over all layers. For now, simply collect all keys + LSNs. // Also, collect the layer information to decide when to split the new delta layers. 
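The ordering fix a bit further down ("do not reverse delta_above_base_image") and the regression test added later in this series both come down to one observable rule: WAL records must end up applied on top of the base image in increasing LSN order. The sketch below illustrates only that rule, using the same string-append convention as the test; the `reconstruct` helper is illustrative and is not the pageserver's `ValueReconstructState`.

```
/// Apply "WAL records" (here just string suffixes) on top of a base image,
/// lowest LSN first, which is the order in which they must take effect.
fn reconstruct(base_image: (u64, &str), mut records: Vec<(u64, &str)>) -> String {
    records.sort_by_key(|(lsn, _)| *lsn);
    let mut value = base_image.1.to_string();
    for (lsn, suffix) in records {
        // Sketch assumes all supplied records are newer than the base image.
        assert!(lsn > base_image.0);
        value.push_str(suffix);
    }
    value
}

fn main() {
    let img = (0x10, "value 3@0x10");
    let deltas = vec![(0x30, "@0x30"), (0x28, "@0x28"), (0x40, "@0x40")];
    // Applying in LSN order yields "...@0x28@0x30@0x40", not "...@0x30@0x28@0x40".
    assert_eq!(reconstruct(img, deltas), "value 3@0x10@0x28@0x30@0x40");
    println!("ok");
}
```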
let mut all_key_values = Vec::new(); @@ -1064,10 +1083,8 @@ impl Timeline { } else if *lsn <= horizon { match val { crate::repository::Value::Image(image) => { - if lsn <= &horizon { - base_image = Some((*lsn, image.clone())); - break; - } + base_image = Some((*lsn, image.clone())); + break; } crate::repository::Value::WalRecord(wal) => { delta_above_base_image.push((*lsn, wal.clone())); @@ -1075,7 +1092,7 @@ impl Timeline { } } } - delta_above_base_image.reverse(); + // do not reverse delta_above_base_image, reconstruct state expects reversely-ordered records keys_above_horizon.reverse(); let state = ValueReconstructState { img: base_image, @@ -1200,6 +1217,11 @@ impl Timeline { ); let image_layer = image_layer_writer.finish(self, ctx).await?; + info!( + "produced {} delta layers and {} image layers", + delta_layers.len(), + 1 + ); let mut compact_to = Vec::new(); compact_to.extend(delta_layers); compact_to.push(image_layer); @@ -1208,6 +1230,9 @@ impl Timeline { let mut guard = self.layers.write().await; guard.finish_gc_compaction(&layer_selection, &compact_to, &self.metrics) }; + + self.remote_client + .schedule_compaction_update(&layer_selection, &compact_to)?; Ok(()) } } diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 550a9a567a43..948237e06a5e 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -227,7 +227,6 @@ impl LayerManager { } /// Called when a GC-compaction is completed. - #[cfg(test)] pub(crate) fn finish_gc_compaction( &mut self, compact_from: &[Layer], diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 2a7cbea20010..794961271418 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -573,6 +573,7 @@ def timeline_compact( force_repartition=False, force_image_layer_creation=False, wait_until_uploaded=False, + enhanced_gc_bottom_most_compaction=False, ): self.is_testing_enabled_or_skip() query = {} @@ -582,6 +583,8 @@ def timeline_compact( query["force_image_layer_creation"] = "true" if wait_until_uploaded: query["wait_until_uploaded"] = "true" + if enhanced_gc_bottom_most_compaction: + query["enhanced_gc_bottom_most_compaction"] = "true" log.info(f"Requesting compact: tenant {tenant_id}, timeline {timeline_id}") res = self.put( diff --git a/test_runner/performance/test_gc_feedback.py b/test_runner/performance/test_gc_feedback.py index 9a03994b2925..4c326111c247 100644 --- a/test_runner/performance/test_gc_feedback.py +++ b/test_runner/performance/test_gc_feedback.py @@ -33,7 +33,7 @@ def test_gc_feedback(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma "checkpoint_distance": f"{1024 ** 2}", "compaction_target_size": f"{1024 ** 2}", # set PITR interval to be small, so we can do GC - "pitr_interval": "10 s", + "pitr_interval": "60 s", # "compaction_threshold": "3", # "image_creation_threshold": "2", } @@ -99,6 +99,52 @@ def test_gc_feedback(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma MetricReport.LOWER_IS_BETTER, ) + client.timeline_compact(tenant_id, timeline_id, enhanced_gc_bottom_most_compaction=True) + tline_detail = client.timeline_detail(tenant_id, timeline_id) + logical_size = tline_detail["current_logical_size"] + physical_size = tline_detail["current_physical_size"] + + max_num_of_deltas_above_image = 0 + max_total_num_of_deltas = 0 + for key_range in client.perf_info(tenant_id, timeline_id): + 
max_total_num_of_deltas = max(max_total_num_of_deltas, key_range["total_num_of_deltas"]) + max_num_of_deltas_above_image = max( + max_num_of_deltas_above_image, key_range["num_of_deltas_above_image"] + ) + zenbenchmark.record( + "logical_size_after_bottom_most_compaction", + logical_size // MB, + "Mb", + MetricReport.LOWER_IS_BETTER, + ) + zenbenchmark.record( + "physical_size_after_bottom_most_compaction", + physical_size // MB, + "Mb", + MetricReport.LOWER_IS_BETTER, + ) + zenbenchmark.record( + "physical/logical ratio after bottom_most_compaction", + physical_size / logical_size, + "", + MetricReport.LOWER_IS_BETTER, + ) + zenbenchmark.record( + "max_total_num_of_deltas_after_bottom_most_compaction", + max_total_num_of_deltas, + "", + MetricReport.LOWER_IS_BETTER, + ) + zenbenchmark.record( + "max_num_of_deltas_above_image_after_bottom_most_compaction", + max_num_of_deltas_above_image, + "", + MetricReport.LOWER_IS_BETTER, + ) + + with endpoint.cursor() as cur: + cur.execute("SELECT * FROM t") # ensure data is not corrupted + layer_map_path = env.repo_dir / "layer-map.json" log.info(f"Writing layer map to {layer_map_path}") with layer_map_path.open("w") as f: From 9b623d3a2cc8048de2b5b8475bb51a747037aa4b Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Wed, 26 Jun 2024 07:46:52 +0200 Subject: [PATCH 23/57] add commit hash to S3 object identifier for artifacts on S3 (#8161) In future we may want to run periodic tests on dedicated cloud instances that are not GitHub action runners. To allow these to download artifact binaries for a specific commit hash we want to make the search by commit hash possible and prefix the S3 objects with `artifacts/${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}` --------- Co-authored-by: Alexander Bayandin --- .github/actions/download/action.yml | 2 +- .github/actions/upload/action.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/download/action.yml b/.github/actions/download/action.yml index ce26e7825b81..01c216b1ac23 100644 --- a/.github/actions/download/action.yml +++ b/.github/actions/download/action.yml @@ -26,7 +26,7 @@ runs: TARGET: ${{ inputs.path }} ARCHIVE: /tmp/downloads/${{ inputs.name }}.tar.zst SKIP_IF_DOES_NOT_EXIST: ${{ inputs.skip-if-does-not-exist }} - PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}', github.run_id, github.run_attempt) }} + PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}/{2}', github.event.pull_request.head.sha || github.sha, github.run_id, github.run_attempt) }} run: | BUCKET=neon-github-public-dev FILENAME=$(basename $ARCHIVE) diff --git a/.github/actions/upload/action.yml b/.github/actions/upload/action.yml index 63973dfbe7dd..edcece7d2be0 100644 --- a/.github/actions/upload/action.yml +++ b/.github/actions/upload/action.yml @@ -8,7 +8,7 @@ inputs: description: "A directory or file to upload" required: true prefix: - description: "S3 prefix. Default is '${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'" + description: "S3 prefix. 
Default is '${GITHUB_SHA}/${GITHUB_RUN_ID}/${GITHUB_RUN_ATTEMPT}'" required: false runs: @@ -45,7 +45,7 @@ runs: env: SOURCE: ${{ inputs.path }} ARCHIVE: /tmp/uploads/${{ inputs.name }}.tar.zst - PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}', github.run_id, github.run_attempt) }} + PREFIX: artifacts/${{ inputs.prefix || format('{0}/{1}/{2}', github.event.pull_request.head.sha || github.sha, github.run_id , github.run_attempt) }} run: | BUCKET=neon-github-public-dev FILENAME=$(basename $ARCHIVE) From fdadd6a15216e97dc5ee55c74be92030087c06e1 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 26 Jun 2024 15:13:03 +0300 Subject: [PATCH 24/57] Remove primary_is_running (#8162) This was a half-finished mechanism to allow a replica to enter hot standby mode sooner, without waiting for a running-xacts record. It had issues, and we are working on a better mechanism to replace it. The control plane might still set the flag in the spec file, but compute_ctl will simply ignore it. --- compute_tools/src/config.rs | 6 ------ control_plane/src/endpoint.rs | 1 - libs/compute_api/src/spec.rs | 6 ------ pgxn/neon/neon.c | 10 ---------- test_runner/fixtures/neon_fixtures.py | 1 - 5 files changed, 24 deletions(-) diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 2c4aec4116ae..479100eb895f 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -83,12 +83,6 @@ pub fn write_postgres_conf( ComputeMode::Replica => { // hot_standby is 'on' by default, but let's be explicit writeln!(file, "hot_standby=on")?; - - // Inform the replica about the primary state - // Default is 'false' - if let Some(primary_is_running) = spec.primary_is_running { - writeln!(file, "neon.primary_is_running={}", primary_is_running)?; - } } } diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 20371e1cb8e1..b928bbfc308e 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -592,7 +592,6 @@ impl Endpoint { remote_extensions, pgbouncer_settings: None, shard_stripe_size: Some(shard_stripe_size), - primary_is_running: None, }; let spec_path = self.endpoint_path().join("spec.json"); std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?; diff --git a/libs/compute_api/src/spec.rs b/libs/compute_api/src/spec.rs index 1c4ee2089fed..883c624f71cd 100644 --- a/libs/compute_api/src/spec.rs +++ b/libs/compute_api/src/spec.rs @@ -96,12 +96,6 @@ pub struct ComputeSpec { // Stripe size for pageserver sharding, in pages #[serde(default)] pub shard_stripe_size: Option, - - // When we are starting a new replica in hot standby mode, - // we need to know if the primary is running. - // This is used to determine if replica should wait for - // RUNNING_XACTS from primary or not. - pub primary_is_running: Option, } /// Feature flag to signal `compute_ctl` to enable certain experimental functionality. diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index 276d1542fe3b..b6b2db7e71ad 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -41,7 +41,6 @@ PG_MODULE_MAGIC; void _PG_init(void); static int logical_replication_max_snap_files = 300; -bool primary_is_running = false; static void InitLogicalReplicationMonitor(void) @@ -289,15 +288,6 @@ _PG_init(void) pg_init_extension_server(); - DefineCustomBoolVariable( - "neon.primary_is_running", - "true if the primary was running at replica startup. 
false otherwise", - NULL, - &primary_is_running, - false, - PGC_POSTMASTER, - 0, - NULL, NULL, NULL); /* * Important: This must happen after other parts of the extension are * loaded, otherwise any settings to GUCs that were set before the diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 84fb1f7cb47d..d8da2a3a3e75 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3539,7 +3539,6 @@ def create( # and make tests more stable. config_lines = ["max_replication_write_lag=15MB"] + config_lines - config_lines = ["neon.primary_is_running=on"] + config_lines self.config(config_lines) return self From 5d2f9ffa89bf98290344aed0a22fcede04664831 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 26 Jun 2024 09:34:41 -0400 Subject: [PATCH 25/57] test(bottom-most-compaction): wal apply order (#8163) A follow-up on https://github.com/neondatabase/neon/pull/8103/. Previously, main branch fails with: ``` assertion `left == right` failed left: b"value 3@0x10@0x30@0x28@0x40" right: b"value 3@0x10@0x28@0x30@0x40" ``` This gets fixed after #8103 gets merged. Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 4e03e09a9b8d..30e855eaa2dc 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -7069,6 +7069,16 @@ mod tests { Lsn(0x30), Value::WalRecord(NeonWalRecord::wal_append("@0x30")), ), + ( + get_key(3), + Lsn(0x28), + Value::WalRecord(NeonWalRecord::wal_append("@0x28")), + ), + ( + get_key(3), + Lsn(0x30), + Value::WalRecord(NeonWalRecord::wal_append("@0x30")), + ), ( get_key(3), Lsn(0x40), @@ -7128,7 +7138,7 @@ mod tests { Bytes::from_static(b"value 0@0x10"), Bytes::from_static(b"value 1@0x10@0x20"), Bytes::from_static(b"value 2@0x10@0x30"), - Bytes::from_static(b"value 3@0x10@0x40"), + Bytes::from_static(b"value 3@0x10@0x28@0x30@0x40"), Bytes::from_static(b"value 4@0x10"), Bytes::from_static(b"value 5@0x10@0x20"), Bytes::from_static(b"value 6@0x10@0x20"), @@ -7141,7 +7151,7 @@ mod tests { Bytes::from_static(b"value 0@0x10"), Bytes::from_static(b"value 1@0x10@0x20"), Bytes::from_static(b"value 2@0x10@0x30"), - Bytes::from_static(b"value 3@0x10"), + Bytes::from_static(b"value 3@0x10@0x28@0x30"), Bytes::from_static(b"value 4@0x10"), Bytes::from_static(b"value 5@0x10@0x20"), Bytes::from_static(b"value 6@0x10@0x20"), From 47e5bf3bbbb97b3f95d545a03bc0c20c782eb806 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Wed, 26 Jun 2024 15:26:52 +0100 Subject: [PATCH 26/57] Improve term reject message in walproposer (#8164) Co-authored-by: Tristan Partin --- pgxn/neon/walproposer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pgxn/neon/walproposer.c b/pgxn/neon/walproposer.c index dbc67a24f5ca..c53257923a3f 100644 --- a/pgxn/neon/walproposer.c +++ b/pgxn/neon/walproposer.c @@ -1447,7 +1447,7 @@ RecvAppendResponses(Safekeeper *sk) * core as this is kinda expected scenario. 
*/ disable_core_dump(); - wp_log(PANIC, "WAL acceptor %s:%s with term " INT64_FORMAT " rejected our request, our term " INT64_FORMAT "", + wp_log(PANIC, "WAL acceptor %s:%s with term " INT64_FORMAT " rejected our request, our term " INT64_FORMAT ", meaning another compute is running at the same time, and it conflicts with us", sk->host, sk->port, sk->appendResponse.term, wp->propTerm); } From d7e349d33c019554402ff2b4e519a668638ec88f Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Wed, 26 Jun 2024 16:11:26 +0100 Subject: [PATCH 27/57] proxy: report blame for passthrough disconnect io errors (#8170) ## Problem Hard to debug the disconnection reason currently. ## Summary of changes Keep track of error-direction, and therefore error source (client vs compute) during passthrough. --- proxy/src/bin/pg_sni_router.rs | 9 ++-- proxy/src/proxy.rs | 8 +++- proxy/src/proxy/copy_bidirectional.rs | 66 ++++++++++++++++++++------- proxy/src/proxy/passthrough.rs | 10 ++-- proxy/src/serverless/websocket.rs | 8 +++- 5 files changed, 75 insertions(+), 26 deletions(-) diff --git a/proxy/src/bin/pg_sni_router.rs b/proxy/src/bin/pg_sni_router.rs index e1674049a60a..44e880838e07 100644 --- a/proxy/src/bin/pg_sni_router.rs +++ b/proxy/src/bin/pg_sni_router.rs @@ -10,7 +10,7 @@ use itertools::Itertools; use proxy::config::TlsServerEndPoint; use proxy::context::RequestMonitoring; use proxy::metrics::{Metrics, ThreadPoolMetrics}; -use proxy::proxy::{copy_bidirectional_client_compute, run_until_cancelled}; +use proxy::proxy::{copy_bidirectional_client_compute, run_until_cancelled, ErrorSource}; use rustls::pki_types::PrivateKeyDer; use tokio::net::TcpListener; @@ -286,7 +286,10 @@ async fn handle_client( // Starting from here we only proxy the client's traffic. info!("performing the proxy pass..."); - let _ = copy_bidirectional_client_compute(&mut tls_stream, &mut client).await?; - Ok(()) + match copy_bidirectional_client_compute(&mut tls_stream, &mut client).await { + Ok(_) => Ok(()), + Err(ErrorSource::Client(err)) => Err(err).context("client"), + Err(ErrorSource::Compute(err)) => Err(err).context("compute"), + } } diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs index 072f51958f48..3edefcf21a49 100644 --- a/proxy/src/proxy.rs +++ b/proxy/src/proxy.rs @@ -8,6 +8,7 @@ pub mod passthrough; pub mod retry; pub mod wake_compute; pub use copy_bidirectional::copy_bidirectional_client_compute; +pub use copy_bidirectional::ErrorSource; use crate::{ auth, @@ -148,8 +149,11 @@ pub async fn task_main( ctx.log_connect(); match p.proxy_pass().instrument(span.clone()).await { Ok(()) => {} - Err(e) => { - error!(parent: &span, "per-client task finished with an error: {e:#}"); + Err(ErrorSource::Client(e)) => { + error!(parent: &span, "per-client task finished with an IO error from the client: {e:#}"); + } + Err(ErrorSource::Compute(e)) => { + error!(parent: &span, "per-client task finished with an IO error from the compute: {e:#}"); } } } diff --git a/proxy/src/proxy/copy_bidirectional.rs b/proxy/src/proxy/copy_bidirectional.rs index aaf3688f21a0..3c45fff969f8 100644 --- a/proxy/src/proxy/copy_bidirectional.rs +++ b/proxy/src/proxy/copy_bidirectional.rs @@ -13,12 +13,39 @@ enum TransferState { Done(u64), } +#[derive(Debug)] +pub enum ErrorDirection { + Read(io::Error), + Write(io::Error), +} + +impl ErrorSource { + fn from_client(err: ErrorDirection) -> ErrorSource { + match err { + ErrorDirection::Read(client) => Self::Client(client), + ErrorDirection::Write(compute) => Self::Compute(compute), + } + } + fn 
from_compute(err: ErrorDirection) -> ErrorSource { + match err { + ErrorDirection::Write(client) => Self::Client(client), + ErrorDirection::Read(compute) => Self::Compute(compute), + } + } +} + +#[derive(Debug)] +pub enum ErrorSource { + Client(io::Error), + Compute(io::Error), +} + fn transfer_one_direction( cx: &mut Context<'_>, state: &mut TransferState, r: &mut A, w: &mut B, -) -> Poll> +) -> Poll> where A: AsyncRead + AsyncWrite + Unpin + ?Sized, B: AsyncRead + AsyncWrite + Unpin + ?Sized, @@ -32,7 +59,7 @@ where *state = TransferState::ShuttingDown(count); } TransferState::ShuttingDown(count) => { - ready!(w.as_mut().poll_shutdown(cx))?; + ready!(w.as_mut().poll_shutdown(cx)).map_err(ErrorDirection::Write)?; *state = TransferState::Done(*count); } TransferState::Done(count) => return Poll::Ready(Ok(*count)), @@ -44,7 +71,7 @@ where pub async fn copy_bidirectional_client_compute( client: &mut Client, compute: &mut Compute, -) -> Result<(u64, u64), std::io::Error> +) -> Result<(u64, u64), ErrorSource> where Client: AsyncRead + AsyncWrite + Unpin + ?Sized, Compute: AsyncRead + AsyncWrite + Unpin + ?Sized, @@ -54,9 +81,11 @@ where poll_fn(|cx| { let mut client_to_compute_result = - transfer_one_direction(cx, &mut client_to_compute, client, compute)?; + transfer_one_direction(cx, &mut client_to_compute, client, compute) + .map_err(ErrorSource::from_client)?; let mut compute_to_client_result = - transfer_one_direction(cx, &mut compute_to_client, compute, client)?; + transfer_one_direction(cx, &mut compute_to_client, compute, client) + .map_err(ErrorSource::from_compute)?; // Early termination checks from compute to client. if let TransferState::Done(_) = compute_to_client { @@ -65,18 +94,20 @@ where // Initiate shutdown client_to_compute = TransferState::ShuttingDown(buf.amt); client_to_compute_result = - transfer_one_direction(cx, &mut client_to_compute, client, compute)?; + transfer_one_direction(cx, &mut client_to_compute, client, compute) + .map_err(ErrorSource::from_client)?; } } - // Early termination checks from compute to client. + // Early termination checks from client to compute. 
if let TransferState::Done(_) = client_to_compute { if let TransferState::Running(buf) = &compute_to_client { info!("Client is done, terminate compute"); // Initiate shutdown compute_to_client = TransferState::ShuttingDown(buf.amt); compute_to_client_result = - transfer_one_direction(cx, &mut compute_to_client, client, compute)?; + transfer_one_direction(cx, &mut compute_to_client, compute, client) + .map_err(ErrorSource::from_compute)?; } } @@ -138,7 +169,7 @@ impl CopyBuffer { cx: &mut Context<'_>, mut reader: Pin<&mut R>, mut writer: Pin<&mut W>, - ) -> Poll> + ) -> Poll> where R: AsyncRead + ?Sized, W: AsyncWrite + ?Sized, @@ -149,11 +180,11 @@ impl CopyBuffer { // Top up the buffer towards full if we can read a bit more // data - this should improve the chances of a large write if !me.read_done && me.cap < me.buf.len() { - ready!(me.poll_fill_buf(cx, reader.as_mut()))?; + ready!(me.poll_fill_buf(cx, reader.as_mut())).map_err(ErrorDirection::Read)?; } Poll::Pending } - res => res, + res => res.map_err(ErrorDirection::Write), } } @@ -162,7 +193,7 @@ impl CopyBuffer { cx: &mut Context<'_>, mut reader: Pin<&mut R>, mut writer: Pin<&mut W>, - ) -> Poll> + ) -> Poll> where R: AsyncRead + ?Sized, W: AsyncWrite + ?Sized, @@ -176,12 +207,13 @@ impl CopyBuffer { match self.poll_fill_buf(cx, reader.as_mut()) { Poll::Ready(Ok(())) => (), - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Ready(Err(err)) => return Poll::Ready(Err(ErrorDirection::Read(err))), Poll::Pending => { // Try flushing when the reader has no progress to avoid deadlock // when the reader depends on buffered writer. if self.need_flush { - ready!(writer.as_mut().poll_flush(cx))?; + ready!(writer.as_mut().poll_flush(cx)) + .map_err(ErrorDirection::Write)?; self.need_flush = false; } @@ -194,10 +226,10 @@ impl CopyBuffer { while self.pos < self.cap { let i = ready!(self.poll_write_buf(cx, reader.as_mut(), writer.as_mut()))?; if i == 0 { - return Poll::Ready(Err(io::Error::new( + return Poll::Ready(Err(ErrorDirection::Write(io::Error::new( io::ErrorKind::WriteZero, "write zero byte into writer", - ))); + )))); } else { self.pos += i; self.amt += i as u64; @@ -216,7 +248,7 @@ impl CopyBuffer { // If we've written all the data and we've seen EOF, flush out the // data and finish the transfer. if self.pos == self.cap && self.read_done { - ready!(writer.as_mut().poll_flush(cx))?; + ready!(writer.as_mut().poll_flush(cx)).map_err(ErrorDirection::Write)?; return Poll::Ready(Ok(self.amt)); } } diff --git a/proxy/src/proxy/passthrough.rs b/proxy/src/proxy/passthrough.rs index 62de79946f3c..9942fac383b7 100644 --- a/proxy/src/proxy/passthrough.rs +++ b/proxy/src/proxy/passthrough.rs @@ -10,13 +10,15 @@ use tokio::io::{AsyncRead, AsyncWrite}; use tracing::info; use utils::measured_stream::MeasuredStream; +use super::copy_bidirectional::ErrorSource; + /// Forward bytes in both directions (client <-> compute). 
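The blame rule encoded by `from_client` and `from_compute` above is easy to get backwards: a read error is charged to the peer being read from, a write error to the peer being written to. The standalone sketch below mirrors those two mappings as free functions and adds a small `main` purely for illustration; it is not the proxy code itself.

```
use std::io;

#[derive(Debug)]
enum ErrorDirection {
    Read(io::Error),
    Write(io::Error),
}

#[derive(Debug)]
enum ErrorSource {
    Client(io::Error),
    Compute(io::Error),
}

/// Error while driving the client -> compute half: we read from the client
/// and write to the compute.
fn from_client(err: ErrorDirection) -> ErrorSource {
    match err {
        ErrorDirection::Read(e) => ErrorSource::Client(e),
        ErrorDirection::Write(e) => ErrorSource::Compute(e),
    }
}

/// Error while driving the compute -> client half: we read from the compute
/// and write to the client.
fn from_compute(err: ErrorDirection) -> ErrorSource {
    match err {
        ErrorDirection::Read(e) => ErrorSource::Compute(e),
        ErrorDirection::Write(e) => ErrorSource::Client(e),
    }
}

fn main() {
    let broken = || io::Error::new(io::ErrorKind::BrokenPipe, "peer went away");
    // Client hung up while we were reading its traffic: blame the client.
    println!("{:?}", from_client(ErrorDirection::Read(broken())));
    // Compute hung up while we were forwarding client bytes to it: blame the compute.
    println!("{:?}", from_client(ErrorDirection::Write(broken())));
    // Compute hung up while we were reading its responses: blame the compute.
    println!("{:?}", from_compute(ErrorDirection::Read(broken())));
}
```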
#[tracing::instrument(skip_all)] pub async fn proxy_pass( client: impl AsyncRead + AsyncWrite + Unpin, compute: impl AsyncRead + AsyncWrite + Unpin, aux: MetricsAuxInfo, -) -> anyhow::Result<()> { +) -> Result<(), ErrorSource> { let usage = USAGE_METRICS.register(Ids { endpoint_id: aux.endpoint_id, branch_id: aux.branch_id, @@ -66,9 +68,11 @@ pub struct ProxyPassthrough { } impl ProxyPassthrough { - pub async fn proxy_pass(self) -> anyhow::Result<()> { + pub async fn proxy_pass(self) -> Result<(), ErrorSource> { let res = proxy_pass(self.client, self.compute.stream, self.aux).await; - self.compute.cancel_closure.try_cancel_query().await?; + if let Err(err) = self.compute.cancel_closure.try_cancel_query().await { + tracing::error!(?err, "could not cancel the query in the database"); + } res } } diff --git a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 0e9772733da4..0d5b88f07b0c 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -1,3 +1,4 @@ +use crate::proxy::ErrorSource; use crate::{ cancellation::CancellationHandlerMain, config::ProxyConfig, @@ -7,6 +8,7 @@ use crate::{ proxy::{handle_client, ClientMode}, rate_limiter::EndpointRateLimiter, }; +use anyhow::Context as _; use bytes::{Buf, BufMut, Bytes, BytesMut}; use framed_websockets::{Frame, OpCode, WebSocketServer}; use futures::{Sink, Stream}; @@ -165,7 +167,11 @@ pub async fn serve_websocket( Ok(Some(p)) => { ctx.set_success(); ctx.log_connect(); - p.proxy_pass().await + match p.proxy_pass().await { + Ok(()) => Ok(()), + Err(ErrorSource::Client(err)) => Err(err).context("client"), + Err(ErrorSource::Compute(err)) => Err(err).context("compute"), + } } } } From 5af9660b9e4ad804433335662a3c1bf79cfeb637 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Wed, 26 Jun 2024 16:37:04 +0100 Subject: [PATCH 28/57] CI(build-tools): don't install Postgres 14 (#6540) ## Problem We install Postgres 14 in `build-tools` image, but we don't need it. We use Postgres binaries, which we build ourselves. 
## Summary of changes - Remove Postgresql 14 installation from `build-tools` image --- Dockerfile.build-tools | 7 ------- test_runner/fixtures/neon_fixtures.py | 11 +++++++++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index 5dd2c13c0e2b..a1483e550ec4 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -73,13 +73,6 @@ RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \ && bash -c 'for f in /usr/bin/clang*-${LLVM_VERSION} /usr/bin/llvm*-${LLVM_VERSION}; do ln -s "${f}" "${f%-${LLVM_VERSION}}"; done' \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# PostgreSQL 14 -RUN curl -fsSL 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | apt-key add - \ - && echo 'deb http://apt.postgresql.org/pub/repos/apt bullseye-pgdg main' > /etc/apt/sources.list.d/pgdg.list \ - && apt update \ - && apt install -y postgresql-client-14 \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - # AWS CLI RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" \ && unzip -q awscliv2.zip \ diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index d8da2a3a3e75..745363721894 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3075,9 +3075,16 @@ def __init__( host: str = "127.0.0.1", port: int = 5432, ): - assert shutil.which(path) + search_path = None + if (d := os.getenv("POSTGRES_DISTRIB_DIR")) is not None and ( + v := os.getenv("DEFAULT_PG_VERSION") + ) is not None: + search_path = Path(d) / f"v{v}" / "bin" - self.path = path + full_path = shutil.which(path, path=search_path) + assert full_path is not None + + self.path = full_path self.database_url = f"postgres://{host}:{port}/main?options=project%3Dgeneric-project-name" async def run(self, query: Optional[str] = None) -> asyncio.subprocess.Process: From 3118c245213af0cdcd890cd559567366d7a85b0e Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Wed, 26 Jun 2024 16:46:14 +0100 Subject: [PATCH 29/57] Panic on unexpected error in simtests (#8169) --- safekeeper/tests/random_test.rs | 2 +- safekeeper/tests/walproposer_sim/safekeeper.rs | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/safekeeper/tests/random_test.rs b/safekeeper/tests/random_test.rs index 6c6f6a8c9674..7bdee35cd795 100644 --- a/safekeeper/tests/random_test.rs +++ b/safekeeper/tests/random_test.rs @@ -10,7 +10,7 @@ use crate::walproposer_sim::{ pub mod walproposer_sim; // Generates 2000 random seeds and runs a schedule for each of them. -// If you seed this test fail, please report the last seed to the +// If you see this test fail, please report the last seed to the // @safekeeper team. 
#[test] fn test_random_schedules() -> anyhow::Result<()> { diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 47539872a6c8..9c81d2eb4d45 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -21,7 +21,7 @@ use safekeeper::{ wal_storage::Storage, SafeKeeperConf, }; -use tracing::{debug, info_span}; +use tracing::{debug, info_span, warn}; use utils::{ id::{NodeId, TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, @@ -247,7 +247,12 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { NetEvent::Message(msg) => { let res = conn.process_any(msg, &mut global); if res.is_err() { - debug!("conn {:?} error: {:#}", connection_id, res.unwrap_err()); + let e = res.unwrap_err(); + let estr = e.to_string(); + if !estr.contains("finished processing START_REPLICATION") { + warn!("conn {:?} error: {:?}", connection_id, e); + panic!("unexpected error at safekeeper: {:#}", e); + } conns.remove(&connection_id); break; } From 24ce73ffaf5b56004f4e2630ca773630b716d253 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 26 Jun 2024 19:19:27 +0300 Subject: [PATCH 30/57] Silence compiler warning (#8153) I saw this compiler warning on my laptop: pgxn/neon_walredo/walredoproc.c:178:10: warning: using the result of an assignment as a condition without parentheses [-Wparentheses] if (err = close_range_syscall(3, ~0U, 0)) { ~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ pgxn/neon_walredo/walredoproc.c:178:10: note: place parentheses around the assignment to silence this warning if (err = close_range_syscall(3, ~0U, 0)) { ^ ( ) pgxn/neon_walredo/walredoproc.c:178:10: note: use '==' to turn this assignment into an equality comparison if (err = close_range_syscall(3, ~0U, 0)) { ^ == 1 warning generated. I'm not sure what compiler version or options cause that, but it's a good warning. Write the call a little differently, to avoid the warning and to make it a little more clear anyway. (The 'err' variable wasn't used for anything, so I'm surprised we were not seeing a compiler warning on the unused value, too.) --- pgxn/neon_walredo/walredoproc.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pgxn/neon_walredo/walredoproc.c b/pgxn/neon_walredo/walredoproc.c index c4ab22636b91..cc545393f594 100644 --- a/pgxn/neon_walredo/walredoproc.c +++ b/pgxn/neon_walredo/walredoproc.c @@ -168,16 +168,15 @@ close_range_syscall(unsigned int start_fd, unsigned int count, unsigned int flag static void enter_seccomp_mode(void) { - /* * The pageserver process relies on us to close all the file descriptors * it potentially leaked to us, _before_ we start processing potentially dangerous * wal records. See the comment in the Rust code that launches this process. */ - int err; - if (err = close_range_syscall(3, ~0U, 0)) { - ereport(FATAL, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("seccomp: could not close files >= fd 3"))); - } + if (close_range_syscall(3, ~0U, 0) != 0) + ereport(FATAL, + (errcode(ERRCODE_SYSTEM_ERROR), + errmsg("seccomp: could not close files >= fd 3"))); PgSeccompRule syscalls[] = { From 5b871802fd86c7b81fff0a99df3f1699ec8474b7 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 26 Jun 2024 19:53:03 +0300 Subject: [PATCH 31/57] Add counters for commands processed through the libpq page service API (#8089) I was looking for metrics on how many computes are still using protocol version 1 and 2. 
This provides counters for that as "pagestream" and "pagestream_v2" commands, but also all the other commands. The new metrics are global for the whole pageserver instance rather than per-tenant, so the additional metrics bloat should be fairly small. --- pageserver/src/metrics.rs | 41 ++++++++++++++++++++++++++++++++++ pageserver/src/page_service.rs | 39 +++++++++++++++++++++++++++++++- 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index c6b160733167..ca697afcf640 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1445,6 +1445,46 @@ pub(crate) static LIVE_CONNECTIONS_COUNT: Lazy = Lazy::new(|| { .expect("failed to define a metric") }); +#[derive(Clone, Copy, enum_map::Enum, IntoStaticStr)] +pub(crate) enum ComputeCommandKind { + PageStreamV2, + PageStream, + Basebackup, + GetLastRecordRlsn, + Fullbackup, + ImportBasebackup, + ImportWal, + LeaseLsn, + Show, +} + +pub(crate) struct ComputeCommandCounters { + map: EnumMap, +} + +pub(crate) static COMPUTE_COMMANDS_COUNTERS: Lazy = Lazy::new(|| { + let inner = register_int_counter_vec!( + "pageserver_compute_commands", + "Number of compute -> pageserver commands processed", + &["command"] + ) + .expect("failed to define a metric"); + + ComputeCommandCounters { + map: EnumMap::from_array(std::array::from_fn(|i| { + let command = ::from_usize(i); + let command_str: &'static str = command.into(); + inner.with_label_values(&[command_str]) + })), + } +}); + +impl ComputeCommandCounters { + pub(crate) fn for_command(&self, command: ComputeCommandKind) -> &IntCounter { + &self.map[command] + } +} + // remote storage metrics static REMOTE_TIMELINE_CLIENT_CALLS: Lazy = Lazy::new(|| { @@ -2949,4 +2989,5 @@ pub fn preinitialize_metrics() { Lazy::force(&RECONSTRUCT_TIME); Lazy::force(&tenant_throttling::TIMELINE_GET); Lazy::force(&BASEBACKUP_QUERY_TIME); + Lazy::force(&COMPUTE_COMMANDS_COUNTERS); } diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index ebc23e89458e..6ea5f396d0a7 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -55,7 +55,7 @@ use crate::basebackup::BasebackupError; use crate::context::{DownloadBehavior, RequestContext}; use crate::import_datadir::import_wal_from_tar; use crate::metrics; -use crate::metrics::LIVE_CONNECTIONS_COUNT; +use crate::metrics::{ComputeCommandKind, COMPUTE_COMMANDS_COUNTERS, LIVE_CONNECTIONS_COUNT}; use crate::pgdatadir_mapping::Version; use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id; @@ -1554,6 +1554,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::PageStreamV2) + .inc(); + self.handle_pagerequests( pgb, tenant_id, @@ -1579,6 +1583,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::PageStream) + .inc(); + self.handle_pagerequests( pgb, tenant_id, @@ -1605,6 +1613,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::Basebackup) + .inc(); + let lsn = if let Some(lsn_str) = params.get(2) { Some( Lsn::from_str(lsn_str) @@ -1662,6 +1674,11 @@ where .record("timeline_id", field::display(timeline_id)); self.check_permission(Some(tenant_id))?; + + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::GetLastRecordRlsn) + .inc(); + async { let timeline = self 
.get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero) @@ -1723,6 +1740,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::Fullbackup) + .inc(); + // Check that the timeline exists self.handle_basebackup_request( pgb, @@ -1771,6 +1792,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::ImportBasebackup) + .inc(); + match self .handle_import_basebackup( pgb, @@ -1818,6 +1843,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::ImportWal) + .inc(); + match self .handle_import_wal(pgb, tenant_id, timeline_id, start_lsn, end_lsn, ctx) .await @@ -1855,6 +1884,10 @@ where self.check_permission(Some(tenant_shard_id.tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::LeaseLsn) + .inc(); + // The caller is responsible for providing correct lsn. let lsn = Lsn::from_str(params[2]) .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?; @@ -1886,6 +1919,10 @@ where self.check_permission(Some(tenant_id))?; + COMPUTE_COMMANDS_COUNTERS + .for_command(ComputeCommandKind::Show) + .inc(); + let tenant = self .get_active_tenant_with_timeout( tenant_id, From dd3adc36933f86e19aa45c1da07e997970350435 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Wed, 26 Jun 2024 18:27:23 +0100 Subject: [PATCH 32/57] docker: downgrade openssl to 1.1.1w (#8168) ## Problem We have seen numerous segfault and memory corruption issue for clients using libpq and openssl 3.2.2. I don't know if this is a bug in openssl or libpq. Downgrading to 1.1.1w fixes the issues for the storage controller and pgbench. ## Summary of Changes: Use openssl 1.1.1w instead of 3.2.2 --- Dockerfile.build-tools | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index a1483e550ec4..f85706ef6a44 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -106,10 +106,10 @@ RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JS && rm -rf ../lcov.tar.gz # Compile and install the static OpenSSL library -ENV OPENSSL_VERSION=3.2.2 +ENV OPENSSL_VERSION=1.1.1w ENV OPENSSL_PREFIX=/usr/local/openssl RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \ - echo "197149c18d9e9f292c43f0400acaba12e5f52cacfe050f3d199277ea738ec2e7 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ + echo "cf3098950cb4d853ad95c0841f1f9c6d3dc102dccfcacd521d93925208b76ac8 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ cd /tmp && \ tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ From 76fc3d4aa1deaa3f0e821d2dcdb67bdfb7b49281 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Wed, 26 Jun 2024 18:58:56 +0100 Subject: [PATCH 33/57] Evict WAL files from disk (#8022) Fixes https://github.com/neondatabase/neon/issues/6337 Add safekeeper support to switch between `Present` and `Offloaded(flush_lsn)` states. The offloading is disabled by default, but can be controlled using new cmdline arguments: ``` --enable-offload Enable automatic switching to offloaded state --delete-offloaded-wal Delete local WAL files after offloading. 
When disabled, they will be left on disk --control-file-save-interval Pending updates to control file will be automatically saved after this interval [default: 300s] ``` Manager watches state updates and detects when there are no actvity on the timeline and actual partial backup upload in remote storage. When all conditions are met, the state can be switched to offloaded. In `timeline.rs` there is `StateSK` enum to support switching between states. When offloaded, code can access only control file structure and cannot use `SafeKeeper` to accept new WAL. `FullAccessTimeline` is now renamed to `WalResidentTimeline`. This struct contains guard to notify manager about active tasks requiring on-disk WAL access. All guards are issued by the manager, all requests are sent via channel using `ManagerCtl`. When manager receives request to issue a guard, it unevicts timeline if it's currently evicted. Fixed a bug in partial WAL backup, it used `term` instead of `last_log_term` previously. After this commit is merged, next step is to roll this change out, as in issue #6338. --- safekeeper/src/bin/safekeeper.rs | 19 +- safekeeper/src/control_file.rs | 5 +- safekeeper/src/copy_timeline.rs | 8 +- safekeeper/src/debug_dump.rs | 6 +- safekeeper/src/http/routes.rs | 8 +- safekeeper/src/json_ctrl.rs | 10 +- safekeeper/src/lib.rs | 9 + safekeeper/src/pull_timeline.rs | 35 +- safekeeper/src/receive_wal.rs | 20 +- safekeeper/src/recovery.rs | 29 +- safekeeper/src/remove_wal.rs | 2 +- safekeeper/src/safekeeper.rs | 46 +- safekeeper/src/send_wal.rs | 15 +- safekeeper/src/timeline.rs | 458 +++++++++-- safekeeper/src/timeline_eviction.rs | 366 +++++++++ safekeeper/src/timeline_guard.rs | 71 ++ safekeeper/src/timeline_manager.rs | 763 ++++++++++++------ safekeeper/src/timelines_set.rs | 4 + safekeeper/src/wal_backup.rs | 51 +- safekeeper/src/wal_backup_partial.rs | 80 +- safekeeper/src/wal_storage.rs | 12 +- .../tests/walproposer_sim/safekeeper.rs | 13 +- test_runner/fixtures/neon_fixtures.py | 2 + test_runner/regress/test_wal_acceptor.py | 100 +++ .../regress/test_wal_acceptor_async.py | 5 +- 25 files changed, 1665 insertions(+), 472 deletions(-) create mode 100644 safekeeper/src/timeline_eviction.rs create mode 100644 safekeeper/src/timeline_guard.rs diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 86238c729271..20650490b1ae 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -28,8 +28,8 @@ use utils::pid_file; use metrics::set_build_info_metric; use safekeeper::defaults::{ - DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, - DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, + DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR, + DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, }; use safekeeper::http; use safekeeper::wal_service; @@ -172,6 +172,7 @@ struct Args { walsenders_keep_horizon: bool, /// Enable partial backup. If disabled, safekeeper will not upload partial /// segments to remote storage. + /// TODO: now partial backup is always enabled, remove this flag. #[arg(long)] partial_backup_enabled: bool, /// Controls how long backup will wait until uploading the partial segment. @@ -181,6 +182,15 @@ struct Args { /// be used in tests. #[arg(long)] disable_periodic_broker_push: bool, + /// Enable automatic switching to offloaded state. + #[arg(long)] + enable_offload: bool, + /// Delete local WAL files after offloading. 
When disabled, they will be left on disk. + #[arg(long)] + delete_offloaded_wal: bool, + /// Pending updates to control file will be automatically saved after this interval. + #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_CONTROL_FILE_SAVE_INTERVAL)] + control_file_save_interval: Duration, } // Like PathBufValueParser, but allows empty string. @@ -328,9 +338,12 @@ async fn main() -> anyhow::Result<()> { sk_auth_token, current_thread_runtime: args.current_thread_runtime, walsenders_keep_horizon: args.walsenders_keep_horizon, - partial_backup_enabled: args.partial_backup_enabled, + partial_backup_enabled: true, partial_backup_timeout: args.partial_backup_timeout, disable_periodic_broker_push: args.disable_periodic_broker_push, + enable_offload: args.enable_offload, + delete_offloaded_wal: args.delete_offloaded_wal, + control_file_save_interval: args.control_file_save_interval, }; // initialize sentry if SENTRY_DSN is provided diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index 8e9031fae4fe..cd3c7fe52631 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -72,6 +72,9 @@ impl FileStorage { conf: &SafeKeeperConf, state: TimelinePersistentState, ) -> Result { + // we don't support creating new timelines in offloaded state + assert!(matches!(state.eviction_state, EvictionState::Present)); + let store = FileStorage { timeline_dir, no_sync: conf.no_sync, @@ -103,7 +106,7 @@ impl FileStorage { } /// Load control file from given directory. - pub fn load_control_file_from_dir(timeline_dir: &Utf8Path) -> Result { + fn load_control_file_from_dir(timeline_dir: &Utf8Path) -> Result { let path = timeline_dir.join(CONTROL_FILE_NAME); Self::load_control_file(path) } diff --git a/safekeeper/src/copy_timeline.rs b/safekeeper/src/copy_timeline.rs index 51cf4db6b5b6..14bd3c03b810 100644 --- a/safekeeper/src/copy_timeline.rs +++ b/safekeeper/src/copy_timeline.rs @@ -15,7 +15,7 @@ use crate::{ control_file::{FileStorage, Storage}, pull_timeline::{create_temp_timeline_dir, load_temp_timeline, validate_temp_timeline}, state::TimelinePersistentState, - timeline::{FullAccessTimeline, Timeline, TimelineError}, + timeline::{Timeline, TimelineError, WalResidentTimeline}, wal_backup::copy_s3_segments, wal_storage::{wal_file_paths, WalReader}, GlobalTimelines, @@ -46,7 +46,7 @@ pub async fn handle_request(request: Request) -> Result<()> { } } - let source_tli = request.source.full_access_guard().await?; + let source_tli = request.source.wal_residence_guard().await?; let conf = &GlobalTimelines::get_global_config(); let ttid = request.destination_ttid; @@ -159,7 +159,7 @@ pub async fn handle_request(request: Request) -> Result<()> { } async fn copy_disk_segments( - tli: &FullAccessTimeline, + tli: &WalResidentTimeline, wal_seg_size: usize, start_lsn: Lsn, end_lsn: Lsn, @@ -183,7 +183,7 @@ async fn copy_disk_segments( let copy_end = copy_end - segment_start; let wal_file_path = { - let (normal, partial) = wal_file_paths(tli_dir_path, segment, wal_seg_size)?; + let (normal, partial) = wal_file_paths(tli_dir_path, segment, wal_seg_size); if segment == last_segment { partial diff --git a/safekeeper/src/debug_dump.rs b/safekeeper/src/debug_dump.rs index 062ff4b3db79..15b0272cd942 100644 --- a/safekeeper/src/debug_dump.rs +++ b/safekeeper/src/debug_dump.rs @@ -28,7 +28,8 @@ use crate::send_wal::WalSenderState; use crate::state::TimelineMemState; use crate::state::TimelinePersistentState; use crate::timeline::get_timeline_dir; -use 
crate::timeline::FullAccessTimeline; +use crate::timeline::WalResidentTimeline; +use crate::timeline_manager; use crate::GlobalTimelines; use crate::SafeKeeperConf; @@ -168,6 +169,7 @@ pub struct Memory { pub last_removed_segno: XLogSegNo, pub epoch_start_lsn: Lsn, pub mem_state: TimelineMemState, + pub mgr_status: timeline_manager::Status, // PhysicalStorage state. pub write_lsn: Lsn, @@ -326,7 +328,7 @@ pub struct TimelineDigest { } pub async fn calculate_digest( - tli: &FullAccessTimeline, + tli: &WalResidentTimeline, request: TimelineDigestRequest, ) -> Result { if request.from_lsn > request.until_lsn { diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 3f2cd97ccd01..fe6d325ceeed 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -214,10 +214,10 @@ async fn timeline_snapshot_handler(request: Request) -> Result) -> Result) -> Result( async fn prepare_safekeeper( ttid: TenantTimelineId, pg_version: u32, -) -> anyhow::Result { +) -> anyhow::Result { let tli = GlobalTimelines::create( ttid, ServerInfo { @@ -115,11 +115,11 @@ async fn prepare_safekeeper( ) .await?; - tli.full_access_guard().await + tli.wal_residence_guard().await } async fn send_proposer_elected( - tli: &FullAccessTimeline, + tli: &WalResidentTimeline, term: Term, lsn: Lsn, ) -> anyhow::Result<()> { @@ -151,7 +151,7 @@ pub struct InsertedWAL { /// Extend local WAL with new LogicalMessage record. To do that, /// create AppendRequest with new WAL and pass it to safekeeper. pub async fn append_logical_message( - tli: &FullAccessTimeline, + tli: &WalResidentTimeline, msg: &AppendLogicalMessage, ) -> anyhow::Result { let wal_data = encode_logical_message(&msg.lm_prefix, &msg.lm_message); diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index cbd67f0064c3..067e425570e7 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -28,6 +28,8 @@ pub mod safekeeper; pub mod send_wal; pub mod state; pub mod timeline; +pub mod timeline_eviction; +pub mod timeline_guard; pub mod timeline_manager; pub mod timelines_set; pub mod wal_backup; @@ -49,6 +51,7 @@ pub mod defaults { pub const DEFAULT_HEARTBEAT_TIMEOUT: &str = "5000ms"; pub const DEFAULT_MAX_OFFLOADER_LAG_BYTES: u64 = 128 * (1 << 20); pub const DEFAULT_PARTIAL_BACKUP_TIMEOUT: &str = "15m"; + pub const DEFAULT_CONTROL_FILE_SAVE_INTERVAL: &str = "300s"; } #[derive(Debug, Clone)] @@ -85,6 +88,9 @@ pub struct SafeKeeperConf { pub partial_backup_enabled: bool, pub partial_backup_timeout: Duration, pub disable_periodic_broker_push: bool, + pub enable_offload: bool, + pub delete_offloaded_wal: bool, + pub control_file_save_interval: Duration, } impl SafeKeeperConf { @@ -124,6 +130,9 @@ impl SafeKeeperConf { partial_backup_enabled: false, partial_backup_timeout: Duration::from_secs(0), disable_periodic_broker_push: false, + enable_offload: false, + delete_offloaded_wal: false, + control_file_save_interval: Duration::from_secs(1), } } } diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 66c41f65ff2b..618c6b278f9b 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -32,7 +32,7 @@ use crate::{ routes::TimelineStatus, }, safekeeper::Term, - timeline::{get_tenant_dir, get_timeline_dir, FullAccessTimeline, Timeline, TimelineError}, + timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError, WalResidentTimeline}, wal_storage::{self, open_wal_file, Storage}, GlobalTimelines, SafeKeeperConf, }; @@ -46,7 +46,7 @@ use utils::{ /// Stream tar 
archive of timeline to tx. #[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))] -pub async fn stream_snapshot(tli: FullAccessTimeline, tx: mpsc::Sender>) { +pub async fn stream_snapshot(tli: WalResidentTimeline, tx: mpsc::Sender>) { if let Err(e) = stream_snapshot_guts(tli, tx.clone()).await { // Error type/contents don't matter as they won't can't reach the client // (hyper likely doesn't do anything with it), but http stream will be @@ -66,7 +66,7 @@ pub struct SnapshotContext { pub flush_lsn: Lsn, pub wal_seg_size: usize, // used to remove WAL hold off in Drop. - pub tli: FullAccessTimeline, + pub tli: WalResidentTimeline, } impl Drop for SnapshotContext { @@ -80,7 +80,7 @@ impl Drop for SnapshotContext { } pub async fn stream_snapshot_guts( - tli: FullAccessTimeline, + tli: WalResidentTimeline, tx: mpsc::Sender>, ) -> Result<()> { // tokio-tar wants Write implementor, but we have mpsc tx >; @@ -135,7 +135,7 @@ pub async fn stream_snapshot_guts( Ok(()) } -impl FullAccessTimeline { +impl WalResidentTimeline { /// Start streaming tar archive with timeline: /// 1) stream control file under lock; /// 2) hold off WAL removal; @@ -160,6 +160,7 @@ impl FullAccessTimeline { ar: &mut tokio_tar::Builder, ) -> Result { let mut shared_state = self.write_shared_state().await; + let wal_seg_size = shared_state.get_wal_seg_size(); let cf_path = self.get_timeline_dir().join(CONTROL_FILE_NAME); let mut cf = File::open(cf_path).await?; @@ -173,19 +174,19 @@ impl FullAccessTimeline { // lock and setting `wal_removal_on_hold` later, it guarantees that WAL // won't be removed until we're done. let from_lsn = min( - shared_state.sk.state.remote_consistent_lsn, - shared_state.sk.state.backup_lsn, + shared_state.sk.state().remote_consistent_lsn, + shared_state.sk.state().backup_lsn, ); if from_lsn == Lsn::INVALID { // this is possible if snapshot is called before handling first // elected message bail!("snapshot is called on uninitialized timeline"); } - let from_segno = from_lsn.segment_number(shared_state.get_wal_seg_size()); - let term = shared_state.sk.get_term(); - let last_log_term = shared_state.sk.get_last_log_term(); + let from_segno = from_lsn.segment_number(wal_seg_size); + let term = shared_state.sk.state().acceptor_state.term; + let last_log_term = shared_state.sk.last_log_term(); let flush_lsn = shared_state.sk.flush_lsn(); - let upto_segno = flush_lsn.segment_number(shared_state.get_wal_seg_size()); + let upto_segno = flush_lsn.segment_number(wal_seg_size); // have some limit on max number of segments as a sanity check const MAX_ALLOWED_SEGS: u64 = 1000; let num_segs = upto_segno - from_segno + 1; @@ -206,14 +207,18 @@ impl FullAccessTimeline { } shared_state.wal_removal_on_hold = true; + // Drop shared_state to release the lock, before calling wal_residence_guard(). + drop(shared_state); + + let tli_copy = self.wal_residence_guard().await?; let bctx = SnapshotContext { from_segno, upto_segno, term, last_log_term, flush_lsn, - wal_seg_size: shared_state.get_wal_seg_size(), - tli: self.clone(), + wal_seg_size, + tli: tli_copy, }; Ok(bctx) @@ -225,8 +230,8 @@ impl FullAccessTimeline { /// forget this if snapshotting fails mid the way. 
pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> { let shared_state = self.read_shared_state().await; - let term = shared_state.sk.get_term(); - let last_log_term = shared_state.sk.get_last_log_term(); + let term = shared_state.sk.state().acceptor_state.term; + let last_log_term = shared_state.sk.last_log_term(); // There are some cases to relax this check (e.g. last_log_term might // change, but as long as older history is strictly part of new that's // fine), but there is no need to do it. diff --git a/safekeeper/src/receive_wal.rs b/safekeeper/src/receive_wal.rs index 7943a2fd8683..ab8c76dc17e2 100644 --- a/safekeeper/src/receive_wal.rs +++ b/safekeeper/src/receive_wal.rs @@ -6,7 +6,7 @@ use crate::handler::SafekeeperPostgresHandler; use crate::safekeeper::AcceptorProposerMessage; use crate::safekeeper::ProposerAcceptorMessage; use crate::safekeeper::ServerInfo; -use crate::timeline::FullAccessTimeline; +use crate::timeline::WalResidentTimeline; use crate::wal_service::ConnectionId; use crate::GlobalTimelines; use anyhow::{anyhow, Context}; @@ -213,7 +213,7 @@ impl SafekeeperPostgresHandler { &mut self, pgb: &mut PostgresBackend, ) -> Result<(), QueryError> { - let mut tli: Option = None; + let mut tli: Option = None; if let Err(end) = self.handle_start_wal_push_guts(pgb, &mut tli).await { // Log the result and probably send it to the client, closing the stream. let handle_end_fut = pgb.handle_copy_stream_end(end); @@ -233,7 +233,7 @@ impl SafekeeperPostgresHandler { pub async fn handle_start_wal_push_guts( &mut self, pgb: &mut PostgresBackend, - tli: &mut Option, + tli: &mut Option, ) -> Result<(), CopyStreamHandlerEnd> { // Notify the libpq client that it's allowed to send `CopyData` messages pgb.write_message(&BeMessage::CopyBothResponse).await?; @@ -269,11 +269,11 @@ impl SafekeeperPostgresHandler { .get_walreceivers() .pageserver_feedback_tx .subscribe(); - *tli = Some(timeline.clone()); + *tli = Some(timeline.wal_residence_guard().await?); tokio::select! { // todo: add read|write .context to these errors - r = network_reader.run(msg_tx, msg_rx, reply_tx, timeline.clone(), next_msg) => r, + r = network_reader.run(msg_tx, msg_rx, reply_tx, timeline, next_msg) => r, r = network_write(pgb, reply_rx, pageserver_feedback_rx) => r, } } else { @@ -323,7 +323,7 @@ struct NetworkReader<'a, IO> { impl<'a, IO: AsyncRead + AsyncWrite + Unpin> NetworkReader<'a, IO> { async fn read_first_message( &mut self, - ) -> Result<(FullAccessTimeline, ProposerAcceptorMessage), CopyStreamHandlerEnd> { + ) -> Result<(WalResidentTimeline, ProposerAcceptorMessage), CopyStreamHandlerEnd> { // Receive information about server to create timeline, if not yet. let next_msg = read_message(self.pgb_reader).await?; let tli = match next_msg { @@ -340,7 +340,7 @@ impl<'a, IO: AsyncRead + AsyncWrite + Unpin> NetworkReader<'a, IO> { let tli = GlobalTimelines::create(self.ttid, server_info, Lsn::INVALID, Lsn::INVALID) .await?; - tli.full_access_guard().await? + tli.wal_residence_guard().await? 
} _ => { return Err(CopyStreamHandlerEnd::Other(anyhow::anyhow!( @@ -356,7 +356,7 @@ impl<'a, IO: AsyncRead + AsyncWrite + Unpin> NetworkReader<'a, IO> { msg_tx: Sender, msg_rx: Receiver, reply_tx: Sender, - tli: FullAccessTimeline, + tli: WalResidentTimeline, next_msg: ProposerAcceptorMessage, ) -> Result<(), CopyStreamHandlerEnd> { *self.acceptor_handle = Some(WalAcceptor::spawn( @@ -451,7 +451,7 @@ const KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1); /// replies to reply_tx; reading from socket and writing to disk in parallel is /// beneficial for performance, this struct provides writing to disk part. pub struct WalAcceptor { - tli: FullAccessTimeline, + tli: WalResidentTimeline, msg_rx: Receiver, reply_tx: Sender, conn_id: Option, @@ -464,7 +464,7 @@ impl WalAcceptor { /// /// conn_id None means WalAcceptor is used by recovery initiated at this safekeeper. pub fn spawn( - tli: FullAccessTimeline, + tli: WalResidentTimeline, msg_rx: Receiver, reply_tx: Sender, conn_id: Option, diff --git a/safekeeper/src/recovery.rs b/safekeeper/src/recovery.rs index 80a630b1e120..a59ff07b96be 100644 --- a/safekeeper/src/recovery.rs +++ b/safekeeper/src/recovery.rs @@ -21,7 +21,7 @@ use utils::{id::NodeId, lsn::Lsn, postgres_client::wal_stream_connection_config} use crate::receive_wal::{WalAcceptor, REPLY_QUEUE_SIZE}; use crate::safekeeper::{AppendRequest, AppendRequestHeader}; -use crate::timeline::FullAccessTimeline; +use crate::timeline::WalResidentTimeline; use crate::{ http::routes::TimelineStatus, receive_wal::MSG_QUEUE_SIZE, @@ -36,7 +36,7 @@ use crate::{ /// Entrypoint for per timeline task which always runs, checking whether /// recovery for this safekeeper is needed and starting it if so. #[instrument(name = "recovery task", skip_all, fields(ttid = %tli.ttid))] -pub async fn recovery_main(tli: FullAccessTimeline, conf: SafeKeeperConf) { +pub async fn recovery_main(tli: WalResidentTimeline, conf: SafeKeeperConf) { info!("started"); let cancel = tli.cancel.clone(); @@ -66,12 +66,12 @@ pub async fn recovery_main(tli: FullAccessTimeline, conf: SafeKeeperConf) { /// depending on assembled quorum (e.g. classic picture 8 from Raft paper). /// Thus we don't try to predict it here. async fn recovery_needed( - tli: &FullAccessTimeline, + tli: &WalResidentTimeline, heartbeat_timeout: Duration, ) -> RecoveryNeededInfo { let ss = tli.read_shared_state().await; - let term = ss.sk.state.acceptor_state.term; - let last_log_term = ss.sk.get_last_log_term(); + let term = ss.sk.state().acceptor_state.term; + let last_log_term = ss.sk.last_log_term(); let flush_lsn = ss.sk.flush_lsn(); // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us. let mut peers = ss.get_peers(heartbeat_timeout); @@ -195,7 +195,7 @@ impl From<&PeerInfo> for Donor { const CHECK_INTERVAL_MS: u64 = 2000; /// Check regularly whether we need to start recovery. 
-async fn recovery_main_loop(tli: FullAccessTimeline, conf: SafeKeeperConf) { +async fn recovery_main_loop(tli: WalResidentTimeline, conf: SafeKeeperConf) { let check_duration = Duration::from_millis(CHECK_INTERVAL_MS); loop { let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await; @@ -205,7 +205,12 @@ async fn recovery_main_loop(tli: FullAccessTimeline, conf: SafeKeeperConf) { "starting recovery from donor {}: {}", donor.sk_id, recovery_needed_info ); - match recover(tli.clone(), donor, &conf).await { + let res = tli.wal_residence_guard().await; + if let Err(e) = res { + warn!("failed to obtain guard: {}", e); + continue; + } + match recover(res.unwrap(), donor, &conf).await { // Note: 'write_wal rewrites WAL written before' error is // expected here and might happen if compute and recovery // concurrently write the same data. Eventually compute @@ -228,7 +233,7 @@ async fn recovery_main_loop(tli: FullAccessTimeline, conf: SafeKeeperConf) { /// Recover from the specified donor. Returns message explaining normal finish /// reason or error. async fn recover( - tli: FullAccessTimeline, + tli: WalResidentTimeline, donor: &Donor, conf: &SafeKeeperConf, ) -> anyhow::Result { @@ -314,7 +319,7 @@ async fn recover( // Pull WAL from donor, assuming handshake is already done. async fn recovery_stream( - tli: FullAccessTimeline, + tli: WalResidentTimeline, donor: &Donor, start_streaming_at: Lsn, conf: &SafeKeeperConf, @@ -364,10 +369,10 @@ async fn recovery_stream( // As in normal walreceiver, do networking and writing to disk in parallel. let (msg_tx, msg_rx) = channel(MSG_QUEUE_SIZE); let (reply_tx, reply_rx) = channel(REPLY_QUEUE_SIZE); - let wa = WalAcceptor::spawn(tli.clone(), msg_rx, reply_tx, None); + let wa = WalAcceptor::spawn(tli.wal_residence_guard().await?, msg_rx, reply_tx, None); let res = tokio::select! { - r = network_io(physical_stream, msg_tx, donor.clone(), tli.clone(), conf.clone()) => r, + r = network_io(physical_stream, msg_tx, donor.clone(), tli, conf.clone()) => r, r = read_replies(reply_rx, donor.term) => r.map(|()| None), }; @@ -398,7 +403,7 @@ async fn network_io( physical_stream: ReplicationStream, msg_tx: Sender, donor: Donor, - tli: FullAccessTimeline, + tli: WalResidentTimeline, conf: SafeKeeperConf, ) -> anyhow::Result> { let mut physical_stream = pin!(physical_stream); diff --git a/safekeeper/src/remove_wal.rs b/safekeeper/src/remove_wal.rs index b661e48cb5a6..16239d847ba4 100644 --- a/safekeeper/src/remove_wal.rs +++ b/safekeeper/src/remove_wal.rs @@ -8,7 +8,7 @@ use crate::timeline_manager::StateSnapshot; /// While it is safe to use inmem values for determining horizon, /// we use persistent to make possible normal states less surprising. /// All segments covering LSNs before horizon_lsn can be removed. -pub fn calc_horizon_lsn(state: &StateSnapshot, extra_horizon_lsn: Option) -> Lsn { +pub(crate) fn calc_horizon_lsn(state: &StateSnapshot, extra_horizon_lsn: Option) -> Lsn { use std::cmp::min; let mut horizon_lsn = min( diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 666ffdf0cea4..4d0992e8bda9 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -499,7 +499,11 @@ where /// Accepts a control file storage containing the safekeeper state. /// State must be initialized, i.e. contain filled `tenant_id`, `timeline_id` /// and `server` (`wal_seg_size` inside it) fields. 
- pub fn new(state: CTRL, wal_store: WAL, node_id: NodeId) -> Result> { + pub fn new( + state: TimelineState, + wal_store: WAL, + node_id: NodeId, + ) -> Result> { if state.tenant_id == TenantId::from([0u8; 16]) || state.timeline_id == TimelineId::from([0u8; 16]) { @@ -512,7 +516,7 @@ where Ok(SafeKeeper { term_start_lsn: Lsn(0), - state: TimelineState::new(state), + state, wal_store, node_id, }) @@ -526,11 +530,6 @@ where .up_to(self.flush_lsn()) } - /// Get current term. - pub fn get_term(&self) -> Term { - self.state.acceptor_state.term - } - pub fn get_last_log_term(&self) -> Term { self.state .acceptor_state @@ -912,10 +911,8 @@ where ))) } - /// Update timeline state with peer safekeeper data. + /// Update commit_lsn from peer safekeeper data. pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> { - let mut sync_control_file = false; - if (Lsn(sk_info.commit_lsn) != Lsn::INVALID) && (sk_info.last_log_term != INVALID_TERM) { // Note: the check is too restrictive, generally we can update local // commit_lsn if our history matches (is part of) history of advanced @@ -924,29 +921,6 @@ where self.update_commit_lsn(Lsn(sk_info.commit_lsn)).await?; } } - - self.state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), self.state.inmem.backup_lsn); - sync_control_file |= self.state.backup_lsn + (self.state.server.wal_seg_size as u64) - < self.state.inmem.backup_lsn; - - self.state.inmem.remote_consistent_lsn = max( - Lsn(sk_info.remote_consistent_lsn), - self.state.inmem.remote_consistent_lsn, - ); - sync_control_file |= self.state.remote_consistent_lsn - + (self.state.server.wal_seg_size as u64) - < self.state.inmem.remote_consistent_lsn; - - self.state.inmem.peer_horizon_lsn = max( - Lsn(sk_info.peer_horizon_lsn), - self.state.inmem.peer_horizon_lsn, - ); - sync_control_file |= self.state.peer_horizon_lsn + (self.state.server.wal_seg_size as u64) - < self.state.inmem.peer_horizon_lsn; - - if sync_control_file { - self.state.flush().await?; - } Ok(()) } } @@ -1039,7 +1013,7 @@ mod tests { persisted_state: test_sk_state(), }; let wal_store = DummyWalStore { lsn: Lsn(0) }; - let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap(); + let mut sk = SafeKeeper::new(TimelineState::new(storage), wal_store, NodeId(0)).unwrap(); // check voting for 1 is ok let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 1 }); @@ -1055,7 +1029,7 @@ mod tests { persisted_state: state, }; - sk = SafeKeeper::new(storage, sk.wal_store, NodeId(0)).unwrap(); + sk = SafeKeeper::new(TimelineState::new(storage), sk.wal_store, NodeId(0)).unwrap(); // and ensure voting second time for 1 is not ok vote_resp = sk.process_msg(&vote_request).await; @@ -1072,7 +1046,7 @@ mod tests { }; let wal_store = DummyWalStore { lsn: Lsn(0) }; - let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap(); + let mut sk = SafeKeeper::new(TimelineState::new(storage), wal_store, NodeId(0)).unwrap(); let mut ar_hdr = AppendRequestHeader { term: 1, diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index df75893838ee..90b1604adbdb 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -5,7 +5,7 @@ use crate::handler::SafekeeperPostgresHandler; use crate::metrics::RECEIVED_PS_FEEDBACKS; use crate::receive_wal::WalReceivers; use crate::safekeeper::{Term, TermLsn}; -use crate::timeline::FullAccessTimeline; +use crate::timeline::WalResidentTimeline; use crate::wal_service::ConnectionId; use crate::wal_storage::WalReader; use 
crate::GlobalTimelines; @@ -387,10 +387,10 @@ impl SafekeeperPostgresHandler { term: Option, ) -> Result<(), QueryError> { let tli = GlobalTimelines::get(self.ttid).map_err(|e| QueryError::Other(e.into()))?; - let full_access = tli.full_access_guard().await?; + let residence_guard = tli.wal_residence_guard().await?; if let Err(end) = self - .handle_start_replication_guts(pgb, start_pos, term, full_access) + .handle_start_replication_guts(pgb, start_pos, term, residence_guard) .await { let info = tli.get_safekeeper_info(&self.conf).await; @@ -407,7 +407,7 @@ impl SafekeeperPostgresHandler { pgb: &mut PostgresBackend, start_pos: Lsn, term: Option, - tli: FullAccessTimeline, + tli: WalResidentTimeline, ) -> Result<(), CopyStreamHandlerEnd> { let appname = self.appname.clone(); @@ -458,7 +458,8 @@ impl SafekeeperPostgresHandler { let mut sender = WalSender { pgb, - tli: tli.clone(), + // should succeed since we're already holding another guard + tli: tli.wal_residence_guard().await?, appname, start_pos, end_pos, @@ -527,7 +528,7 @@ impl EndWatch { /// A half driving sending WAL. struct WalSender<'a, IO> { pgb: &'a mut PostgresBackend, - tli: FullAccessTimeline, + tli: WalResidentTimeline, appname: Option, // Position since which we are sending next chunk. start_pos: Lsn, @@ -736,7 +737,7 @@ impl WalSender<'_, IO> { struct ReplyReader { reader: PostgresBackendReader, ws_guard: Arc, - tli: FullAccessTimeline, + tli: WalResidentTimeline, } impl ReplyReader { diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 544ffdbb36cf..f632cd6fb3ec 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -31,12 +31,15 @@ use crate::safekeeper::{ INVALID_TERM, }; use crate::send_wal::WalSenders; -use crate::state::{TimelineMemState, TimelinePersistentState}; +use crate::state::{EvictionState, TimelineMemState, TimelinePersistentState, TimelineState}; +use crate::timeline_guard::ResidenceGuard; +use crate::timeline_manager::{AtomicStatus, ManagerCtl}; use crate::timelines_set::TimelinesSet; use crate::wal_backup::{self}; +use crate::wal_backup_partial::PartialRemoteSegment; use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION}; -use crate::metrics::FullTimelineInfo; +use crate::metrics::{FullTimelineInfo, WalStorageMetrics}; use crate::wal_storage::{Storage as wal_storage_iface, WalReader}; use crate::{debug_dump, timeline_manager, wal_storage}; use crate::{GlobalTimelines, SafeKeeperConf}; @@ -132,8 +135,9 @@ impl<'a> DerefMut for WriteGuardSharedState<'a> { impl<'a> Drop for WriteGuardSharedState<'a> { fn drop(&mut self) { - let term_flush_lsn = TermLsn::from((self.guard.sk.get_term(), self.guard.sk.flush_lsn())); - let commit_lsn = self.guard.sk.state.inmem.commit_lsn; + let term_flush_lsn = + TermLsn::from((self.guard.sk.last_log_term(), self.guard.sk.flush_lsn())); + let commit_lsn = self.guard.sk.state().inmem.commit_lsn; let _ = self.tli.term_flush_lsn_watch_tx.send_if_modified(|old| { if *old != term_flush_lsn { @@ -162,10 +166,150 @@ impl<'a> Drop for WriteGuardSharedState<'a> { } } +/// This structure is stored in shared state and represents the state of the timeline. +/// Usually it holds SafeKeeper, but it also supports offloaded timeline state. In this +/// case, SafeKeeper is not available (because WAL is not present on disk) and all +/// operations can be done only with control file. +pub enum StateSK { + Loaded(SafeKeeper), + Offloaded(Box>), + // Not used, required for moving between states. 
+ Empty, +} + +impl StateSK { + pub fn flush_lsn(&self) -> Lsn { + match self { + StateSK::Loaded(sk) => sk.wal_store.flush_lsn(), + StateSK::Offloaded(state) => match state.eviction_state { + EvictionState::Offloaded(flush_lsn) => flush_lsn, + _ => panic!("StateSK::Offloaded mismatches with eviction_state from control_file"), + }, + StateSK::Empty => unreachable!(), + } + } + + /// Get a reference to the control file's timeline state. + pub fn state(&self) -> &TimelineState { + match self { + StateSK::Loaded(sk) => &sk.state, + StateSK::Offloaded(ref s) => s, + StateSK::Empty => unreachable!(), + } + } + + pub fn state_mut(&mut self) -> &mut TimelineState { + match self { + StateSK::Loaded(sk) => &mut sk.state, + StateSK::Offloaded(ref mut s) => s, + StateSK::Empty => unreachable!(), + } + } + + pub fn last_log_term(&self) -> Term { + self.state() + .acceptor_state + .get_last_log_term(self.flush_lsn()) + } + + /// Close open WAL files to release FDs. + fn close_wal_store(&mut self) { + if let StateSK::Loaded(sk) = self { + sk.wal_store.close(); + } + } + + /// Update timeline state with peer safekeeper data. + pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> { + // update commit_lsn if safekeeper is loaded + match self { + StateSK::Loaded(sk) => sk.record_safekeeper_info(sk_info).await?, + StateSK::Offloaded(_) => {} + StateSK::Empty => unreachable!(), + } + + // update everything else, including remote_consistent_lsn and backup_lsn + let mut sync_control_file = false; + let state = self.state_mut(); + let wal_seg_size = state.server.wal_seg_size as u64; + + state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), state.inmem.backup_lsn); + sync_control_file |= state.backup_lsn + wal_seg_size < state.inmem.backup_lsn; + + state.inmem.remote_consistent_lsn = max( + Lsn(sk_info.remote_consistent_lsn), + state.inmem.remote_consistent_lsn, + ); + sync_control_file |= + state.remote_consistent_lsn + wal_seg_size < state.inmem.remote_consistent_lsn; + + state.inmem.peer_horizon_lsn = + max(Lsn(sk_info.peer_horizon_lsn), state.inmem.peer_horizon_lsn); + sync_control_file |= state.peer_horizon_lsn + wal_seg_size < state.inmem.peer_horizon_lsn; + + if sync_control_file { + state.flush().await?; + } + Ok(()) + } + + /// Previously known as epoch_start_lsn. Needed only for reference in some APIs. + pub fn term_start_lsn(&self) -> Lsn { + match self { + StateSK::Loaded(sk) => sk.term_start_lsn, + StateSK::Offloaded(_) => Lsn(0), + StateSK::Empty => unreachable!(), + } + } + + /// Used for metrics only. + pub fn wal_storage_metrics(&self) -> WalStorageMetrics { + match self { + StateSK::Loaded(sk) => sk.wal_store.get_metrics(), + StateSK::Offloaded(_) => WalStorageMetrics::default(), + StateSK::Empty => unreachable!(), + } + } + + /// Returns WAL storage internal LSNs for debug dump. + pub fn wal_storage_internal_state(&self) -> (Lsn, Lsn, Lsn, bool) { + match self { + StateSK::Loaded(sk) => sk.wal_store.internal_state(), + StateSK::Offloaded(_) => { + let flush_lsn = self.flush_lsn(); + (flush_lsn, flush_lsn, flush_lsn, false) + } + StateSK::Empty => unreachable!(), + } + } + + /// Access to SafeKeeper object. Panics if offloaded, should be good to use from WalResidentTimeline. 
+ pub fn safekeeper( + &mut self, + ) -> &mut SafeKeeper { + match self { + StateSK::Loaded(sk) => sk, + StateSK::Offloaded(_) => { + panic!("safekeeper is offloaded, cannot be used") + } + StateSK::Empty => unreachable!(), + } + } + + /// Moves control file's state structure out of the enum. Used to switch states. + fn take_state(self) -> TimelineState { + match self { + StateSK::Loaded(sk) => sk.state, + StateSK::Offloaded(state) => *state, + StateSK::Empty => unreachable!(), + } + } +} + /// Shared state associated with database instance pub struct SharedState { /// Safekeeper object - pub(crate) sk: SafeKeeper, + pub(crate) sk: StateSK, /// In memory list containing state of peers sent in latest messages from them. pub(crate) peers_info: PeersInfo, // True value hinders old WAL removal; this is used by snapshotting. We @@ -203,10 +347,10 @@ impl SharedState { control_file::FileStorage::create_new(timeline_dir.clone(), conf, state)?; let wal_store = wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?; - let sk = SafeKeeper::new(control_store, wal_store, conf.my_id)?; + let sk = SafeKeeper::new(TimelineState::new(control_store), wal_store, conf.my_id)?; Ok(Self { - sk, + sk: StateSK::Loaded(sk), peers_info: PeersInfo(vec![]), wal_removal_on_hold: false, }) @@ -220,18 +364,30 @@ impl SharedState { bail!(TimelineError::UninitializedWalSegSize(*ttid)); } - let wal_store = - wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?; + let sk = match control_store.eviction_state { + EvictionState::Present => { + let wal_store = + wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?; + StateSK::Loaded(SafeKeeper::new( + TimelineState::new(control_store), + wal_store, + conf.my_id, + )?) 
+ } + EvictionState::Offloaded(_) => { + StateSK::Offloaded(Box::new(TimelineState::new(control_store))) + } + }; Ok(Self { - sk: SafeKeeper::new(control_store, wal_store, conf.my_id)?, + sk, peers_info: PeersInfo(vec![]), wal_removal_on_hold: false, }) } pub(crate) fn get_wal_seg_size(&self) -> usize { - self.sk.state.server.wal_seg_size as usize + self.sk.state().server.wal_seg_size as usize } fn get_safekeeper_info( @@ -246,20 +402,20 @@ impl SharedState { tenant_id: ttid.tenant_id.as_ref().to_owned(), timeline_id: ttid.timeline_id.as_ref().to_owned(), }), - term: self.sk.state.acceptor_state.term, - last_log_term: self.sk.get_last_log_term(), + term: self.sk.state().acceptor_state.term, + last_log_term: self.sk.last_log_term(), flush_lsn: self.sk.flush_lsn().0, // note: this value is not flushed to control file yet and can be lost - commit_lsn: self.sk.state.inmem.commit_lsn.0, - remote_consistent_lsn: self.sk.state.inmem.remote_consistent_lsn.0, - peer_horizon_lsn: self.sk.state.inmem.peer_horizon_lsn.0, + commit_lsn: self.sk.state().inmem.commit_lsn.0, + remote_consistent_lsn: self.sk.state().inmem.remote_consistent_lsn.0, + peer_horizon_lsn: self.sk.state().inmem.peer_horizon_lsn.0, safekeeper_connstr: conf .advertise_pg_addr .to_owned() .unwrap_or(conf.listen_pg_addr.clone()), http_connstr: conf.listen_http_addr.to_owned(), - backup_lsn: self.sk.state.inmem.backup_lsn.0, - local_start_lsn: self.sk.state.local_start_lsn.0, + backup_lsn: self.sk.state().inmem.backup_lsn.0, + local_start_lsn: self.sk.state().local_start_lsn.0, availability_zone: conf.availability_zone.clone(), standby_horizon: standby_apply_lsn.0, } @@ -335,6 +491,7 @@ pub struct Timeline { walsenders: Arc, walreceivers: Arc, timeline_dir: Utf8PathBuf, + manager_ctl: ManagerCtl, /// Delete/cancel will trigger this, background tasks should drop out as soon as it fires pub(crate) cancel: CancellationToken, @@ -343,6 +500,7 @@ pub struct Timeline { pub(crate) broker_active: AtomicBool, pub(crate) wal_backup_active: AtomicBool, pub(crate) last_removed_segno: AtomicU64, + pub(crate) mgr_status: AtomicStatus, } impl Timeline { @@ -352,9 +510,9 @@ impl Timeline { let shared_state = SharedState::restore(conf, &ttid)?; let (commit_lsn_watch_tx, commit_lsn_watch_rx) = - watch::channel(shared_state.sk.state.commit_lsn); + watch::channel(shared_state.sk.state().commit_lsn); let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from(( - shared_state.sk.get_term(), + shared_state.sk.last_log_term(), shared_state.sk.flush_lsn(), ))); let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0); @@ -373,9 +531,11 @@ impl Timeline { walreceivers, cancel: CancellationToken::default(), timeline_dir: get_timeline_dir(conf, &ttid), + manager_ctl: ManagerCtl::new(), broker_active: AtomicBool::new(false), wal_backup_active: AtomicBool::new(false), last_removed_segno: AtomicU64::new(0), + mgr_status: AtomicStatus::new(), }) } @@ -409,9 +569,11 @@ impl Timeline { walreceivers, cancel: CancellationToken::default(), timeline_dir: get_timeline_dir(conf, &ttid), + manager_ctl: ManagerCtl::new(), broker_active: AtomicBool::new(false), wal_backup_active: AtomicBool::new(false), last_removed_segno: AtomicU64::new(0), + mgr_status: AtomicStatus::new(), }) } @@ -442,7 +604,7 @@ impl Timeline { fs::create_dir_all(&self.timeline_dir).await?; // Write timeline to disk and start background tasks. 
- if let Err(e) = shared_state.sk.state.flush().await { + if let Err(e) = shared_state.sk.state_mut().flush().await { // Bootstrap failed, cancel timeline and remove timeline directory. self.cancel(shared_state); @@ -465,12 +627,16 @@ impl Timeline { conf: &SafeKeeperConf, broker_active_set: Arc, ) { + let (tx, rx) = self.manager_ctl.bootstrap_manager(); + // Start manager task which will monitor timeline state and update // background tasks. tokio::spawn(timeline_manager::main_task( - self.clone(), + ManagerTimeline { tli: self.clone() }, conf.clone(), broker_active_set, + tx, + rx, )); } @@ -507,7 +673,7 @@ impl Timeline { self.cancel.cancel(); // Close associated FDs. Nobody will be able to touch timeline data once // it is cancelled, so WAL storage won't be opened again. - shared_state.sk.wal_store.close(); + shared_state.sk.close_wal_store(); } /// Returns if timeline is cancelled. @@ -547,12 +713,15 @@ impl Timeline { /// Returns state of the timeline. pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) { let state = self.read_shared_state().await; - (state.sk.state.inmem.clone(), state.sk.state.clone()) + ( + state.sk.state().inmem.clone(), + TimelinePersistentState::clone(state.sk.state()), + ) } /// Returns latest backup_lsn. pub async fn get_wal_backup_lsn(&self) -> Lsn { - self.read_shared_state().await.sk.state.inmem.backup_lsn + self.read_shared_state().await.sk.state().inmem.backup_lsn } /// Sets backup_lsn to the given value. @@ -562,7 +731,7 @@ impl Timeline { } let mut state = self.write_shared_state().await; - state.sk.state.inmem.backup_lsn = max(state.sk.state.inmem.backup_lsn, backup_lsn); + state.sk.state_mut().inmem.backup_lsn = max(state.sk.state().inmem.backup_lsn, backup_lsn); // we should check whether to shut down offloader, but this will be done // soon by peer communication anyway. Ok(()) @@ -604,7 +773,7 @@ impl Timeline { /// Returns flush_lsn. pub async fn get_flush_lsn(&self) -> Lsn { - self.read_shared_state().await.sk.wal_store.flush_lsn() + self.read_shared_state().await.sk.flush_lsn() } /// Gather timeline data for metrics. 
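The residence-guard flow this patch introduces (a task asks the manager for a guard over a channel; the manager un-evicts the timeline if needed and replies with a guard whose drop signals that the WAL files may be evicted again) is, at its core, a small request/response pattern over tokio channels. The sketch below shows that pattern with purely illustrative names (`GuardRequest`, `Guard`, `manager_loop`, `request_guard`) rather than the patch's actual `ManagerCtl`/`ResidenceGuard` API.

```
use tokio::sync::{mpsc, oneshot};

// Dropping the guard is how a task signals it no longer needs WAL on disk.
struct Guard;

// A request for a guard, carrying the channel on which the manager replies.
struct GuardRequest {
    reply: oneshot::Sender<Guard>,
}

// Manager side: serve guard requests one by one.
async fn manager_loop(mut rx: mpsc::Receiver<GuardRequest>) {
    while let Some(req) = rx.recv().await {
        // The real manager would first download WAL back from remote storage
        // if the timeline is currently offloaded, then track the issued guard.
        let _ = req.reply.send(Guard);
    }
}

// Caller side: roughly what a wal_residence_guard()-style call boils down to.
async fn request_guard(tx: &mpsc::Sender<GuardRequest>) -> anyhow::Result<Guard> {
    let (reply, rx) = oneshot::channel();
    tx.send(GuardRequest { reply })
        .await
        .map_err(|_| anyhow::anyhow!("manager task is gone"))?;
    Ok(rx.await?)
}
```

Routing every guard through the manager is what lets it keep an accurate view of tasks that still need WAL on disk, which is exactly one of the conditions checked before switching a timeline to the offloaded state.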
@@ -623,11 +792,11 @@ impl Timeline { timeline_is_active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed), - epoch_start_lsn: state.sk.term_start_lsn, - mem_state: state.sk.state.inmem.clone(), - persisted_state: state.sk.state.clone(), - flush_lsn: state.sk.wal_store.flush_lsn(), - wal_storage: state.sk.wal_store.get_metrics(), + epoch_start_lsn: state.sk.term_start_lsn(), + mem_state: state.sk.state().inmem.clone(), + persisted_state: TimelinePersistentState::clone(state.sk.state()), + flush_lsn: state.sk.flush_lsn(), + wal_storage: state.sk.wal_storage_metrics(), }) } @@ -636,7 +805,7 @@ impl Timeline { let state = self.read_shared_state().await; let (write_lsn, write_record_lsn, flush_lsn, file_open) = - state.sk.wal_store.internal_state(); + state.sk.wal_storage_internal_state(); debug_dump::Memory { is_cancelled: self.is_cancelled(), @@ -646,8 +815,9 @@ impl Timeline { active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed), - epoch_start_lsn: state.sk.term_start_lsn, - mem_state: state.sk.state.inmem.clone(), + epoch_start_lsn: state.sk.term_start_lsn(), + mem_state: state.sk.state().inmem.clone(), + mgr_status: self.mgr_status.get(), write_lsn, write_record_lsn, flush_lsn, @@ -661,34 +831,77 @@ impl Timeline { f: impl FnOnce(&mut TimelinePersistentState) -> Result, ) -> Result { let mut state = self.write_shared_state().await; - let mut persistent_state = state.sk.state.start_change(); + let mut persistent_state = state.sk.state_mut().start_change(); // If f returns error, we abort the change and don't persist anything. let res = f(&mut persistent_state)?; // If persisting fails, we abort the change and return error. - state.sk.state.finish_change(&persistent_state).await?; + state + .sk + .state_mut() + .finish_change(&persistent_state) + .await?; Ok(res) } /// Get the timeline guard for reading/writing WAL files. - /// TODO: if WAL files are not present on disk (evicted), they will be - /// downloaded from S3. Also there will logic for preventing eviction - /// while someone is holding FullAccessTimeline guard. - pub async fn full_access_guard(self: &Arc) -> Result { + /// If WAL files are not present on disk (evicted), they will be automatically + /// downloaded from remote storage. This is done in the manager task, which is + /// responsible for issuing all guards. + /// + /// NB: don't use this function from timeline_manager, it will deadlock. + /// NB: don't use this function while holding shared_state lock. + pub async fn wal_residence_guard(self: &Arc) -> Result { if self.is_cancelled() { bail!(TimelineError::Cancelled(self.ttid)); } - Ok(FullAccessTimeline { tli: self.clone() }) + + debug!("requesting WalResidentTimeline guard"); + + // Wait 5 seconds for the guard to be acquired, should be enough for uneviction. + // If it times out, most likely there is a deadlock in the manager task. 
+ let res = tokio::time::timeout( + Duration::from_secs(5), + self.manager_ctl.wal_residence_guard(), + ) + .await; + + let guard = match res { + Ok(Ok(guard)) => guard, + Ok(Err(e)) => { + warn!( + "error while acquiring WalResidentTimeline guard (current state {:?}): {}", + self.mgr_status.get(), + e + ); + return Err(e); + } + Err(_) => { + warn!( + "timeout while acquiring WalResidentTimeline guard (current state {:?})", + self.mgr_status.get() + ); + anyhow::bail!("timeout while acquiring WalResidentTimeline guard"); + } + }; + + Ok(WalResidentTimeline::new(self.clone(), guard)) } } /// This is a guard that allows to read/write disk timeline state. -/// All tasks that are using the disk should use this guard. -#[derive(Clone)] -pub struct FullAccessTimeline { +/// All tasks that are trying to read/write WAL from disk should use this guard. +pub struct WalResidentTimeline { pub tli: Arc, + _guard: ResidenceGuard, } -impl Deref for FullAccessTimeline { +impl WalResidentTimeline { + pub fn new(tli: Arc, _guard: ResidenceGuard) -> Self { + WalResidentTimeline { tli, _guard } + } +} + +impl Deref for WalResidentTimeline { type Target = Arc; fn deref(&self) -> &Self::Target { @@ -696,7 +909,7 @@ impl Deref for FullAccessTimeline { } } -impl FullAccessTimeline { +impl WalResidentTimeline { /// Returns true if walsender should stop sending WAL to pageserver. We /// terminate it if remote_consistent_lsn reached commit_lsn and there is no /// computes. While there might be nothing to stream already, we learn about @@ -708,8 +921,8 @@ impl FullAccessTimeline { } let shared_state = self.read_shared_state().await; if self.walreceivers.get_num() == 0 { - return shared_state.sk.state.inmem.commit_lsn == Lsn(0) || // no data at all yet - reported_remote_consistent_lsn >= shared_state.sk.state.inmem.commit_lsn; + return shared_state.sk.state().inmem.commit_lsn == Lsn(0) || // no data at all yet + reported_remote_consistent_lsn >= shared_state.sk.state().inmem.commit_lsn; } false } @@ -717,11 +930,11 @@ impl FullAccessTimeline { /// Ensure that current term is t, erroring otherwise, and lock the state. pub async fn acquire_term(&self, t: Term) -> Result { let ss = self.read_shared_state().await; - if ss.sk.state.acceptor_state.term != t { + if ss.sk.state().acceptor_state.term != t { bail!( "failed to acquire term {}, current term {}", t, - ss.sk.state.acceptor_state.term + ss.sk.state().acceptor_state.term ); } Ok(ss) @@ -739,7 +952,7 @@ impl FullAccessTimeline { let mut rmsg: Option; { let mut shared_state = self.write_shared_state().await; - rmsg = shared_state.sk.process_msg(msg).await?; + rmsg = shared_state.sk.safekeeper().process_msg(msg).await?; // if this is AppendResponse, fill in proper hot standby feedback. if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg { @@ -769,8 +982,141 @@ impl FullAccessTimeline { /// Update in memory remote consistent lsn. pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) { let mut shared_state = self.write_shared_state().await; - shared_state.sk.state.inmem.remote_consistent_lsn = - max(shared_state.sk.state.inmem.remote_consistent_lsn, candidate); + shared_state.sk.state_mut().inmem.remote_consistent_lsn = max( + shared_state.sk.state().inmem.remote_consistent_lsn, + candidate, + ); + } +} + +/// This struct contains methods that are used by timeline manager task. 
+pub(crate) struct ManagerTimeline { + pub(crate) tli: Arc, +} + +impl Deref for ManagerTimeline { + type Target = Arc; + + fn deref(&self) -> &Self::Target { + &self.tli + } +} + +impl ManagerTimeline { + pub(crate) fn timeline_dir(&self) -> &Utf8PathBuf { + &self.tli.timeline_dir + } + + /// Manager requests this state on startup. + pub(crate) async fn bootstrap_mgr(&self) -> (bool, Option) { + let shared_state = self.read_shared_state().await; + let is_offloaded = matches!( + shared_state.sk.state().eviction_state, + EvictionState::Offloaded(_) + ); + let partial_backup_uploaded = shared_state.sk.state().partial_backup.uploaded_segment(); + + (is_offloaded, partial_backup_uploaded) + } + + /// Try to switch state Present->Offloaded. + pub(crate) async fn switch_to_offloaded( + &self, + partial: &PartialRemoteSegment, + ) -> anyhow::Result<()> { + let mut shared = self.write_shared_state().await; + + // updating control file + let mut pstate = shared.sk.state_mut().start_change(); + + if !matches!(pstate.eviction_state, EvictionState::Present) { + bail!( + "cannot switch to offloaded state, current state is {:?}", + pstate.eviction_state + ); + } + + if partial.flush_lsn != shared.sk.flush_lsn() { + bail!( + "flush_lsn mismatch in partial backup, expected {}, got {}", + shared.sk.flush_lsn(), + partial.flush_lsn + ); + } + + if partial.commit_lsn != pstate.commit_lsn { + bail!( + "commit_lsn mismatch in partial backup, expected {}, got {}", + pstate.commit_lsn, + partial.commit_lsn + ); + } + + if partial.term != shared.sk.last_log_term() { + bail!( + "term mismatch in partial backup, expected {}, got {}", + shared.sk.last_log_term(), + partial.term + ); + } + + pstate.eviction_state = EvictionState::Offloaded(shared.sk.flush_lsn()); + shared.sk.state_mut().finish_change(&pstate).await?; + // control file is now switched to Offloaded state + + // now we can switch shared.sk to Offloaded, shouldn't fail + let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty); + let cfile_state = prev_sk.take_state(); + shared.sk = StateSK::Offloaded(Box::new(cfile_state)); + + Ok(()) + } + + /// Try to switch state Offloaded->Present. + pub(crate) async fn switch_to_present(&self) -> anyhow::Result<()> { + let conf = GlobalTimelines::get_global_config(); + let mut shared = self.write_shared_state().await; + + // trying to restore WAL storage + let wal_store = wal_storage::PhysicalStorage::new( + &self.ttid, + self.timeline_dir.clone(), + &conf, + shared.sk.state(), + )?; + + // updating control file + let mut pstate = shared.sk.state_mut().start_change(); + + if !matches!(pstate.eviction_state, EvictionState::Offloaded(_)) { + bail!( + "cannot switch to present state, current state is {:?}", + pstate.eviction_state + ); + } + + if wal_store.flush_lsn() != shared.sk.flush_lsn() { + bail!( + "flush_lsn mismatch in restored WAL, expected {}, got {}", + shared.sk.flush_lsn(), + wal_store.flush_lsn() + ); + } + + pstate.eviction_state = EvictionState::Present; + shared.sk.state_mut().finish_change(&pstate).await?; + + // now we can switch shared.sk to Present, shouldn't fail + let prev_sk = std::mem::replace(&mut shared.sk, StateSK::Empty); + let cfile_state = prev_sk.take_state(); + shared.sk = StateSK::Loaded(SafeKeeper::new(cfile_state, wal_store, conf.my_id)?); + + Ok(()) + } + + /// Update current manager state, useful for debugging manager deadlocks. 
+ pub(crate) fn set_status(&self, status: timeline_manager::Status) { + self.mgr_status.store(status, Ordering::Relaxed); } } @@ -784,13 +1130,13 @@ async fn delete_dir(path: &Utf8PathBuf) -> Result { } /// Get a path to the tenant directory. If you just need to get a timeline directory, -/// use FullAccessTimeline::get_timeline_dir instead. +/// use WalResidentTimeline::get_timeline_dir instead. pub(crate) fn get_tenant_dir(conf: &SafeKeeperConf, tenant_id: &TenantId) -> Utf8PathBuf { conf.workdir.join(tenant_id.to_string()) } /// Get a path to the timeline directory. If you need to read WAL files from disk, -/// use FullAccessTimeline::get_timeline_dir instead. This function does not check +/// use WalResidentTimeline::get_timeline_dir instead. This function does not check /// timeline eviction status and WAL files might not be present on disk. pub(crate) fn get_timeline_dir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Utf8PathBuf { get_tenant_dir(conf, &ttid.tenant_id).join(ttid.timeline_id.to_string()) diff --git a/safekeeper/src/timeline_eviction.rs b/safekeeper/src/timeline_eviction.rs new file mode 100644 index 000000000000..b303d41b7bab --- /dev/null +++ b/safekeeper/src/timeline_eviction.rs @@ -0,0 +1,366 @@ +//! Code related to evicting WAL files to remote storage. The actual upload is done by the +//! partial WAL backup code. This file has code to delete and re-download WAL files, +//! cross-validate with partial WAL backup if local file is still present. + +use anyhow::Context; +use camino::Utf8PathBuf; +use remote_storage::RemotePath; +use tokio::{ + fs::File, + io::{AsyncRead, AsyncWriteExt}, +}; +use tracing::{debug, info, instrument, warn}; +use utils::crashsafe::durable_rename; + +use crate::{ + timeline_manager::{Manager, StateSnapshot}, + wal_backup, + wal_backup_partial::{self, PartialRemoteSegment}, + wal_storage::wal_file_paths, +}; + +impl Manager { + /// Returns true if the timeline is ready for eviction. + /// Current criteria: + /// - no active tasks + /// - control file is flushed (no next event scheduled) + /// - no WAL residence guards + /// - no pushes to the broker + /// - partial WAL backup is uploaded + pub(crate) fn ready_for_eviction( + &self, + next_event: &Option, + state: &StateSnapshot, + ) -> bool { + self.backup_task.is_none() + && self.recovery_task.is_none() + && self.wal_removal_task.is_none() + && self.partial_backup_task.is_none() + && self.partial_backup_uploaded.is_some() + && next_event.is_none() + && self.access_service.is_empty() + && !self.tli_broker_active.get() + && !wal_backup_partial::needs_uploading(state, &self.partial_backup_uploaded) + && self + .partial_backup_uploaded + .as_ref() + .unwrap() + .flush_lsn + .segment_number(self.wal_seg_size) + == self.last_removed_segno + 1 + } + + /// Evict the timeline to remote storage. + #[instrument(name = "evict_timeline", skip_all)] + pub(crate) async fn evict_timeline(&mut self) { + assert!(!self.is_offloaded); + let partial_backup_uploaded = match &self.partial_backup_uploaded { + Some(p) => p.clone(), + None => { + warn!("no partial backup uploaded, skipping eviction"); + return; + } + }; + + info!("starting eviction, using {:?}", partial_backup_uploaded); + + if let Err(e) = do_eviction(self, &partial_backup_uploaded).await { + warn!("failed to evict timeline: {:?}", e); + return; + } + + info!("successfully evicted timeline"); + } + + /// Restore evicted timeline from remote storage. 
+    #[instrument(name = "unevict_timeline", skip_all)]
+    pub(crate) async fn unevict_timeline(&mut self) {
+        assert!(self.is_offloaded);
+        let partial_backup_uploaded = match &self.partial_backup_uploaded {
+            Some(p) => p.clone(),
+            None => {
+                warn!("no partial backup uploaded, cannot unevict");
+                return;
+            }
+        };
+
+        info!("starting uneviction, using {:?}", partial_backup_uploaded);
+
+        if let Err(e) = do_uneviction(self, &partial_backup_uploaded).await {
+            warn!("failed to unevict timeline: {:?}", e);
+            return;
+        }
+
+        info!("successfully restored evicted timeline");
+    }
+}
+
+/// Ensure that content matches the remote partial backup, if local segment exists.
+/// Then change state in control file and in-memory. If `delete_offloaded_wal` is set,
+/// delete the local segment.
+async fn do_eviction(mgr: &mut Manager, partial: &PartialRemoteSegment) -> anyhow::Result<()> {
+    compare_local_segment_with_remote(mgr, partial).await?;
+
+    mgr.tli.switch_to_offloaded(partial).await?;
+    // switch manager state as soon as possible
+    mgr.is_offloaded = true;
+
+    if mgr.conf.delete_offloaded_wal {
+        delete_local_segment(mgr, partial).await?;
+    }
+
+    Ok(())
+}
+
+/// Ensure that content matches the remote partial backup, if local segment exists.
+/// Then download segment to local disk and change state in control file and in-memory.
+async fn do_uneviction(mgr: &mut Manager, partial: &PartialRemoteSegment) -> anyhow::Result<()> {
+    // if the local segment is present, validate it
+    compare_local_segment_with_remote(mgr, partial).await?;
+
+    // atomically download the partial segment
+    redownload_partial_segment(mgr, partial).await?;
+
+    mgr.tli.switch_to_present().await?;
+    // switch manager state as soon as possible
+    mgr.is_offloaded = false;
+
+    Ok(())
+}
+
+/// Delete local WAL segment.
+async fn delete_local_segment(mgr: &Manager, partial: &PartialRemoteSegment) -> anyhow::Result<()> {
+    let local_path = local_segment_path(mgr, partial);
+
+    info!("deleting WAL file to evict: {}", local_path);
+    tokio::fs::remove_file(&local_path).await?;
+    Ok(())
+}
+
+/// Redownload partial segment from remote storage.
+/// The segment is downloaded to a temporary file and then renamed to the final path.
+async fn redownload_partial_segment(
+    mgr: &Manager,
+    partial: &PartialRemoteSegment,
+) -> anyhow::Result<()> {
+    let tmp_file = mgr.tli.timeline_dir().join("remote_partial.tmp");
+    let remote_segfile = remote_segment_path(mgr, partial)?;
+
+    debug!(
+        "redownloading partial segment: {} -> {}",
+        remote_segfile, tmp_file
+    );
+
+    let mut reader = wal_backup::read_object(&remote_segfile, 0).await?;
+    let mut file = File::create(&tmp_file).await?;
+
+    let actual_len = tokio::io::copy(&mut reader, &mut file).await?;
+    let expected_len = partial.flush_lsn.segment_offset(mgr.wal_seg_size);
+
+    if actual_len != expected_len as u64 {
+        anyhow::bail!(
+            "partial downloaded {} bytes, expected {}",
+            actual_len,
+            expected_len
+        );
+    }
+
+    if actual_len > mgr.wal_seg_size as u64 {
+        anyhow::bail!(
+            "remote segment is too long: {} bytes, expected {}",
+            actual_len,
+            mgr.wal_seg_size
+        );
+    }
+    file.set_len(mgr.wal_seg_size as u64).await?;
+    file.flush().await?;
+
+    let final_path = local_segment_path(mgr, partial);
+    info!(
+        "downloaded {} bytes, renaming to {}",
+        actual_len, final_path,
+    );
+    if let Err(e) = durable_rename(&tmp_file, &final_path, !mgr.conf.no_sync).await {
+        // Probably rename succeeded, but fsync of it failed. Remove
+        // the file then to avoid using it.
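+        // If the rename did go through, the temp file no longer exists, hence NotFound is ignored below.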
+        tokio::fs::remove_file(tmp_file)
+            .await
+            .or_else(utils::fs_ext::ignore_not_found)?;
+        return Err(e.into());
+    }
+
+    Ok(())
+}
+
+/// Compare local WAL segment with partial WAL backup in remote storage.
+/// If the local segment is not present, the function does nothing.
+/// If the local segment is present, it compares the local segment with the remote one.
+async fn compare_local_segment_with_remote(
+    mgr: &Manager,
+    partial: &PartialRemoteSegment,
+) -> anyhow::Result<()> {
+    let local_path = local_segment_path(mgr, partial);
+
+    match File::open(&local_path).await {
+        Ok(mut local_file) => do_validation(mgr, &mut local_file, mgr.wal_seg_size, partial)
+            .await
+            .context("validation failed"),
+        Err(_) => {
+            info!(
+                "local WAL file {} is not present, skipping validation",
+                local_path
+            );
+            Ok(())
+        }
+    }
+}
+
+/// Compare opened local WAL segment with partial WAL backup in remote storage.
+/// Validate full content of both files.
+async fn do_validation(
+    mgr: &Manager,
+    file: &mut File,
+    wal_seg_size: usize,
+    partial: &PartialRemoteSegment,
+) -> anyhow::Result<()> {
+    let local_size = file.metadata().await?.len() as usize;
+    if local_size != wal_seg_size {
+        anyhow::bail!(
+            "local segment size is invalid: found {}, expected {}",
+            local_size,
+            wal_seg_size
+        );
+    }
+
+    let remote_segfile = remote_segment_path(mgr, partial)?;
+    let mut remote_reader: std::pin::Pin<Box<dyn AsyncRead + Send + Sync>> =
+        wal_backup::read_object(&remote_segfile, 0).await?;
+
+    // remote segment should have bytes exactly up to `flush_lsn`
+    let expected_remote_size = partial.flush_lsn.segment_offset(mgr.wal_seg_size);
+    // let's compare the first `expected_remote_size` bytes
+    compare_n_bytes(&mut remote_reader, file, expected_remote_size).await?;
+    // and check that the remote segment ends here
+    check_end(&mut remote_reader).await?;
+
+    // if local segment is longer, the rest should be zeroes
+    read_n_zeroes(file, mgr.wal_seg_size - expected_remote_size).await?;
+    // and check that the local segment ends here
+    check_end(file).await?;
+
+    Ok(())
+}
+
+fn local_segment_path(mgr: &Manager, partial: &PartialRemoteSegment) -> Utf8PathBuf {
+    let flush_lsn = partial.flush_lsn;
+    let segno = flush_lsn.segment_number(mgr.wal_seg_size);
+    let (_, local_partial_segfile) =
+        wal_file_paths(mgr.tli.timeline_dir(), segno, mgr.wal_seg_size);
+    local_partial_segfile
+}
+
+fn remote_segment_path(
+    mgr: &Manager,
+    partial: &PartialRemoteSegment,
+) -> anyhow::Result<RemotePath> {
+    let remote_timeline_path = wal_backup::remote_timeline_path(&mgr.tli.ttid)?;
+    Ok(partial.remote_path(&remote_timeline_path))
+}
+
+/// Compare first `n` bytes of two readers. If the bytes differ, return an error.
+/// If the readers are shorter than `n`, return an error.
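+/// Comparison is done in 32 KiB chunks; on mismatch the error reports the offset of the first differing byte.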
+async fn compare_n_bytes(reader1: &mut R1, reader2: &mut R2, n: usize) -> anyhow::Result<()> +where + R1: AsyncRead + Unpin, + R2: AsyncRead + Unpin, +{ + use tokio::io::AsyncReadExt; + + const BUF_SIZE: usize = 32 * 1024; + + let mut buffer1 = vec![0u8; BUF_SIZE]; + let mut buffer2 = vec![0u8; BUF_SIZE]; + + let mut offset = 0; + + while offset < n { + let bytes_to_read = std::cmp::min(BUF_SIZE, n - offset); + + let bytes_read1 = reader1 + .read(&mut buffer1[..bytes_to_read]) + .await + .with_context(|| format!("failed to read from reader1 at offset {}", offset))?; + if bytes_read1 == 0 { + anyhow::bail!("unexpected EOF from reader1 at offset {}", offset); + } + + let bytes_read2 = reader2 + .read_exact(&mut buffer2[..bytes_read1]) + .await + .with_context(|| { + format!( + "failed to read {} bytes from reader2 at offset {}", + bytes_read1, offset + ) + })?; + assert!(bytes_read2 == bytes_read1); + + if buffer1[..bytes_read1] != buffer2[..bytes_read2] { + let diff_offset = buffer1[..bytes_read1] + .iter() + .zip(buffer2[..bytes_read2].iter()) + .position(|(a, b)| a != b) + .expect("mismatched buffers, but no difference found"); + anyhow::bail!("mismatch at offset {}", offset + diff_offset); + } + + offset += bytes_read1; + } + + Ok(()) +} + +async fn check_end(mut reader: R) -> anyhow::Result<()> +where + R: AsyncRead + Unpin, +{ + use tokio::io::AsyncReadExt; + + let mut buffer = [0u8; 1]; + let bytes_read = reader.read(&mut buffer).await?; + if bytes_read != 0 { + anyhow::bail!("expected EOF, found bytes"); + } + Ok(()) +} + +async fn read_n_zeroes(reader: &mut R, n: usize) -> anyhow::Result<()> +where + R: AsyncRead + Unpin, +{ + use tokio::io::AsyncReadExt; + + const BUF_SIZE: usize = 32 * 1024; + let mut buffer = vec![0u8; BUF_SIZE]; + let mut offset = 0; + + while offset < n { + let bytes_to_read = std::cmp::min(BUF_SIZE, n - offset); + + let bytes_read = reader + .read(&mut buffer[..bytes_to_read]) + .await + .context("expected zeroes, got read error")?; + if bytes_read == 0 { + anyhow::bail!("expected zeroes, got EOF"); + } + + if buffer[..bytes_read].iter().all(|&b| b == 0) { + offset += bytes_read; + } else { + anyhow::bail!("non-zero byte found"); + } + } + + Ok(()) +} diff --git a/safekeeper/src/timeline_guard.rs b/safekeeper/src/timeline_guard.rs new file mode 100644 index 000000000000..e249c859b4bb --- /dev/null +++ b/safekeeper/src/timeline_guard.rs @@ -0,0 +1,71 @@ +//! Timeline residence guard is needed to ensure that WAL segments are present on disk, +//! as long as the code is holding the guard. This file implements guard logic, to issue +//! and drop guards, and to notify the manager when the guard is dropped. + +use std::collections::HashSet; + +use tracing::{debug, warn}; + +use crate::timeline_manager::ManagerCtlMessage; + +#[derive(Debug, Clone, Copy)] +pub struct GuardId(u64); + +pub struct ResidenceGuard { + manager_tx: tokio::sync::mpsc::UnboundedSender, + guard_id: GuardId, +} + +impl Drop for ResidenceGuard { + fn drop(&mut self) { + // notify the manager that the guard is dropped + let res = self + .manager_tx + .send(ManagerCtlMessage::GuardDrop(self.guard_id)); + if let Err(e) = res { + warn!("failed to send GuardDrop message: {:?}", e); + } + } +} + +/// AccessService is responsible for issuing and dropping residence guards. +/// All guards are stored in the `guards` set. +/// TODO: it's possible to add `String` name to each guard, for better observability. 
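+/// Dropping a `ResidenceGuard` notifies the manager via `ManagerCtlMessage::GuardDrop`, which ends up in `drop_guard` here.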
+pub(crate) struct AccessService { + next_guard_id: u64, + guards: HashSet, + manager_tx: tokio::sync::mpsc::UnboundedSender, +} + +impl AccessService { + pub(crate) fn new(manager_tx: tokio::sync::mpsc::UnboundedSender) -> Self { + Self { + next_guard_id: 0, + guards: HashSet::new(), + manager_tx, + } + } + + pub(crate) fn is_empty(&self) -> bool { + self.guards.is_empty() + } + + pub(crate) fn create_guard(&mut self) -> ResidenceGuard { + let guard_id = self.next_guard_id; + self.next_guard_id += 1; + self.guards.insert(guard_id); + + let guard_id = GuardId(guard_id); + debug!("issued a new guard {:?}", guard_id); + + ResidenceGuard { + manager_tx: self.manager_tx.clone(), + guard_id, + } + } + + pub(crate) fn drop_guard(&mut self, guard_id: GuardId) { + debug!("dropping guard {:?}", guard_id); + assert!(self.guards.remove(&guard_id.0)); + } +} diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 592426bba34d..c3abeac6449f 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -2,66 +2,83 @@ //! It is spawned alongside each timeline and exits when the timeline is deleted. //! It watches for changes in the timeline state and decides when to spawn or kill background tasks. //! It also can manage some reactive state, like should the timeline be active for broker pushes or not. +//! +//! Be aware that you need to be extra careful with manager code, because it is not respawned on panic. +//! Also, if it will stuck in some branch, it will prevent any further progress in the timeline. use std::{ - sync::Arc, - time::{Duration, Instant}, + sync::{atomic::AtomicUsize, Arc}, + time::Duration, }; use postgres_ffi::XLogSegNo; -use tokio::task::{JoinError, JoinHandle}; -use tracing::{info, info_span, instrument, warn, Instrument}; +use serde::{Deserialize, Serialize}; +use tokio::{ + task::{JoinError, JoinHandle}, + time::Instant, +}; +use tracing::{debug, info, info_span, instrument, warn, Instrument}; use utils::lsn::Lsn; use crate::{ - control_file::Storage, + control_file::{FileStorage, Storage}, metrics::{MANAGER_ACTIVE_CHANGES, MANAGER_ITERATIONS_TOTAL}, recovery::recovery_main, remove_wal::calc_horizon_lsn, + safekeeper::Term, send_wal::WalSenders, - timeline::{PeerInfo, ReadGuardSharedState, Timeline}, + state::TimelineState, + timeline::{ManagerTimeline, PeerInfo, ReadGuardSharedState, StateSK, WalResidentTimeline}, + timeline_guard::{AccessService, GuardId, ResidenceGuard}, timelines_set::{TimelineSetGuard, TimelinesSet}, wal_backup::{self, WalBackupTaskHandle}, - wal_backup_partial, SafeKeeperConf, + wal_backup_partial::{self, PartialRemoteSegment}, + SafeKeeperConf, }; -pub struct StateSnapshot { +pub(crate) struct StateSnapshot { // inmem values - pub commit_lsn: Lsn, - pub backup_lsn: Lsn, - pub remote_consistent_lsn: Lsn, + pub(crate) commit_lsn: Lsn, + pub(crate) backup_lsn: Lsn, + pub(crate) remote_consistent_lsn: Lsn, // persistent control file values - pub cfile_peer_horizon_lsn: Lsn, - pub cfile_remote_consistent_lsn: Lsn, - pub cfile_backup_lsn: Lsn, + pub(crate) cfile_peer_horizon_lsn: Lsn, + pub(crate) cfile_remote_consistent_lsn: Lsn, + pub(crate) cfile_backup_lsn: Lsn, + + // latest state + pub(crate) flush_lsn: Lsn, + pub(crate) last_log_term: Term, // misc - pub cfile_last_persist_at: Instant, - pub inmem_flush_pending: bool, - pub wal_removal_on_hold: bool, - pub peers: Vec, + pub(crate) cfile_last_persist_at: std::time::Instant, + pub(crate) inmem_flush_pending: bool, + pub(crate) 
wal_removal_on_hold: bool, + pub(crate) peers: Vec, } impl StateSnapshot { /// Create a new snapshot of the timeline state. fn new(read_guard: ReadGuardSharedState, heartbeat_timeout: Duration) -> Self { + let state = read_guard.sk.state(); Self { - commit_lsn: read_guard.sk.state.inmem.commit_lsn, - backup_lsn: read_guard.sk.state.inmem.backup_lsn, - remote_consistent_lsn: read_guard.sk.state.inmem.remote_consistent_lsn, - cfile_peer_horizon_lsn: read_guard.sk.state.peer_horizon_lsn, - cfile_remote_consistent_lsn: read_guard.sk.state.remote_consistent_lsn, - cfile_backup_lsn: read_guard.sk.state.backup_lsn, - cfile_last_persist_at: read_guard.sk.state.pers.last_persist_at(), - inmem_flush_pending: Self::has_unflushed_inmem_state(&read_guard), + commit_lsn: state.inmem.commit_lsn, + backup_lsn: state.inmem.backup_lsn, + remote_consistent_lsn: state.inmem.remote_consistent_lsn, + cfile_peer_horizon_lsn: state.peer_horizon_lsn, + cfile_remote_consistent_lsn: state.remote_consistent_lsn, + cfile_backup_lsn: state.backup_lsn, + flush_lsn: read_guard.sk.flush_lsn(), + last_log_term: read_guard.sk.last_log_term(), + cfile_last_persist_at: state.pers.last_persist_at(), + inmem_flush_pending: Self::has_unflushed_inmem_state(state), wal_removal_on_hold: read_guard.wal_removal_on_hold, peers: read_guard.get_peers(heartbeat_timeout), } } - fn has_unflushed_inmem_state(read_guard: &ReadGuardSharedState) -> bool { - let state = &read_guard.sk.state; + fn has_unflushed_inmem_state(state: &TimelineState) -> bool { state.inmem.commit_lsn > state.commit_lsn || state.inmem.backup_lsn > state.backup_lsn || state.inmem.peer_horizon_lsn > state.peer_horizon_lsn @@ -73,314 +90,560 @@ impl StateSnapshot { /// There is no need to check for updates more often than this. const REFRESH_INTERVAL: Duration = Duration::from_millis(300); -/// How often to save the control file if the is no other activity. -const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300); +pub enum ManagerCtlMessage { + /// Request to get a guard for WalResidentTimeline, with WAL files available locally. + GuardRequest(tokio::sync::oneshot::Sender>), + /// Request to drop the guard. + GuardDrop(GuardId), +} + +impl std::fmt::Debug for ManagerCtlMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ManagerCtlMessage::GuardRequest(_) => write!(f, "GuardRequest"), + ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({:?})", id), + } + } +} + +pub struct ManagerCtl { + manager_tx: tokio::sync::mpsc::UnboundedSender, + + // this is used to initialize manager, it will be moved out in bootstrap(). + init_manager_rx: + std::sync::Mutex>>, +} + +impl Default for ManagerCtl { + fn default() -> Self { + Self::new() + } +} + +impl ManagerCtl { + pub fn new() -> Self { + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + Self { + manager_tx: tx, + init_manager_rx: std::sync::Mutex::new(Some(rx)), + } + } + + /// Issue a new guard and wait for manager to prepare the timeline. + /// Sends a message to the manager and waits for the response. + /// Can be blocked indefinitely if the manager is stuck. + pub async fn wal_residence_guard(&self) -> anyhow::Result { + let (tx, rx) = tokio::sync::oneshot::channel(); + self.manager_tx.send(ManagerCtlMessage::GuardRequest(tx))?; + + // wait for the manager to respond with the guard + rx.await + .map_err(|e| anyhow::anyhow!("response read fail: {:?}", e)) + .and_then(std::convert::identity) + } + + /// Must be called exactly once to bootstrap the manager. 
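+    /// Returns the control message sender and the receiver the manager task will poll; panics if called a second time.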
+ pub fn bootstrap_manager( + &self, + ) -> ( + tokio::sync::mpsc::UnboundedSender, + tokio::sync::mpsc::UnboundedReceiver, + ) { + let rx = self + .init_manager_rx + .lock() + .expect("mutex init_manager_rx poisoned") + .take() + .expect("manager already bootstrapped"); + + (self.manager_tx.clone(), rx) + } +} + +pub(crate) struct Manager { + // configuration & dependencies + pub(crate) tli: ManagerTimeline, + pub(crate) conf: SafeKeeperConf, + pub(crate) wal_seg_size: usize, + pub(crate) walsenders: Arc, + + // current state + pub(crate) state_version_rx: tokio::sync::watch::Receiver, + pub(crate) num_computes_rx: tokio::sync::watch::Receiver, + pub(crate) tli_broker_active: TimelineSetGuard, + pub(crate) last_removed_segno: XLogSegNo, + pub(crate) is_offloaded: bool, + + // background tasks + pub(crate) backup_task: Option, + pub(crate) recovery_task: Option>, + pub(crate) wal_removal_task: Option>>, + + // partial backup + pub(crate) partial_backup_task: Option>>, + pub(crate) partial_backup_uploaded: Option, + + // misc + pub(crate) access_service: AccessService, +} /// This task gets spawned alongside each timeline and is responsible for managing the timeline's /// background tasks. /// Be careful, this task is not respawned on panic, so it should not panic. #[instrument(name = "manager", skip_all, fields(ttid = %tli.ttid))] pub async fn main_task( - tli: Arc, + tli: ManagerTimeline, conf: SafeKeeperConf, broker_active_set: Arc, + manager_tx: tokio::sync::mpsc::UnboundedSender, + mut manager_rx: tokio::sync::mpsc::UnboundedReceiver, ) { + tli.set_status(Status::Started); + + let defer_tli = tli.tli.clone(); scopeguard::defer! { - if tli.is_cancelled() { + if defer_tli.is_cancelled() { info!("manager task finished"); } else { warn!("manager task finished prematurely"); } }; - // configuration & dependencies - let wal_seg_size = tli.get_wal_seg_size().await; - let heartbeat_timeout = conf.heartbeat_timeout; - let walsenders = tli.get_walsenders(); - let walreceivers = tli.get_walreceivers(); - - // current state - let mut state_version_rx = tli.get_state_version_rx(); - let mut num_computes_rx = walreceivers.get_num_rx(); - let mut tli_broker_active = broker_active_set.guard(tli.clone()); - let mut last_removed_segno = 0 as XLogSegNo; - - // list of background tasks - let mut backup_task: Option = None; - let mut recovery_task: Option> = None; - let mut partial_backup_task: Option> = None; - let mut wal_removal_task: Option>> = None; + let mut mgr = Manager::new(tli, conf, broker_active_set, manager_tx).await; // Start recovery task which always runs on the timeline. - if conf.peer_recovery_enabled { - match tli.full_access_guard().await { - Ok(tli) => { - recovery_task = Some(tokio::spawn(recovery_main(tli, conf.clone()))); - } - Err(e) => { - warn!("failed to start recovery task: {:?}", e); - } - } - } - - // Start partial backup task which always runs on the timeline. 
- if conf.is_wal_backup_enabled() && conf.partial_backup_enabled { - match tli.full_access_guard().await { - Ok(tli) => { - partial_backup_task = Some(tokio::spawn(wal_backup_partial::main_task( - tli, - conf.clone(), - ))); - } - Err(e) => { - warn!("failed to start partial backup task: {:?}", e); - } - } + if !mgr.is_offloaded && mgr.conf.peer_recovery_enabled { + let tli = mgr.wal_resident_timeline(); + mgr.recovery_task = Some(tokio::spawn(recovery_main(tli, mgr.conf.clone()))); } let last_state = 'outer: loop { MANAGER_ITERATIONS_TOTAL.inc(); - let state_snapshot = StateSnapshot::new(tli.read_shared_state().await, heartbeat_timeout); - let num_computes = *num_computes_rx.borrow(); + mgr.set_status(Status::StateSnapshot); + let state_snapshot = mgr.state_snapshot().await; - let is_wal_backup_required = update_backup( - &conf, - &tli, - wal_seg_size, - num_computes, - &state_snapshot, - &mut backup_task, - ) - .await; - - let _is_active = update_is_active( - is_wal_backup_required, - num_computes, - &state_snapshot, - &mut tli_broker_active, - &tli, - ); + let mut next_event: Option = None; + if !mgr.is_offloaded { + let num_computes = *mgr.num_computes_rx.borrow(); - let next_cfile_save = update_control_file_save(&state_snapshot, &tli).await; + mgr.set_status(Status::UpdateBackup); + let is_wal_backup_required = mgr.update_backup(num_computes, &state_snapshot).await; + mgr.update_is_active(is_wal_backup_required, num_computes, &state_snapshot); - update_wal_removal( - &conf, - walsenders, - &tli, - wal_seg_size, - &state_snapshot, - last_removed_segno, - &mut wal_removal_task, - ) - .await; + mgr.set_status(Status::UpdateControlFile); + mgr.update_control_file_save(&state_snapshot, &mut next_event) + .await; + mgr.set_status(Status::UpdateWalRemoval); + mgr.update_wal_removal(&state_snapshot).await; + + mgr.set_status(Status::UpdatePartialBackup); + mgr.update_partial_backup(&state_snapshot).await; + + if mgr.conf.enable_offload && mgr.ready_for_eviction(&next_event, &state_snapshot) { + mgr.set_status(Status::EvictTimeline); + mgr.evict_timeline().await; + } + } + + mgr.set_status(Status::Wait); // wait until something changes. tx channels are stored under Arc, so they will not be // dropped until the manager task is finished. tokio::select! { - _ = tli.cancel.cancelled() => { + _ = mgr.tli.cancel.cancelled() => { // timeline was deleted break 'outer state_snapshot; } _ = async { // don't wake up on every state change, but at most every REFRESH_INTERVAL tokio::time::sleep(REFRESH_INTERVAL).await; - let _ = state_version_rx.changed().await; + let _ = mgr.state_version_rx.changed().await; } => { // state was updated } - _ = num_computes_rx.changed() => { + _ = mgr.num_computes_rx.changed() => { // number of connected computes was updated } - _ = async { - if let Some(timeout) = next_cfile_save { - tokio::time::sleep_until(timeout).await - } else { - futures::future::pending().await - } - } => { - // it's time to save the control file + _ = sleep_until(&next_event) => { + // we were waiting for some event (e.g. 
cfile save) } - res = async { - if let Some(task) = &mut wal_removal_task { - task.await - } else { - futures::future::pending().await - } - } => { + res = await_task_finish(&mut mgr.wal_removal_task) => { // WAL removal task finished - wal_removal_task = None; - update_wal_removal_end(res, &tli, &mut last_removed_segno); + mgr.wal_removal_task = None; + mgr.update_wal_removal_end(res); + } + res = await_task_finish(&mut mgr.partial_backup_task) => { + // partial backup task finished + mgr.partial_backup_task = None; + mgr.update_partial_backup_end(res); + } + + msg = manager_rx.recv() => { + mgr.set_status(Status::HandleMessage); + mgr.handle_message(msg).await; } } }; + mgr.set_status(Status::Exiting); // remove timeline from the broker active set sooner, before waiting for background tasks - tli_broker_active.set(false); + mgr.tli_broker_active.set(false); // shutdown background tasks - if conf.is_wal_backup_enabled() { - wal_backup::update_task(&conf, &tli, false, &last_state, &mut backup_task).await; + if mgr.conf.is_wal_backup_enabled() { + wal_backup::update_task(&mut mgr, false, &last_state).await; } - if let Some(recovery_task) = recovery_task { + if let Some(recovery_task) = &mut mgr.recovery_task { if let Err(e) = recovery_task.await { warn!("recovery task failed: {:?}", e); } } - if let Some(partial_backup_task) = partial_backup_task { + if let Some(partial_backup_task) = &mut mgr.partial_backup_task { if let Err(e) = partial_backup_task.await { warn!("partial backup task failed: {:?}", e); } } - if let Some(wal_removal_task) = wal_removal_task { + if let Some(wal_removal_task) = &mut mgr.wal_removal_task { let res = wal_removal_task.await; - update_wal_removal_end(res, &tli, &mut last_removed_segno); + mgr.update_wal_removal_end(res); } -} -/// Spawns/kills backup task and returns true if backup is required. -async fn update_backup( - conf: &SafeKeeperConf, - tli: &Arc, - wal_seg_size: usize, - num_computes: usize, - state: &StateSnapshot, - backup_task: &mut Option, -) -> bool { - let is_wal_backup_required = - wal_backup::is_wal_backup_required(wal_seg_size, num_computes, state); - - if conf.is_wal_backup_enabled() { - wal_backup::update_task(conf, tli, is_wal_backup_required, state, backup_task).await; - } - - // update the state in Arc - tli.wal_backup_active - .store(backup_task.is_some(), std::sync::atomic::Ordering::Relaxed); - is_wal_backup_required + mgr.set_status(Status::Finished); } -/// Update is_active flag and returns its value. 
-fn update_is_active( - is_wal_backup_required: bool, - num_computes: usize, - state: &StateSnapshot, - tli_broker_active: &mut TimelineSetGuard, - tli: &Arc, -) -> bool { - let is_active = is_wal_backup_required - || num_computes > 0 - || state.remote_consistent_lsn < state.commit_lsn; - - // update the broker timeline set - if tli_broker_active.set(is_active) { - // write log if state has changed - info!( - "timeline active={} now, remote_consistent_lsn={}, commit_lsn={}", - is_active, state.remote_consistent_lsn, state.commit_lsn, +impl Manager { + async fn new( + tli: ManagerTimeline, + conf: SafeKeeperConf, + broker_active_set: Arc, + manager_tx: tokio::sync::mpsc::UnboundedSender, + ) -> Manager { + let (is_offloaded, partial_backup_uploaded) = tli.bootstrap_mgr().await; + Manager { + conf, + wal_seg_size: tli.get_wal_seg_size().await, + walsenders: tli.get_walsenders().clone(), + state_version_rx: tli.get_state_version_rx(), + num_computes_rx: tli.get_walreceivers().get_num_rx(), + tli_broker_active: broker_active_set.guard(tli.clone()), + last_removed_segno: 0, + is_offloaded, + backup_task: None, + recovery_task: None, + wal_removal_task: None, + partial_backup_task: None, + partial_backup_uploaded, + access_service: AccessService::new(manager_tx), + tli, + } + } + + fn set_status(&self, status: Status) { + self.tli.set_status(status); + } + + /// Get a WalResidentTimeline. + /// Manager code must use this function instead of one from `Timeline` + /// directly, because it will deadlock. + pub(crate) fn wal_resident_timeline(&mut self) -> WalResidentTimeline { + assert!(!self.is_offloaded); + let guard = self.access_service.create_guard(); + WalResidentTimeline::new(self.tli.clone(), guard) + } + + /// Get a snapshot of the timeline state. + async fn state_snapshot(&self) -> StateSnapshot { + StateSnapshot::new( + self.tli.read_shared_state().await, + self.conf.heartbeat_timeout, + ) + } + + /// Spawns/kills backup task and returns true if backup is required. + async fn update_backup(&mut self, num_computes: usize, state: &StateSnapshot) -> bool { + let is_wal_backup_required = + wal_backup::is_wal_backup_required(self.wal_seg_size, num_computes, state); + + if self.conf.is_wal_backup_enabled() { + wal_backup::update_task(self, is_wal_backup_required, state).await; + } + + // update the state in Arc + self.tli.wal_backup_active.store( + self.backup_task.is_some(), + std::sync::atomic::Ordering::Relaxed, ); + is_wal_backup_required + } + + /// Update is_active flag and returns its value. + fn update_is_active( + &mut self, + is_wal_backup_required: bool, + num_computes: usize, + state: &StateSnapshot, + ) { + let is_active = is_wal_backup_required + || num_computes > 0 + || state.remote_consistent_lsn < state.commit_lsn; + + // update the broker timeline set + if self.tli_broker_active.set(is_active) { + // write log if state has changed + info!( + "timeline active={} now, remote_consistent_lsn={}, commit_lsn={}", + is_active, state.remote_consistent_lsn, state.commit_lsn, + ); + + MANAGER_ACTIVE_CHANGES.inc(); + } - MANAGER_ACTIVE_CHANGES.inc(); + // update the state in Arc + self.tli + .broker_active + .store(is_active, std::sync::atomic::Ordering::Relaxed); } - // update the state in Arc - tli.broker_active - .store(is_active, std::sync::atomic::Ordering::Relaxed); - is_active -} + /// Save control file if needed. Returns Instant if we should persist the control file in the future. 
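+    /// The deadline for the next save is communicated through the `next_event` out-parameter.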
+ async fn update_control_file_save( + &self, + state: &StateSnapshot, + next_event: &mut Option, + ) { + if !state.inmem_flush_pending { + return; + } + + if state.cfile_last_persist_at.elapsed() > self.conf.control_file_save_interval { + let mut write_guard = self.tli.write_shared_state().await; + // it should be done in the background because it blocks manager task, but flush() should + // be fast enough not to be a problem now + if let Err(e) = write_guard.sk.state_mut().flush().await { + warn!("failed to save control file: {:?}", e); + } + } else { + // we should wait until some time passed until the next save + update_next_event( + next_event, + (state.cfile_last_persist_at + self.conf.control_file_save_interval).into(), + ); + } + } + + /// Spawns WAL removal task if needed. + async fn update_wal_removal(&mut self, state: &StateSnapshot) { + if self.wal_removal_task.is_some() || state.wal_removal_on_hold { + // WAL removal is already in progress or hold off + return; + } + + // If enabled, we use LSN of the most lagging walsender as a WAL removal horizon. + // This allows to get better read speed for pageservers that are lagging behind, + // at the cost of keeping more WAL on disk. + let replication_horizon_lsn = if self.conf.walsenders_keep_horizon { + self.walsenders.laggard_lsn() + } else { + None + }; + + let removal_horizon_lsn = calc_horizon_lsn(state, replication_horizon_lsn); + let removal_horizon_segno = removal_horizon_lsn + .segment_number(self.wal_seg_size) + .saturating_sub(1); + + if removal_horizon_segno > self.last_removed_segno { + // we need to remove WAL + let remover = match self.tli.read_shared_state().await.sk { + StateSK::Loaded(ref sk) => { + crate::wal_storage::Storage::remove_up_to(&sk.wal_store, removal_horizon_segno) + } + StateSK::Offloaded(_) => { + // we can't remove WAL if it's not loaded + warn!("unexpectedly trying to run WAL removal on offloaded timeline"); + return; + } + StateSK::Empty => unreachable!(), + }; + + self.wal_removal_task = Some(tokio::spawn( + async move { + remover.await?; + Ok(removal_horizon_segno) + } + .instrument(info_span!("WAL removal", ttid=%self.tli.ttid)), + )); + } + } + + /// Update the state after WAL removal task finished. + fn update_wal_removal_end(&mut self, res: Result, JoinError>) { + let new_last_removed_segno = match res { + Ok(Ok(segno)) => segno, + Err(e) => { + warn!("WAL removal task failed: {:?}", e); + return; + } + Ok(Err(e)) => { + warn!("WAL removal task failed: {:?}", e); + return; + } + }; + + self.last_removed_segno = new_last_removed_segno; + // update the state in Arc + self.tli + .last_removed_segno + .store(new_last_removed_segno, std::sync::atomic::Ordering::Relaxed); + } + + /// Spawns partial WAL backup task if needed. + async fn update_partial_backup(&mut self, state: &StateSnapshot) { + // check if partial backup is enabled and should be started + if !self.conf.is_wal_backup_enabled() || !self.conf.partial_backup_enabled { + return; + } + + if self.partial_backup_task.is_some() { + // partial backup is already running + return; + } + + if !wal_backup_partial::needs_uploading(state, &self.partial_backup_uploaded) { + // nothing to upload + return; + } + + // Get WalResidentTimeline and start partial backup task. + self.partial_backup_task = Some(tokio::spawn(wal_backup_partial::main_task( + self.wal_resident_timeline(), + self.conf.clone(), + ))); + } -/// Save control file if needed. Returns Instant if we should persist the control file in the future. 
-async fn update_control_file_save( - state: &StateSnapshot, - tli: &Arc, -) -> Option { - if !state.inmem_flush_pending { - return None; - } - - if state.cfile_last_persist_at.elapsed() > CF_SAVE_INTERVAL { - let mut write_guard = tli.write_shared_state().await; - // this can be done in the background because it blocks manager task, but flush() should - // be fast enough not to be a problem now - if let Err(e) = write_guard.sk.state.flush().await { - warn!("failed to save control file: {:?}", e); + /// Update the state after partial WAL backup task finished. + fn update_partial_backup_end(&mut self, res: Result, JoinError>) { + match res { + Ok(new_upload_state) => { + self.partial_backup_uploaded = new_upload_state; + } + Err(e) => { + warn!("partial backup task panicked: {:?}", e); + } + } + } + + /// Handle message arrived from ManagerCtl. + async fn handle_message(&mut self, msg: Option) { + debug!("received manager message: {:?}", msg); + match msg { + Some(ManagerCtlMessage::GuardRequest(tx)) => { + if self.is_offloaded { + // trying to unevict timeline, but without gurarantee that it will be successful + self.unevict_timeline().await; + } + + let guard = if self.is_offloaded { + Err(anyhow::anyhow!("timeline is offloaded, can't get a guard")) + } else { + Ok(self.access_service.create_guard()) + }; + + if tx.send(guard).is_err() { + warn!("failed to reply with a guard, receiver dropped"); + } + } + Some(ManagerCtlMessage::GuardDrop(guard_id)) => { + self.access_service.drop_guard(guard_id); + } + None => { + // can't happen, we're holding the sender + unreachable!(); + } } + } +} - None +// utility functions +async fn sleep_until(option: &Option) { + if let Some(timeout) = option { + tokio::time::sleep_until(*timeout).await; } else { - // we should wait until next CF_SAVE_INTERVAL - Some((state.cfile_last_persist_at + CF_SAVE_INTERVAL).into()) + futures::future::pending::<()>().await; } } -/// Spawns WAL removal task if needed. -async fn update_wal_removal( - conf: &SafeKeeperConf, - walsenders: &Arc, - tli: &Arc, - wal_seg_size: usize, - state: &StateSnapshot, - last_removed_segno: u64, - wal_removal_task: &mut Option>>, -) { - if wal_removal_task.is_some() || state.wal_removal_on_hold { - // WAL removal is already in progress or hold off - return; +async fn await_task_finish(option: &mut Option>) -> Result { + if let Some(task) = option { + task.await + } else { + futures::future::pending().await } +} - // If enabled, we use LSN of the most lagging walsender as a WAL removal horizon. - // This allows to get better read speed for pageservers that are lagging behind, - // at the cost of keeping more WAL on disk. - let replication_horizon_lsn = if conf.walsenders_keep_horizon { - walsenders.laggard_lsn() +/// Update next_event if candidate is earlier. 
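+/// Used by the manager loop to track the earliest deadline it has to wake up for.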
+fn update_next_event(next_event: &mut Option, candidate: Instant) { + if let Some(next) = next_event { + if candidate < *next { + *next = candidate; + } } else { - None - }; + *next_event = Some(candidate); + } +} - let removal_horizon_lsn = calc_horizon_lsn(state, replication_horizon_lsn); - let removal_horizon_segno = removal_horizon_lsn - .segment_number(wal_seg_size) - .saturating_sub(1); +#[repr(usize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum Status { + NotStarted, + Started, + StateSnapshot, + UpdateBackup, + UpdateControlFile, + UpdateWalRemoval, + UpdatePartialBackup, + EvictTimeline, + Wait, + HandleMessage, + Exiting, + Finished, +} - if removal_horizon_segno > last_removed_segno { - // we need to remove WAL - let remover = crate::wal_storage::Storage::remove_up_to( - &tli.read_shared_state().await.sk.wal_store, - removal_horizon_segno, - ); - *wal_removal_task = Some(tokio::spawn( - async move { - remover.await?; - Ok(removal_horizon_segno) - } - .instrument(info_span!("WAL removal", ttid=%tli.ttid)), - )); +/// AtomicStatus is a wrapper around AtomicUsize adapted for the Status enum. +pub struct AtomicStatus { + inner: AtomicUsize, +} + +impl Default for AtomicStatus { + fn default() -> Self { + Self::new() } } -/// Update the state after WAL removal task finished. -fn update_wal_removal_end( - res: Result, JoinError>, - tli: &Arc, - last_removed_segno: &mut u64, -) { - let new_last_removed_segno = match res { - Ok(Ok(segno)) => segno, - Err(e) => { - warn!("WAL removal task failed: {:?}", e); - return; +impl AtomicStatus { + pub fn new() -> Self { + AtomicStatus { + inner: AtomicUsize::new(Status::NotStarted as usize), } - Ok(Err(e)) => { - warn!("WAL removal task failed: {:?}", e); - return; - } - }; + } - *last_removed_segno = new_last_removed_segno; - // update the state in Arc - tli.last_removed_segno - .store(new_last_removed_segno, std::sync::atomic::Ordering::Relaxed); + pub fn load(&self, order: std::sync::atomic::Ordering) -> Status { + // Safety: This line of code uses `std::mem::transmute` to reinterpret the loaded value as `Status`. + // It is safe to use `transmute` in this context because `Status` is a repr(usize) enum, + // which means it has the same memory layout as usize. + // However, it is important to ensure that the loaded value is a valid variant of `Status`, + // otherwise, the behavior will be undefined. 
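+        // `store()` below only ever writes `Status as usize` values, which upholds that invariant.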
+ unsafe { std::mem::transmute(self.inner.load(order)) } + } + + pub fn get(&self) -> Status { + self.load(std::sync::atomic::Ordering::Relaxed) + } + + pub fn store(&self, val: Status, order: std::sync::atomic::Ordering) { + self.inner.store(val as usize, order); + } } diff --git a/safekeeper/src/timelines_set.rs b/safekeeper/src/timelines_set.rs index ea8e23bb7265..d6eea79f8227 100644 --- a/safekeeper/src/timelines_set.rs +++ b/safekeeper/src/timelines_set.rs @@ -80,6 +80,10 @@ impl TimelineSetGuard { self.timelines_set.set_present(self.tli.clone(), present); true } + + pub fn get(&self) -> bool { + self.is_present + } } impl Drop for TimelineSetGuard { diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index 58591aecfa73..9ea048a3c76a 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -12,7 +12,6 @@ use std::cmp::min; use std::collections::HashSet; use std::num::NonZeroU32; use std::pin::Pin; -use std::sync::Arc; use std::time::Duration; use postgres_ffi::v14::xlog_utils::XLogSegNoOffsetToRecPtr; @@ -30,8 +29,8 @@ use tracing::*; use utils::{id::TenantTimelineId, lsn::Lsn}; use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS}; -use crate::timeline::{FullAccessTimeline, PeerInfo, Timeline}; -use crate::timeline_manager::StateSnapshot; +use crate::timeline::{PeerInfo, WalResidentTimeline}; +use crate::timeline_manager::{Manager, StateSnapshot}; use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME}; use once_cell::sync::OnceCell; @@ -48,7 +47,7 @@ pub struct WalBackupTaskHandle { } /// Do we have anything to upload to S3, i.e. should safekeepers run backup activity? -pub fn is_wal_backup_required( +pub(crate) fn is_wal_backup_required( wal_seg_size: usize, num_computes: usize, state: &StateSnapshot, @@ -61,35 +60,33 @@ pub fn is_wal_backup_required( /// Based on peer information determine which safekeeper should offload; if it /// is me, run (per timeline) task, if not yet. OTOH, if it is not me and task /// is running, kill it. 
-pub async fn update_task( - conf: &SafeKeeperConf, - tli: &Arc, - need_backup: bool, - state: &StateSnapshot, - entry: &mut Option, -) { +pub(crate) async fn update_task(mgr: &mut Manager, need_backup: bool, state: &StateSnapshot) { let (offloader, election_dbg_str) = - determine_offloader(&state.peers, state.backup_lsn, tli.ttid, conf); - let elected_me = Some(conf.my_id) == offloader; + determine_offloader(&state.peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf); + let elected_me = Some(mgr.conf.my_id) == offloader; let should_task_run = need_backup && elected_me; // start or stop the task - if should_task_run != (entry.is_some()) { + if should_task_run != (mgr.backup_task.is_some()) { if should_task_run { info!("elected for backup: {}", election_dbg_str); let (shutdown_tx, shutdown_rx) = mpsc::channel(1); - let async_task = backup_task_main(tli.clone(), conf.backup_parallel_jobs, shutdown_rx); + let async_task = backup_task_main( + mgr.wal_resident_timeline(), + mgr.conf.backup_parallel_jobs, + shutdown_rx, + ); - let handle = if conf.current_thread_runtime { + let handle = if mgr.conf.current_thread_runtime { tokio::spawn(async_task) } else { WAL_BACKUP_RUNTIME.spawn(async_task) }; - *entry = Some(WalBackupTaskHandle { + mgr.backup_task = Some(WalBackupTaskHandle { shutdown_tx, handle, }); @@ -101,7 +98,7 @@ pub async fn update_task( // someone else has been elected info!("stepping down from backup: {}", election_dbg_str); } - shut_down_task(entry).await; + shut_down_task(&mut mgr.backup_task).await; } } } @@ -191,7 +188,7 @@ pub fn init_remote_storage(conf: &SafeKeeperConf) { } struct WalBackupTask { - timeline: FullAccessTimeline, + timeline: WalResidentTimeline, timeline_dir: Utf8PathBuf, wal_seg_size: usize, parallel_jobs: usize, @@ -200,16 +197,12 @@ struct WalBackupTask { /// Offload single timeline. #[instrument(name = "WAL backup", skip_all, fields(ttid = %tli.ttid))] -async fn backup_task_main(tli: Arc, parallel_jobs: usize, mut shutdown_rx: Receiver<()>) { +async fn backup_task_main( + tli: WalResidentTimeline, + parallel_jobs: usize, + mut shutdown_rx: Receiver<()>, +) { let _guard = WAL_BACKUP_TASKS.guard(); - - let tli = match tli.full_access_guard().await { - Ok(tli) => tli, - Err(e) => { - error!("backup error: {}", e); - return; - } - }; info!("started"); let mut wb = WalBackupTask { @@ -304,7 +297,7 @@ impl WalBackupTask { } async fn backup_lsn_range( - timeline: &FullAccessTimeline, + timeline: &WalResidentTimeline, backup_lsn: &mut Lsn, end_lsn: Lsn, wal_seg_size: usize, diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index ed5ddb71f50d..9c7cd0888d83 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -29,18 +29,22 @@ use utils::lsn::Lsn; use crate::{ metrics::{PARTIAL_BACKUP_UPLOADED_BYTES, PARTIAL_BACKUP_UPLOADS}, safekeeper::Term, - timeline::FullAccessTimeline, + timeline::WalResidentTimeline, + timeline_manager::StateSnapshot, wal_backup::{self, remote_timeline_path}, SafeKeeperConf, }; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum UploadStatus { - /// Upload is in progress + /// Upload is in progress. This status should be used only for garbage collection, + /// don't read data from the remote storage with this status. InProgress, - /// Upload is finished + /// Upload is finished. There is always at most one segment with this status. + /// It means that the segment is actual and can be used. 
Uploaded, - /// Deletion is in progress + /// Deletion is in progress. This status should be used only for garbage collection, + /// don't read data from the remote storage with this status. Deleting, } @@ -50,6 +54,10 @@ pub struct PartialRemoteSegment { pub name: String, pub commit_lsn: Lsn, pub flush_lsn: Lsn, + // We should use last_log_term here, otherwise it's possible to have inconsistent data in the + // remote storage. + // + // More info here: https://github.com/neondatabase/neon/pull/8022#discussion_r1654738405 pub term: Term, } @@ -60,6 +68,10 @@ impl PartialRemoteSegment { && self.flush_lsn == other.flush_lsn && self.term == other.term } + + pub(crate) fn remote_path(&self, remote_timeline_path: &RemotePath) -> RemotePath { + remote_timeline_path.join(&self.name) + } } // NB: these structures are a part of a control_file, you can't change them without @@ -71,7 +83,7 @@ pub struct State { impl State { /// Find an Uploaded segment. There should be only one Uploaded segment at a time. - fn uploaded_segment(&self) -> Option { + pub(crate) fn uploaded_segment(&self) -> Option { self.segments .iter() .find(|seg| seg.status == UploadStatus::Uploaded) @@ -81,7 +93,7 @@ impl State { struct PartialBackup { wal_seg_size: usize, - tli: FullAccessTimeline, + tli: WalResidentTimeline, conf: SafeKeeperConf, local_prefix: Utf8PathBuf, remote_timeline_path: RemotePath, @@ -128,17 +140,17 @@ impl PartialBackup { let sk_info = self.tli.get_safekeeper_info(&self.conf).await; let flush_lsn = Lsn(sk_info.flush_lsn); let commit_lsn = Lsn(sk_info.commit_lsn); - let term = sk_info.term; + let last_log_term = sk_info.last_log_term; let segno = self.segno(flush_lsn); - let name = self.remote_segment_name(segno, term, commit_lsn, flush_lsn); + let name = self.remote_segment_name(segno, last_log_term, commit_lsn, flush_lsn); PartialRemoteSegment { status: UploadStatus::InProgress, name, commit_lsn, flush_lsn, - term, + term: last_log_term, } } @@ -151,7 +163,7 @@ impl PartialBackup { let backup_bytes = flush_lsn.segment_offset(self.wal_seg_size); let local_path = self.local_prefix.join(self.local_segment_name(segno)); - let remote_path = self.remote_timeline_path.join(&prepared.name); + let remote_path = prepared.remote_path(&self.remote_timeline_path); // Upload first `backup_bytes` bytes of the segment to the remote storage. wal_backup::backup_partial_segment(&local_path, &remote_path, backup_bytes).await?; @@ -161,7 +173,7 @@ impl PartialBackup { // If the term changed, we cannot guarantee the validity of the uploaded data. // If the term is the same, we know the data is not corrupted. let sk_info = self.tli.get_safekeeper_info(&self.conf).await; - if sk_info.term != prepared.term { + if sk_info.last_log_term != prepared.term { anyhow::bail!("term changed during upload"); } assert!(prepared.commit_lsn <= Lsn(sk_info.commit_lsn)); @@ -270,8 +282,32 @@ impl PartialBackup { } } +/// Check if everything is uploaded and partial backup task doesn't need to run. +pub(crate) fn needs_uploading( + state: &StateSnapshot, + uploaded: &Option, +) -> bool { + match uploaded { + Some(uploaded) => { + uploaded.status != UploadStatus::Uploaded + || uploaded.flush_lsn != state.flush_lsn + || uploaded.commit_lsn != state.commit_lsn + || uploaded.term != state.last_log_term + } + None => true, + } +} + +/// Main task for partial backup. It waits for the flush_lsn to change and then uploads the +/// partial segment to the remote storage. It also does garbage collection of old segments. 
+/// +/// When there is nothing more to do and the last segment was successfully uploaded, the task +/// returns PartialRemoteSegment, to signal readiness for offloading the timeline. #[instrument(name = "Partial backup", skip_all, fields(ttid = %tli.ttid))] -pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { +pub async fn main_task( + tli: WalResidentTimeline, + conf: SafeKeeperConf, +) -> Option { debug!("started"); let await_duration = conf.partial_backup_timeout; @@ -285,7 +321,7 @@ pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { Ok(path) => path, Err(e) => { error!("failed to create remote path: {:?}", e); - return; + return None; } }; @@ -320,19 +356,13 @@ pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { // wait until we have something to upload let uploaded_segment = backup.state.uploaded_segment(); if let Some(seg) = &uploaded_segment { - // if we already uploaded something, wait until we have something new - while flush_lsn_rx.borrow().lsn == seg.flush_lsn + // check if uploaded segment matches the current state + if flush_lsn_rx.borrow().lsn == seg.flush_lsn && *commit_lsn_rx.borrow() == seg.commit_lsn && flush_lsn_rx.borrow().term == seg.term { - tokio::select! { - _ = backup.tli.cancel.cancelled() => { - info!("timeline canceled"); - return; - } - _ = commit_lsn_rx.changed() => {} - _ = flush_lsn_rx.changed() => {} - } + // we have nothing to do, the last segment is already uploaded + return Some(seg.clone()); } } @@ -341,7 +371,7 @@ pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { tokio::select! { _ = backup.tli.cancel.cancelled() => { info!("timeline canceled"); - return; + return None; } _ = flush_lsn_rx.changed() => {} } @@ -358,7 +388,7 @@ pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { tokio::select! { _ = backup.tli.cancel.cancelled() => { info!("timeline canceled"); - return; + return None; } _ = commit_lsn_rx.changed() => {} _ = flush_lsn_rx.changed() => { diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 2aead70ffd03..74c4693ccd9b 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -211,7 +211,7 @@ impl PhysicalStorage { /// Returns `file` and `is_partial`. 
async fn open_or_create(&mut self, segno: XLogSegNo) -> Result<(File, bool)> { let (wal_file_path, wal_file_partial_path) = - wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size)?; + wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size); // Try to open already completed segment if let Ok(file) = OpenOptions::new().write(true).open(&wal_file_path).await { @@ -276,7 +276,7 @@ impl PhysicalStorage { // Rename partial file to completed file let (wal_file_path, wal_file_partial_path) = - wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size)?; + wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size); fs::rename(wal_file_partial_path, wal_file_path).await?; } else { // otherwise, file can be reused later @@ -461,7 +461,7 @@ impl Storage for PhysicalStorage { if !is_partial { // Make segment partial once again let (wal_file_path, wal_file_partial_path) = - wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size)?; + wal_file_paths(&self.timeline_dir, segno, self.wal_seg_size); fs::rename(wal_file_path, wal_file_partial_path).await?; } @@ -741,7 +741,7 @@ pub(crate) async fn open_wal_file( segno: XLogSegNo, wal_seg_size: usize, ) -> Result<(tokio::fs::File, bool)> { - let (wal_file_path, wal_file_partial_path) = wal_file_paths(timeline_dir, segno, wal_seg_size)?; + let (wal_file_path, wal_file_partial_path) = wal_file_paths(timeline_dir, segno, wal_seg_size); // First try to open the .partial file. let mut partial_path = wal_file_path.to_owned(); @@ -767,9 +767,9 @@ pub fn wal_file_paths( timeline_dir: &Utf8Path, segno: XLogSegNo, wal_seg_size: usize, -) -> Result<(Utf8PathBuf, Utf8PathBuf)> { +) -> (Utf8PathBuf, Utf8PathBuf) { let wal_file_name = XLogFileName(PG_TLI, segno, wal_seg_size); let wal_file_path = timeline_dir.join(wal_file_name.clone()); let wal_file_partial_path = timeline_dir.join(wal_file_name + ".partial"); - Ok((wal_file_path, wal_file_partial_path)) + (wal_file_path, wal_file_partial_path) } diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 9c81d2eb4d45..43835c7f4411 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -16,7 +16,7 @@ use desim::{ use hyper::Uri; use safekeeper::{ safekeeper::{ProposerAcceptorMessage, SafeKeeper, ServerInfo, UNKNOWN_SERVER_VERSION}, - state::TimelinePersistentState, + state::{TimelinePersistentState, TimelineState}, timeline::TimelineError, wal_storage::Storage, SafeKeeperConf, @@ -68,7 +68,7 @@ impl GlobalMap { let control_store = DiskStateStorage::new(disk.clone()); let wal_store = DiskWALStorage::new(disk.clone(), &control_store)?; - let sk = SafeKeeper::new(control_store, wal_store, conf.my_id)?; + let sk = SafeKeeper::new(TimelineState::new(control_store), wal_store, conf.my_id)?; timelines.insert( ttid, SharedState { @@ -118,7 +118,11 @@ impl GlobalMap { let control_store = DiskStateStorage::new(disk_timeline.clone()); let wal_store = DiskWALStorage::new(disk_timeline.clone(), &control_store)?; - let sk = SafeKeeper::new(control_store, wal_store, self.conf.my_id)?; + let sk = SafeKeeper::new( + TimelineState::new(control_store), + wal_store, + self.conf.my_id, + )?; self.timelines.insert( ttid, @@ -180,6 +184,9 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { partial_backup_enabled: false, partial_backup_timeout: Duration::from_secs(0), disable_periodic_broker_push: false, + enable_offload: false, + delete_offloaded_wal: false, + control_file_save_interval: 
Duration::from_secs(1), }; let mut global = GlobalMap::new(disk, conf.clone())?; diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 745363721894..6a29df6f135f 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3916,6 +3916,8 @@ def stop(self, immediate: bool = False) -> "Safekeeper": def assert_no_errors(self): assert not self.log_contains("manager task finished prematurely") + assert not self.log_contains("error while acquiring WalResidentTimeline guard") + assert not self.log_contains("timeout while acquiring WalResidentTimeline guard") def append_logical_message( self, tenant_id: TenantId, timeline_id: TimelineId, request: Dict[str, Any] diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 7bf208db54c9..ac1a3bef67bd 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -1,4 +1,5 @@ import filecmp +import logging import os import random import shutil @@ -2178,3 +2179,102 @@ def do_something(): do_something() do_something() + + +# Test creates 5 endpoints and tries to wake them up randomly. All timeouts are +# configured to be very short, so that we expect that: +# - pageserver will update remote_consistent_lsn very often +# - safekeepers will upload partial WAL segments very often +# - safekeeper will try to evict and unevict timelines +# +# Test checks that there are no critical errors while doing this. Also it checks +# that every safekeeper has at least one successful eviction. +@pytest.mark.parametrize("delete_offloaded_wal", [False, True]) +@pytest.mark.parametrize("restart_chance", [0.0, 0.2]) +def test_s3_eviction( + neon_env_builder: NeonEnvBuilder, delete_offloaded_wal: bool, restart_chance: float +): + neon_env_builder.num_safekeepers = 3 + neon_env_builder.enable_safekeeper_remote_storage(RemoteStorageKind.LOCAL_FS) + env = neon_env_builder.init_start( + initial_tenant_conf={ + "checkpoint_timeout": "100ms", + } + ) + + extra_opts = [ + "--enable-offload", + "--partial-backup-timeout", + "50ms", + "--control-file-save-interval", + "1s", + ] + if delete_offloaded_wal: + extra_opts.append("--delete-offloaded-wal") + + for sk in env.safekeepers: + sk.stop().start(extra_opts=extra_opts) + + n_timelines = 5 + + branch_names = [f"branch{tlin}" for tlin in range(n_timelines)] + timelines = [] + ps_client = env.pageservers[0].http_client() + + # start postgres on each timeline + endpoints: list[Endpoint] = [] + for branch_name in branch_names: + timeline_id = env.neon_cli.create_branch(branch_name) + timelines.append(timeline_id) + + endpoints.append(env.endpoints.create_start(branch_name)) + endpoints[-1].safe_psql("CREATE TABLE t(i int)") + endpoints[-1].safe_psql("INSERT INTO t VALUES (0)") + + lsn = endpoints[-1].safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0] + log.info(f"{branch_name}: LSN={lsn}") + + endpoints[-1].stop() + + # update remote_consistent_lsn on pageserver + ps_client.timeline_checkpoint(env.initial_tenant, timelines[-1], wait_until_uploaded=True) + + check_values = [0] * n_timelines + + n_iters = 20 + for _ in range(n_iters): + if log.isEnabledFor(logging.DEBUG): + for j in range(n_timelines): + detail = ps_client.timeline_detail(env.initial_tenant, timelines[j]) + log.debug( + f'{branch_names[j]}: RCL={detail["remote_consistent_lsn"]}, LRL={detail["last_record_lsn"]}' + ) + + i = random.randint(0, n_timelines - 1) + log.info(f"Starting endpoint {i}") + 
endpoints[i].start()
+        check_values[i] += 1
+        res = endpoints[i].safe_psql("UPDATE t SET i = i + 1 RETURNING i")
+        assert res[0][0] == check_values[i]
+
+        lsn = endpoints[i].safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0]
+        log.info(f"{branch_names[i]}: LSN={lsn}")
+
+        endpoints[i].stop()
+
+        # update remote_consistent_lsn on pageserver
+        ps_client.timeline_checkpoint(env.initial_tenant, timelines[i], wait_until_uploaded=True)
+
+        # restarting random safekeepers
+        for sk in env.safekeepers:
+            if random.random() < restart_chance:
+                sk.stop().start(extra_opts=extra_opts)
+        time.sleep(0.5)
+
+    # require at least one successful eviction in at least one safekeeper
+    # TODO: require eviction in each safekeeper after https://github.com/neondatabase/neon/issues/8148 is fixed
+    assert any(
+        sk.log_contains("successfully evicted timeline")
+        and sk.log_contains("successfully restored evicted timeline")
+        for sk in env.safekeepers
+    )
diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py
index 971fad787a22..3f0a4a2ff8a6 100644
--- a/test_runner/regress/test_wal_acceptor_async.py
+++ b/test_runner/regress/test_wal_acceptor_async.py
@@ -200,9 +200,8 @@ async def run_restarts_under_load(
     # assert that at least one transaction has completed in every worker
     stats.check_progress()
 
-    # testing #6530, temporary here
-    # TODO: remove afer partial backup is enabled by default
-    victim.start(extra_opts=["--partial-backup-enabled", "--partial-backup-timeout=2s"])
+    # testing #6530
+    victim.start(extra_opts=["--partial-backup-timeout=2s"])
 
     log.info("Iterations are finished, exiting coroutines...")
     stats.running = False

From c39d5b03e81683717bd95c87615d68b0b23e887d Mon Sep 17 00:00:00 2001
From: John Spray
Date: Wed, 26 Jun 2024 20:53:59 +0100
Subject: [PATCH 34/57] pageserver: remove legacy tenant config code, clean up
 redundant generation none/broken usages (#7947)

## Problem

In https://github.com/neondatabase/neon/pull/5299, the new config-v1
tenant config file was added to hold the LocationConf type. We left the
old config file in place for forward compat, and because running without
generations (therefore without LocationConf) was still useful before the
storage controller was ready for prime-time.

Closes: https://github.com/neondatabase/neon/issues/5388

## Summary of changes

- Remove code for reading and writing the legacy config file
- Remove Generation::Broken: it was unused.
- Treat missing config file on disk as an error loading a tenant, rather
  than defaulting it. We can now remove LocationConf::default, and thereby
  guarantee that we never construct a tenant with a None generation.
- Update some comments + add some assertions to clarify that
  Generation::None is only used in layer metadata, not in the state of
  a running tenant.
- Update docker compose test to create tenants with a generation --- docker-compose/README.md | 10 ++ .../compute_wrapper/shell/compute.sh | 7 +- libs/pageserver_api/src/models.rs | 13 +- libs/utils/src/generation.rs | 32 +---- pageserver/src/config.rs | 19 +-- pageserver/src/deletion_queue.rs | 11 -- pageserver/src/http/routes.rs | 9 +- pageserver/src/lib.rs | 6 +- pageserver/src/tenant.rs | 136 +++--------------- pageserver/src/tenant/config.rs | 16 --- pageserver/src/tenant/mgr.rs | 24 +--- .../src/tenant/secondary/heatmap_uploader.rs | 5 +- pageserver/src/tenant/storage_layer/layer.rs | 16 +-- 13 files changed, 67 insertions(+), 237 deletions(-) create mode 100644 docker-compose/README.md diff --git a/docker-compose/README.md b/docker-compose/README.md new file mode 100644 index 000000000000..bd47805a6791 --- /dev/null +++ b/docker-compose/README.md @@ -0,0 +1,10 @@ + +# Example docker compose configuration + +The configuration in this directory is used for testing Neon docker images: it is +not intended for deploying a usable system. To run a development environment where +you can experiment with a minature Neon system, use `cargo neon` rather than container images. + +This configuration does not start the storage controller, because the controller +needs a way to reconfigure running computes, and no such thing exists in this setup. + diff --git a/docker-compose/compute_wrapper/shell/compute.sh b/docker-compose/compute_wrapper/shell/compute.sh index 22660a63ce2e..f646e36f5993 100755 --- a/docker-compose/compute_wrapper/shell/compute.sh +++ b/docker-compose/compute_wrapper/shell/compute.sh @@ -23,11 +23,10 @@ echo "Page server is ready." echo "Create a tenant and timeline" generate_id tenant_id PARAMS=( - -sb - -X POST + -X PUT -H "Content-Type: application/json" - -d "{\"new_tenant_id\": \"${tenant_id}\"}" - http://pageserver:9898/v1/tenant/ + -d "{\"mode\": \"AttachedSingle\", \"generation\": 1, \"tenant_conf\": {}}" + "http://pageserver:9898/v1/tenant/${tenant_id}/location_config" ) result=$(curl "${PARAMS[@]}") echo $result | jq . diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index b1e4525cc03c..4875f4949522 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -625,8 +625,7 @@ pub struct TenantInfo { /// If a layer is present in both local FS and S3, it counts only once. 
pub current_physical_size: Option, // physical size is only included in `tenant_status` endpoint pub attachment_status: TenantAttachmentStatus, - #[serde(skip_serializing_if = "Option::is_none")] - pub generation: Option, + pub generation: u32, } #[derive(Serialize, Deserialize, Clone)] @@ -1453,7 +1452,7 @@ mod tests { state: TenantState::Active, current_physical_size: Some(42), attachment_status: TenantAttachmentStatus::Attached, - generation: None, + generation: 1, }; let expected_active = json!({ "id": original_active.id.to_string(), @@ -1463,7 +1462,8 @@ mod tests { "current_physical_size": 42, "attachment_status": { "slug":"attached", - } + }, + "generation" : 1 }); let original_broken = TenantInfo { @@ -1474,7 +1474,7 @@ mod tests { }, current_physical_size: Some(42), attachment_status: TenantAttachmentStatus::Attached, - generation: None, + generation: 1, }; let expected_broken = json!({ "id": original_broken.id.to_string(), @@ -1488,7 +1488,8 @@ mod tests { "current_physical_size": 42, "attachment_status": { "slug":"attached", - } + }, + "generation" : 1 }); assert_eq!( diff --git a/libs/utils/src/generation.rs b/libs/utils/src/generation.rs index b703e883def4..5970836033c8 100644 --- a/libs/utils/src/generation.rs +++ b/libs/utils/src/generation.rs @@ -9,20 +9,11 @@ use serde::{Deserialize, Serialize}; /// numbers are used. #[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] pub enum Generation { - // Generations with this magic value will not add a suffix to S3 keys, and will not - // be included in persisted index_part.json. This value is only to be used - // during migration from pre-generation metadata to generation-aware metadata, - // and should eventually go away. - // - // A special Generation is used rather than always wrapping Generation in an Option, - // so that code handling generations doesn't have to be aware of the legacy - // case everywhere it touches a generation. + // The None Generation is used in the metadata of layers written before generations were + // introduced. A running Tenant always has a valid generation, but the layer metadata may + // include None generations. None, - // Generations with this magic value may never be used to construct S3 keys: - // we will panic if someone tries to. This is for Tenants in the "Broken" state, - // so that we can satisfy their constructor with a Generation without risking - // a code bug using it in an S3 write (broken tenants should never write) - Broken, + Valid(u32), } @@ -42,11 +33,6 @@ impl Generation { Self::None } - // Create a new generation that will panic if you try to use get_suffix - pub fn broken() -> Self { - Self::Broken - } - pub const fn new(v: u32) -> Self { Self::Valid(v) } @@ -60,9 +46,6 @@ impl Generation { match self { Self::Valid(v) => GenerationFileSuffix(Some(*v)), Self::None => GenerationFileSuffix(None), - Self::Broken => { - panic!("Tried to use a broken generation"); - } } } @@ -86,7 +69,6 @@ impl Generation { } } Self::None => Self::None, - Self::Broken => panic!("Attempted to use a broken generation"), } } @@ -95,7 +77,6 @@ impl Generation { match self { Self::Valid(n) => Self::Valid(*n + 1), Self::None => Self::Valid(1), - Self::Broken => panic!("Attempted to use a broken generation"), } } @@ -128,7 +109,7 @@ impl Serialize for Generation { if let Self::Valid(v) = self { v.serialize(serializer) } else { - // We should never be asked to serialize a None or Broken. Structures + // We should never be asked to serialize a None. 
Structures // that include an optional generation should convert None to an // Option::None Err(serde::ser::Error::custom( @@ -159,9 +140,6 @@ impl Debug for Generation { Self::None => { write!(f, "") } - Self::Broken => { - write!(f, "") - } } } } diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index 104234841c82..f36e63f035c7 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -36,10 +36,7 @@ use crate::tenant::{config::TenantConfOpt, timeline::GetImpl}; use crate::tenant::{TENANTS_SEGMENT_NAME, TIMELINES_SEGMENT_NAME}; use crate::{disk_usage_eviction_task::DiskUsageEvictionTaskConfig, virtual_file::io_engine}; use crate::{tenant::config::TenantConf, virtual_file}; -use crate::{ - TENANT_CONFIG_NAME, TENANT_HEATMAP_BASENAME, TENANT_LOCATION_CONFIG_NAME, - TIMELINE_DELETE_MARK_SUFFIX, -}; +use crate::{TENANT_HEATMAP_BASENAME, TENANT_LOCATION_CONFIG_NAME, TIMELINE_DELETE_MARK_SUFFIX}; use self::defaults::DEFAULT_CONCURRENT_TENANT_WARMUP; @@ -810,15 +807,11 @@ impl PageServerConf { } /// Points to a place in pageserver's local directory, - /// where certain tenant's tenantconf file should be located. - /// - /// Legacy: superseded by tenant_location_config_path. Eventually - /// remove this function. - pub fn tenant_config_path(&self, tenant_shard_id: &TenantShardId) -> Utf8PathBuf { - self.tenant_path(tenant_shard_id).join(TENANT_CONFIG_NAME) - } - - pub fn tenant_location_config_path(&self, tenant_shard_id: &TenantShardId) -> Utf8PathBuf { + /// where certain tenant's LocationConf be stored. + pub(crate) fn tenant_location_config_path( + &self, + tenant_shard_id: &TenantShardId, + ) -> Utf8PathBuf { self.tenant_path(tenant_shard_id) .join(TENANT_LOCATION_CONFIG_NAME) } diff --git a/pageserver/src/deletion_queue.rs b/pageserver/src/deletion_queue.rs index e779729f8d48..3e48552ace44 100644 --- a/pageserver/src/deletion_queue.rs +++ b/pageserver/src/deletion_queue.rs @@ -382,17 +382,6 @@ pub enum DeletionQueueError { } impl DeletionQueueClient { - pub(crate) fn broken() -> Self { - // Channels whose receivers are immediately dropped. - let (tx, _rx) = tokio::sync::mpsc::unbounded_channel(); - let (executor_tx, _executor_rx) = tokio::sync::mpsc::channel(1); - Self { - tx, - executor_tx, - lsn_table: Arc::default(), - } - } - /// This is cancel-safe. If you drop the future before it completes, the message /// is not pushed, although in the context of the deletion queue it doesn't matter: once /// we decide to do a deletion the decision is always final. 
diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 41d096d7bbb6..5ebd34a40690 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -887,7 +887,9 @@ async fn tenant_list_handler( state: state.clone(), current_physical_size: None, attachment_status: state.attachment_status(), - generation: (*gen).into(), + generation: (*gen) + .into() + .expect("Tenants are always attached with a generation"), }) .collect::>(); @@ -935,7 +937,10 @@ async fn tenant_status( state: state.clone(), current_physical_size: Some(current_physical_size), attachment_status: state.attachment_status(), - generation: tenant.generation().into(), + generation: tenant + .generation() + .into() + .expect("Tenants are always attached with a generation"), }, walredo: tenant.wal_redo_manager_status(), timelines: tenant.list_timeline_ids(), diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index 9e64eafffcab..353f97264c5f 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -113,11 +113,7 @@ pub async fn shutdown_pageserver( } /// Per-tenant configuration file. -/// Full path: `tenants//config`. -pub(crate) const TENANT_CONFIG_NAME: &str = "config"; - -/// Per-tenant configuration file. -/// Full path: `tenants//config`. +/// Full path: `tenants//config-v1`. pub(crate) const TENANT_LOCATION_CONFIG_NAME: &str = "config-v1"; /// Per-tenant copy of their remote heatmap, downloaded into the local diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 30e855eaa2dc..45e542a3367c 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -648,7 +648,7 @@ impl Tenant { init_order: Option, mode: SpawnMode, ctx: &RequestContext, - ) -> anyhow::Result> { + ) -> Arc { let wal_redo_manager = Arc::new(WalRedoManager::from(PostgresRedoManager::new( conf, tenant_shard_id, @@ -856,7 +856,7 @@ impl Tenant { } .instrument(tracing::info_span!(parent: None, "attach", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), gen=?generation)), ); - Ok(tenant) + tenant } #[instrument(skip_all)] @@ -1147,30 +1147,6 @@ impl Tenant { .await } - /// Create a placeholder Tenant object for a broken tenant - pub fn create_broken_tenant( - conf: &'static PageServerConf, - tenant_shard_id: TenantShardId, - remote_storage: GenericRemoteStorage, - reason: String, - ) -> Arc { - Arc::new(Tenant::new( - TenantState::Broken { - reason, - backtrace: String::new(), - }, - conf, - AttachedTenantConf::try_from(LocationConf::default()).unwrap(), - // Shard identity isn't meaningful for a broken tenant: it's just a placeholder - // to occupy the slot for this TenantShardId. 
- ShardIdentity::broken(tenant_shard_id.shard_number, tenant_shard_id.shard_count), - None, - tenant_shard_id, - remote_storage, - DeletionQueueClient::broken(), - )) - } - async fn load_timeline_metadata( self: &Arc, timeline_ids: HashSet, @@ -2494,6 +2470,10 @@ impl Tenant { remote_storage: GenericRemoteStorage, deletion_queue_client: DeletionQueueClient, ) -> Tenant { + debug_assert!( + !attached_conf.location.generation.is_none() || conf.control_plane_api.is_none() + ); + let (state, mut rx) = watch::channel(state); tokio::spawn(async move { @@ -2584,45 +2564,22 @@ impl Tenant { conf: &'static PageServerConf, tenant_shard_id: &TenantShardId, ) -> anyhow::Result { - let legacy_config_path = conf.tenant_config_path(tenant_shard_id); let config_path = conf.tenant_location_config_path(tenant_shard_id); if config_path.exists() { // New-style config takes precedence let deserialized = Self::read_config(&config_path)?; Ok(toml_edit::de::from_document::(deserialized)?) - } else if legacy_config_path.exists() { - // Upgrade path: found an old-style configuration only - let deserialized = Self::read_config(&legacy_config_path)?; - - let mut tenant_conf = TenantConfOpt::default(); - for (key, item) in deserialized.iter() { - match key { - "tenant_config" => { - tenant_conf = TenantConfOpt::try_from(item.to_owned()).context(format!("Failed to parse config from file '{legacy_config_path}' as pageserver config"))?; - } - _ => bail!( - "config file {legacy_config_path} has unrecognized pageserver option '{key}'" - ), - } - } - - // Legacy configs are implicitly in attached state, and do not support sharding - Ok(LocationConf::attached_single( - tenant_conf, - Generation::none(), - &models::ShardParameters::default(), - )) } else { - // FIXME If the config file is not found, assume that we're attaching - // a detached tenant and config is passed via attach command. - // https://github.com/neondatabase/neon/issues/1555 - // OR: we're loading after incomplete deletion that managed to remove config. - info!( - "tenant config not found in {} or {}", - config_path, legacy_config_path - ); - Ok(LocationConf::default()) + // The config should almost always exist for a tenant directory: + // - When attaching a tenant, the config is the first thing we write + // - When detaching a tenant, we atomically move the directory to a tmp location + // before deleting contents. + // + // The very rare edge case that can result in a missing config is if we crash during attach + // between creating directory and writing config. Callers should handle that as if the + // directory didn't exist. 
+ anyhow::bail!("tenant config not found in {}", config_path); } } @@ -2644,47 +2601,17 @@ impl Tenant { tenant_shard_id: &TenantShardId, location_conf: &LocationConf, ) -> anyhow::Result<()> { - let legacy_config_path = conf.tenant_config_path(tenant_shard_id); let config_path = conf.tenant_location_config_path(tenant_shard_id); - Self::persist_tenant_config_at( - tenant_shard_id, - &config_path, - &legacy_config_path, - location_conf, - ) - .await + Self::persist_tenant_config_at(tenant_shard_id, &config_path, location_conf).await } #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))] pub(super) async fn persist_tenant_config_at( tenant_shard_id: &TenantShardId, config_path: &Utf8Path, - legacy_config_path: &Utf8Path, location_conf: &LocationConf, ) -> anyhow::Result<()> { - if let LocationMode::Attached(attach_conf) = &location_conf.mode { - // The modern-style LocationConf config file requires a generation to be set. In case someone - // is running a pageserver without the infrastructure to set generations, write out the legacy-style - // config file that only contains TenantConf. - // - // This will eventually be removed in https://github.com/neondatabase/neon/issues/5388 - - if attach_conf.generation.is_none() { - tracing::info!( - "Running without generations, writing legacy-style tenant config file" - ); - Self::persist_tenant_config_legacy( - tenant_shard_id, - legacy_config_path, - &location_conf.tenant_conf, - ) - .await?; - - return Ok(()); - } - } - debug!("persisting tenantconf to {config_path}"); let mut conf_content = r#"# This file contains a specific per-tenant's config. @@ -2711,37 +2638,6 @@ impl Tenant { Ok(()) } - #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))] - async fn persist_tenant_config_legacy( - tenant_shard_id: &TenantShardId, - target_config_path: &Utf8Path, - tenant_conf: &TenantConfOpt, - ) -> anyhow::Result<()> { - debug!("persisting tenantconf to {target_config_path}"); - - let mut conf_content = r#"# This file contains a specific per-tenant's config. -# It is read in case of pageserver restart. - -[tenant_config] -"# - .to_string(); - - // Convert the config to a toml file. - conf_content += &toml_edit::ser::to_string(&tenant_conf)?; - - let temp_path = path_with_suffix_extension(target_config_path, TEMP_FILE_SUFFIX); - - let tenant_shard_id = *tenant_shard_id; - let target_config_path = target_config_path.to_owned(); - let conf_content = conf_content.into_bytes(); - VirtualFile::crashsafe_overwrite(target_config_path.clone(), temp_path, conf_content) - .await - .with_context(|| { - format!("write tenant {tenant_shard_id} config to {target_config_path}") - })?; - Ok(()) - } - // // How garbage collection works: // diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index 1b9be1264207..5b532e483004 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -281,22 +281,6 @@ impl LocationConf { } } -impl Default for LocationConf { - // TODO: this should be removed once tenant loading can guarantee that we are never - // loading from a directory without a configuration. 
- // => tech debt since https://github.com/neondatabase/neon/issues/1555 - fn default() -> Self { - Self { - mode: LocationMode::Attached(AttachedLocationConfig { - generation: Generation::none(), - attach_mode: AttachmentMode::Single, - }), - tenant_conf: TenantConfOpt::default(), - shard: ShardIdentity::unsharded(), - } - } -} - /// A tenant's calcuated configuration, which is the result of merging a /// tenant's TenantConfOpt with the global TenantConf from PageServerConf. /// diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 1bc21d8b7805..08c3f19b6f75 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -495,17 +495,8 @@ pub async fn init_tenant_mgr( let mut location_conf = match location_conf { Ok(l) => l, Err(e) => { - warn!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Marking tenant broken, failed to {e:#}"); - - tenants.insert( - tenant_shard_id, - TenantSlot::Attached(Tenant::create_broken_tenant( - conf, - tenant_shard_id, - resources.remote_storage.clone(), - format!("{}", e), - )), - ); + // This should only happen in the case of a serialization bug or critical local I/O error: we cannot load this tenant + error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Failed to load tenant config, failed to {e:#}"); continue; } }; @@ -687,8 +678,7 @@ fn tenant_spawn( "Cannot load tenant from empty directory {tenant_path:?}" ); - let remote_storage = resources.remote_storage.clone(); - let tenant = match Tenant::spawn( + let tenant = Tenant::spawn( conf, tenant_shard_id, resources, @@ -697,13 +687,7 @@ fn tenant_spawn( init_order, mode, ctx, - ) { - Ok(tenant) => tenant, - Err(e) => { - error!("Failed to spawn tenant {tenant_shard_id}, reason: {e:#}"); - Tenant::create_broken_tenant(conf, tenant_shard_id, remote_storage, format!("{e:#}")) - } - }; + ); Ok(tenant) } diff --git a/pageserver/src/tenant/secondary/heatmap_uploader.rs b/pageserver/src/tenant/secondary/heatmap_uploader.rs index 9c7a9c4234e5..0aad5bf392d5 100644 --- a/pageserver/src/tenant/secondary/heatmap_uploader.rs +++ b/pageserver/src/tenant/secondary/heatmap_uploader.rs @@ -367,10 +367,9 @@ async fn upload_tenant_heatmap( debug_assert_current_span_has_tenant_id(); let generation = tenant.get_generation(); + debug_assert!(!generation.is_none()); if generation.is_none() { - // We do not expect this: generations were implemented before heatmap uploads. 
However, - // handle it so that we don't have to make the generation in the heatmap an Option<> - // (Generation::none is not serializable) + // We do not expect this: None generations should only appear in historic layer metadata, not in running Tenants tracing::warn!("Skipping heatmap upload for tenant with generation==None"); return Ok(UploadHeatmapOutcome::Skipped); } diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 7eb42d81869b..5dd947253578 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -93,16 +93,12 @@ pub(crate) struct Layer(Arc); impl std::fmt::Display for Layer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if matches!(self.0.generation, Generation::Broken) { - write!(f, "{}-broken", self.layer_desc().short_id()) - } else { - write!( - f, - "{}{}", - self.layer_desc().short_id(), - self.0.generation.get_suffix() - ) - } + write!( + f, + "{}{}", + self.layer_desc().short_id(), + self.0.generation.get_suffix() + ) } } From 04b2ac3fed635bf32b66fa2f9212ce6f5644c8a5 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 26 Jun 2024 16:33:15 -0400 Subject: [PATCH 35/57] test: use aux file v2 policy in benchmarks (#8174) Use aux file v2 in benchmarks. Signed-off-by: Alex Chi Z --- test_runner/fixtures/neon_fixtures.py | 12 +++++------- test_runner/performance/test_logical_replication.py | 3 ++- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 6a29df6f135f..c5fc7ee3513b 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -581,7 +581,7 @@ def init_start( timeline_id=env.initial_timeline, shard_count=initial_tenant_shard_count, shard_stripe_size=initial_tenant_shard_stripe_size, - aux_file_v2=self.pageserver_aux_file_policy, + aux_file_policy=self.pageserver_aux_file_policy, ) assert env.initial_tenant == initial_tenant assert env.initial_timeline == initial_timeline @@ -1604,7 +1604,7 @@ def create_tenant( shard_stripe_size: Optional[int] = None, placement_policy: Optional[str] = None, set_default: bool = False, - aux_file_v2: Optional[AuxFileStore] = None, + aux_file_policy: Optional[AuxFileStore] = None, ) -> Tuple[TenantId, TimelineId]: """ Creates a new tenant, returns its id and its initial timeline's id. 
@@ -1629,13 +1629,11 @@ def create_tenant( ) ) - if aux_file_v2 is AuxFileStore.V2: + if aux_file_policy is AuxFileStore.V2: args.extend(["-c", "switch_aux_file_policy:v2"]) - - if aux_file_v2 is AuxFileStore.V1: + elif aux_file_policy is AuxFileStore.V1: args.extend(["-c", "switch_aux_file_policy:v1"]) - - if aux_file_v2 is AuxFileStore.CrossValidation: + elif aux_file_policy is AuxFileStore.CrossValidation: args.extend(["-c", "switch_aux_file_policy:cross-validation"]) if set_default: diff --git a/test_runner/performance/test_logical_replication.py b/test_runner/performance/test_logical_replication.py index b799f7248fb3..7d11facc2949 100644 --- a/test_runner/performance/test_logical_replication.py +++ b/test_runner/performance/test_logical_replication.py @@ -2,9 +2,10 @@ import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, PgBin, logical_replication_sync +from fixtures.neon_fixtures import AuxFileStore, NeonEnv, PgBin, logical_replication_sync +@pytest.mark.parametrize("pageserver_aux_file_policy", [AuxFileStore.V2]) @pytest.mark.timeout(1000) def test_logical_replication(neon_simple_env: NeonEnv, pg_bin: PgBin, vanilla_pg): env = neon_simple_env From d2753719e322e3efae50a49bd3935cfd465d1434 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 27 Jun 2024 00:54:29 +0300 Subject: [PATCH 36/57] test: Add helper function for importing a Postgres cluster (#8025) Also, modify the "neon_local timeline import" command so that it doesn't create the endpoint any more. I don't see any reason to bundle that in the same command, the "timeline create" and "timeline branch" commands don't do that either. I plan to add more tests similar to 'test_import_at_2bil', this will help to reduce the copy-pasting. --- control_plane/src/bin/neon_local.rs | 29 ++------- test_runner/fixtures/neon_fixtures.py | 64 +++++++++++++++++++ test_runner/regress/test_import.py | 16 ++--- test_runner/regress/test_next_xid.py | 91 ++++++++------------------- 4 files changed, 102 insertions(+), 98 deletions(-) diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 3f656932d5d3..f381337346ff 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -600,13 +600,9 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local Some(("import", import_match)) => { let tenant_id = get_tenant_id(import_match, env)?; let timeline_id = parse_timeline_id(import_match)?.expect("No timeline id provided"); - let name = import_match - .get_one::("node-name") - .ok_or_else(|| anyhow!("No node name provided"))?; - let update_catalog = import_match - .get_one::("update-catalog") - .cloned() - .unwrap_or_default(); + let branch_name = import_match + .get_one::("branch-name") + .ok_or_else(|| anyhow!("No branch name provided"))?; // Parse base inputs let base_tarfile = import_match @@ -633,24 +629,11 @@ async fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::Local .copied() .context("Failed to parse postgres version from the argument string")?; - let mut cplane = ComputeControlPlane::load(env.clone())?; println!("Importing timeline into pageserver ..."); pageserver .timeline_import(tenant_id, timeline_id, base, pg_wal, pg_version) .await?; - env.register_branch_mapping(name.to_string(), tenant_id, timeline_id)?; - - println!("Creating endpoint for imported timeline ..."); - cplane.new_endpoint( - name, - tenant_id, - timeline_id, - None, - None, - pg_version, - ComputeMode::Primary, - 
!update_catalog, - )?; + env.register_branch_mapping(branch_name.to_string(), tenant_id, timeline_id)?; println!("Done"); } Some(("branch", branch_match)) => { @@ -1487,8 +1470,7 @@ fn cli() -> Command { .about("Import timeline from basebackup directory") .arg(tenant_id_arg.clone()) .arg(timeline_id_arg.clone()) - .arg(Arg::new("node-name").long("node-name") - .help("Name to assign to the imported timeline")) + .arg(branch_name_arg.clone()) .arg(Arg::new("base-tarfile") .long("base-tarfile") .value_parser(value_parser!(PathBuf)) @@ -1504,7 +1486,6 @@ fn cli() -> Command { .arg(Arg::new("end-lsn").long("end-lsn") .help("Lsn the basebackup ends at")) .arg(pg_version_arg.clone()) - .arg(update_catalog.clone()) ) ).subcommand( Command::new("tenant") diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index c5fc7ee3513b..6bfe1afd1f2a 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4659,6 +4659,70 @@ def fork_at_current_lsn( return env.neon_cli.create_branch(new_branch_name, ancestor_branch_name, tenant_id, current_lsn) +def import_timeline_from_vanilla_postgres( + test_output_dir: Path, + env: NeonEnv, + pg_bin: PgBin, + tenant_id: TenantId, + timeline_id: TimelineId, + branch_name: str, + vanilla_pg_connstr: str, +): + """ + Create a new timeline, by importing an existing PostgreSQL cluster. + + This works by taking a physical backup of the running PostgreSQL cluster, and importing that. + """ + + # Take backup of the existing PostgreSQL server with pg_basebackup + basebackup_dir = os.path.join(test_output_dir, "basebackup") + base_tar = os.path.join(basebackup_dir, "base.tar") + wal_tar = os.path.join(basebackup_dir, "pg_wal.tar") + os.mkdir(basebackup_dir) + pg_bin.run( + [ + "pg_basebackup", + "-F", + "tar", + "-d", + vanilla_pg_connstr, + "-D", + basebackup_dir, + ] + ) + + # Extract start_lsn and end_lsn form the backup manifest file + with open(os.path.join(basebackup_dir, "backup_manifest")) as f: + manifest = json.load(f) + start_lsn = manifest["WAL-Ranges"][0]["Start-LSN"] + end_lsn = manifest["WAL-Ranges"][0]["End-LSN"] + + # Import the backup tarballs into the pageserver + env.neon_cli.raw_cli( + [ + "timeline", + "import", + "--tenant-id", + str(tenant_id), + "--timeline-id", + str(timeline_id), + "--branch-name", + branch_name, + "--base-lsn", + start_lsn, + "--base-tarfile", + base_tar, + "--end-lsn", + end_lsn, + "--wal-tarfile", + wal_tar, + "--pg-version", + env.pg_version, + ] + ) + wait_for_last_record_lsn(env.pageserver.http_client(), tenant_id, timeline_id, Lsn(end_lsn)) + + def last_flush_lsn_upload( env: NeonEnv, endpoint: Endpoint, diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index ac27a4cf365f..d97e882a7093 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -76,7 +76,7 @@ def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_build start_lsn = manifest["WAL-Ranges"][0]["Start-LSN"] end_lsn = manifest["WAL-Ranges"][0]["End-LSN"] - endpoint_id = "ep-import_from_vanilla" + branch_name = "import_from_vanilla" tenant = TenantId.generate() timeline = TimelineId.generate() @@ -106,8 +106,8 @@ def import_tar(base, wal): str(tenant), "--timeline-id", str(timeline), - "--node-name", - endpoint_id, + "--branch-name", + branch_name, "--base-lsn", start_lsn, "--base-tarfile", @@ -146,7 +146,7 @@ def import_tar(base, wal): wait_for_upload(client, tenant, timeline, Lsn(end_lsn)) # Check it worked 
- endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant) + endpoint = env.endpoints.create_start(branch_name, tenant_id=tenant) assert endpoint.safe_psql("select count(*) from t") == [(300000,)] vanilla_pg.stop() @@ -265,7 +265,7 @@ def _import( tenant = TenantId.generate() # Import to pageserver - endpoint_id = "ep-import_from_pageserver" + branch_name = "import_from_pageserver" client = env.pageserver.http_client() env.pageserver.tenant_create(tenant) env.neon_cli.raw_cli( @@ -276,8 +276,8 @@ def _import( str(tenant), "--timeline-id", str(timeline), - "--node-name", - endpoint_id, + "--branch-name", + branch_name, "--base-lsn", str(lsn), "--base-tarfile", @@ -292,7 +292,7 @@ def _import( wait_for_upload(client, tenant, timeline, lsn) # Check it worked - endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant, lsn=lsn) + endpoint = env.endpoints.create_start(branch_name, tenant_id=tenant, lsn=lsn) assert endpoint.safe_psql("select count(*) from tbl") == [(expected_num_rows,)] # Take another fullbackup diff --git a/test_runner/regress/test_next_xid.py b/test_runner/regress/test_next_xid.py index 98fb06a0d667..b9e7e642b51c 100644 --- a/test_runner/regress/test_next_xid.py +++ b/test_runner/regress/test_next_xid.py @@ -1,13 +1,14 @@ -import json import os import time from pathlib import Path -from fixtures.common_types import Lsn, TenantId, TimelineId +from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, wait_for_wal_insert_lsn -from fixtures.pageserver.utils import ( - wait_for_last_record_lsn, +from fixtures.neon_fixtures import ( + NeonEnvBuilder, + PgBin, + import_timeline_from_vanilla_postgres, + wait_for_wal_insert_lsn, ) from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import query_scalar @@ -76,7 +77,6 @@ def test_import_at_2bil( ): neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) env = neon_env_builder.init_start() - ps_http = env.pageserver.http_client() # Reset the vanilla Postgres instance to somewhat before 2 billion transactions. 
pg_resetwal_path = os.path.join(pg_bin.pg_bin_path, "pg_resetwal") @@ -92,68 +92,28 @@ def test_import_at_2bil( assert vanilla_pg.safe_psql("select count(*) from tt") == [(300000,)] vanilla_pg.safe_psql("CREATE TABLE t (t text);") vanilla_pg.safe_psql("INSERT INTO t VALUES ('inserted in vanilla')") - - endpoint_id = "ep-import_from_vanilla" - tenant = TenantId.generate() - timeline = TimelineId.generate() - - env.pageserver.tenant_create(tenant) - - # Take basebackup - basebackup_dir = os.path.join(test_output_dir, "basebackup") - base_tar = os.path.join(basebackup_dir, "base.tar") - wal_tar = os.path.join(basebackup_dir, "pg_wal.tar") - os.mkdir(basebackup_dir) vanilla_pg.safe_psql("CHECKPOINT") - pg_bin.run( - [ - "pg_basebackup", - "-F", - "tar", - "-d", - vanilla_pg.connstr(), - "-D", - basebackup_dir, - ] - ) - # Get start_lsn and end_lsn - with open(os.path.join(basebackup_dir, "backup_manifest")) as f: - manifest = json.load(f) - start_lsn = manifest["WAL-Ranges"][0]["Start-LSN"] - end_lsn = manifest["WAL-Ranges"][0]["End-LSN"] - - def import_tar(base, wal): - env.neon_cli.raw_cli( - [ - "timeline", - "import", - "--tenant-id", - str(tenant), - "--timeline-id", - str(timeline), - "--node-name", - endpoint_id, - "--base-lsn", - start_lsn, - "--base-tarfile", - base, - "--end-lsn", - end_lsn, - "--wal-tarfile", - wal, - "--pg-version", - env.pg_version, - ] - ) - - # Importing correct backup works - import_tar(base_tar, wal_tar) - wait_for_last_record_lsn(ps_http, tenant, timeline, Lsn(end_lsn)) + tenant_id = TenantId.generate() + env.pageserver.tenant_create(tenant_id) + timeline_id = TimelineId.generate() + + # Import the cluster to Neon + import_timeline_from_vanilla_postgres( + test_output_dir, + env, + pg_bin, + tenant_id, + timeline_id, + "imported_2bil_xids", + vanilla_pg.connstr(), + ) + vanilla_pg.stop() # don't need the original server anymore + # Check that it works endpoint = env.endpoints.create_start( - endpoint_id, - tenant_id=tenant, + "imported_2bil_xids", + tenant_id=tenant_id, config_lines=[ "log_autovacuum_min_duration = 0", "autovacuum_naptime='5 s'", @@ -161,7 +121,6 @@ def import_tar(base, wal): ) assert endpoint.safe_psql("select count(*) from t") == [(1,)] - # Ok, consume conn = endpoint.connect() cur = conn.cursor() @@ -213,7 +172,7 @@ def import_tar(base, wal): cur.execute("checkpoint") # wait until pageserver receives that data - wait_for_wal_insert_lsn(env, endpoint, tenant, timeline) + wait_for_wal_insert_lsn(env, endpoint, tenant_id, timeline_id) # Restart endpoint endpoint.stop() From 32b75e7c7361d57671c037651c6fa943f18e94a7 Mon Sep 17 00:00:00 2001 From: Cihan Demirci <128653800+fcdm@users.noreply.github.com> Date: Wed, 26 Jun 2024 23:36:41 +0100 Subject: [PATCH 37/57] CI: additional trigger on merge to main (#8176) Before we consolidate workflows we want to be triggered by merges to main. 
https://github.com/neondatabase/cloud/issues/14862 --- .github/workflows/build_and_test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 113b37ae518b..87f04996fd81 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1245,6 +1245,7 @@ jobs: run: | if [[ "$GITHUB_REF_NAME" == "main" ]]; then gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main -f branch=main -f dockerTag=${{needs.tag.outputs.build-tag}} -f deployPreprodRegion=false + gh workflow --repo neondatabase/azure run deploy.yml -f dockerTag=${{needs.tag.outputs.build-tag}} elif [[ "$GITHUB_REF_NAME" == "release" ]]; then gh workflow --repo neondatabase/aws run deploy-dev.yml --ref main \ -f deployPgSniRouter=false \ From d55700267503f68e707e137710d1ee7a0dc4d693 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Thu, 27 Jun 2024 11:56:57 +0100 Subject: [PATCH 38/57] strocon: don't overcommit when making node fill plan (#8171) ## Problem The fill requirement was not taken into account when looking through the shards of a given node to fill from. ## Summary of Changes Ensure that we do not fill a node past the recommendation from `Scheduler::compute_fill_requirement`. --- storage_controller/src/service.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index e329f42dd610..a94575b4286d 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -5564,9 +5564,12 @@ impl Service { break; } - let mut can_take = attached - expected_attached; + let can_take = attached - expected_attached; + let needed = fill_requirement - plan.len(); + let mut take = std::cmp::min(can_take, needed); + let mut remove_node = false; - while can_take > 0 { + while take > 0 { match tids_by_node.get_mut(&node_id) { Some(tids) => match tids.pop() { Some(tid) => { @@ -5578,7 +5581,7 @@ impl Service { if *promoted < max_promote_for_tenant { plan.push(tid); *promoted += 1; - can_take -= 1; + take -= 1; } } None => { From 6f20a18e8e20c7cdf689b728fccd7de88eb29ca7 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 17 Jun 2024 16:23:07 +0300 Subject: [PATCH 39/57] Allow to change compute safekeeper list without restart. - Add --safekeepers option to neon_local reconfigure - Add it to python Endpoint reconfigure - Implement config reload in walproposer by restarting the whole bgw when safekeeper list changes. ref https://github.com/neondatabase/neon/issues/6341 --- control_plane/src/bin/neon_local.rs | 46 +++++++++++++-------- control_plane/src/endpoint.rs | 37 +++++++++++------ pgxn/neon/walproposer_pg.c | 52 ++++++++++++++++++++++-- storage_controller/src/compute_hook.rs | 2 +- test_runner/fixtures/neon_fixtures.py | 22 +++++++++- test_runner/regress/test_wal_acceptor.py | 35 +++++++--------- 6 files changed, 139 insertions(+), 55 deletions(-) diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index f381337346ff..2c05938f4410 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -848,20 +848,13 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re let allow_multiple = sub_args.get_flag("allow-multiple"); - // If --safekeepers argument is given, use only the listed safekeeper nodes. 
- let safekeepers = - if let Some(safekeepers_str) = sub_args.get_one::("safekeepers") { - let mut safekeepers: Vec = Vec::new(); - for sk_id in safekeepers_str.split(',').map(str::trim) { - let sk_id = NodeId(u64::from_str(sk_id).map_err(|_| { - anyhow!("invalid node ID \"{sk_id}\" in --safekeepers list") - })?); - safekeepers.push(sk_id); - } - safekeepers - } else { - env.safekeepers.iter().map(|sk| sk.id).collect() - }; + // If --safekeepers argument is given, use only the listed + // safekeeper nodes; otherwise all from the env. + let safekeepers = if let Some(safekeepers) = parse_safekeepers(sub_args)? { + safekeepers + } else { + env.safekeepers.iter().map(|sk| sk.id).collect() + }; let endpoint = cplane .endpoints @@ -965,7 +958,10 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re }) .collect::>() }; - endpoint.reconfigure(pageservers, None).await?; + // If --safekeepers argument is given, use only the listed + // safekeeper nodes; otherwise all from the env. + let safekeepers = parse_safekeepers(sub_args)?; + endpoint.reconfigure(pageservers, None, safekeepers).await?; } "stop" => { let endpoint_id = sub_args @@ -987,6 +983,23 @@ async fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Re Ok(()) } +/// Parse --safekeepers as list of safekeeper ids. +fn parse_safekeepers(sub_args: &ArgMatches) -> Result>> { + if let Some(safekeepers_str) = sub_args.get_one::("safekeepers") { + let mut safekeepers: Vec = Vec::new(); + for sk_id in safekeepers_str.split(',').map(str::trim) { + let sk_id = NodeId( + u64::from_str(sk_id) + .map_err(|_| anyhow!("invalid node ID \"{sk_id}\" in --safekeepers list"))?, + ); + safekeepers.push(sk_id); + } + Ok(Some(safekeepers)) + } else { + Ok(None) + } +} + fn handle_mappings(sub_match: &ArgMatches, env: &mut local_env::LocalEnv) -> Result<()> { let (sub_name, sub_args) = match sub_match.subcommand() { Some(ep_subcommand_data) => ep_subcommand_data, @@ -1590,7 +1603,7 @@ fn cli() -> Command { .about("Start postgres.\n If the endpoint doesn't exist yet, it is created.") .arg(endpoint_id_arg.clone()) .arg(endpoint_pageserver_id_arg.clone()) - .arg(safekeepers_arg) + .arg(safekeepers_arg.clone()) .arg(remote_ext_config_args) .arg(create_test_user) .arg(allow_multiple.clone()) @@ -1599,6 +1612,7 @@ fn cli() -> Command { .subcommand(Command::new("reconfigure") .about("Reconfigure the endpoint") .arg(endpoint_pageserver_id_arg) + .arg(safekeepers_arg) .arg(endpoint_id_arg.clone()) .arg(tenant_id_arg.clone()) ) diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index b928bbfc308e..f9bb2da7e7ac 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -499,6 +499,23 @@ impl Endpoint { .join(",") } + /// Map safekeepers ids to the actual connection strings. 
+ fn build_safekeepers_connstrs(&self, sk_ids: Vec) -> Result> { + let mut safekeeper_connstrings = Vec::new(); + if self.mode == ComputeMode::Primary { + for sk_id in sk_ids { + let sk = self + .env + .safekeepers + .iter() + .find(|node| node.id == sk_id) + .ok_or_else(|| anyhow!("safekeeper {sk_id} does not exist"))?; + safekeeper_connstrings.push(format!("127.0.0.1:{}", sk.get_compute_port())); + } + } + Ok(safekeeper_connstrings) + } + pub async fn start( &self, auth_token: &Option, @@ -523,18 +540,7 @@ impl Endpoint { let pageserver_connstring = Self::build_pageserver_connstr(&pageservers); assert!(!pageserver_connstring.is_empty()); - let mut safekeeper_connstrings = Vec::new(); - if self.mode == ComputeMode::Primary { - for sk_id in safekeepers { - let sk = self - .env - .safekeepers - .iter() - .find(|node| node.id == sk_id) - .ok_or_else(|| anyhow!("safekeeper {sk_id} does not exist"))?; - safekeeper_connstrings.push(format!("127.0.0.1:{}", sk.get_compute_port())); - } - } + let safekeeper_connstrings = self.build_safekeepers_connstrs(safekeepers)?; // check for file remote_extensions_spec.json // if it is present, read it and pass to compute_ctl @@ -740,6 +746,7 @@ impl Endpoint { &self, mut pageservers: Vec<(Host, u16)>, stripe_size: Option, + safekeepers: Option>, ) -> Result<()> { let mut spec: ComputeSpec = { let spec_path = self.endpoint_path().join("spec.json"); @@ -774,6 +781,12 @@ impl Endpoint { spec.shard_stripe_size = stripe_size.map(|s| s.0 as usize); } + // If safekeepers are not specified, don't change them. + if let Some(safekeepers) = safekeepers { + let safekeeper_connstrings = self.build_safekeepers_connstrs(safekeepers)?; + spec.safekeeper_connstrings = safekeeper_connstrings; + } + let client = reqwest::Client::builder() .timeout(Duration::from_secs(30)) .build() diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index da1a6f76f0a2..944b316344dd 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -63,6 +63,8 @@ char *wal_acceptors_list = ""; int wal_acceptor_reconnect_timeout = 1000; int wal_acceptor_connection_timeout = 10000; +/* Set to true in the walproposer bgw. 
*/ +static bool am_walproposer; static WalproposerShmemState *walprop_shared; static WalProposerConfig walprop_config; static XLogRecPtr sentPtr = InvalidXLogRecPtr; @@ -76,6 +78,7 @@ static HotStandbyFeedback agg_hs_feedback; static void nwp_shmem_startup_hook(void); static void nwp_register_gucs(void); +static void assign_neon_safekeepers(const char *newval, void *extra); static void nwp_prepare_shmem(void); static uint64 backpressure_lag_impl(void); static bool backpressure_throttling_impl(void); @@ -111,7 +114,8 @@ init_walprop_config(bool syncSafekeepers) { walprop_config.neon_tenant = neon_tenant; walprop_config.neon_timeline = neon_timeline; - walprop_config.safekeepers_list = wal_acceptors_list; + /* WalProposerCreate scribbles directly on it, so pstrdup */ + walprop_config.safekeepers_list = pstrdup(wal_acceptors_list); walprop_config.safekeeper_reconnect_timeout = wal_acceptor_reconnect_timeout; walprop_config.safekeeper_connection_timeout = wal_acceptor_connection_timeout; walprop_config.wal_segment_size = wal_segment_size; @@ -151,6 +155,7 @@ WalProposerMain(Datum main_arg) init_walprop_config(false); walprop_pg_init_bgworker(); + am_walproposer = true; walprop_pg_load_libpqwalreceiver(); wp = WalProposerCreate(&walprop_config, walprop_pg); @@ -189,10 +194,10 @@ nwp_register_gucs(void) NULL, /* long_desc */ &wal_acceptors_list, /* valueAddr */ "", /* bootValue */ - PGC_POSTMASTER, + PGC_SIGHUP, GUC_LIST_INPUT, /* extensions can't use* * GUC_LIST_QUOTE */ - NULL, NULL, NULL); + NULL, assign_neon_safekeepers, NULL); DefineCustomIntVariable( "neon.safekeeper_reconnect_timeout", @@ -215,6 +220,33 @@ nwp_register_gucs(void) NULL, NULL, NULL); } +/* + * GUC assign_hook for neon.safekeepers. Restarts walproposer through FATAL if + * the list changed. + */ +static void +assign_neon_safekeepers(const char *newval, void *extra) +{ + if (!am_walproposer) + return; + + if (!newval) { + /* should never happen */ + wpg_log(FATAL, "neon.safekeepers is empty"); + } + + /* + * TODO: restarting through FATAL is stupid and introduces 1s delay before + * next bgw start. We should refactor walproposer to allow graceful exit and + * thus remove this delay. + */ + if (strcmp(wal_acceptors_list, newval) != 0) + { + wpg_log(FATAL, "restarting walproposer to change safekeeper list from %s to %s", + wal_acceptors_list, newval); + } +} + /* Check if we need to suspend inserts because of lagging replication. */ static uint64 backpressure_lag_impl(void) @@ -363,7 +395,7 @@ walprop_register_bgworker(void) snprintf(bgw.bgw_function_name, BGW_MAXLEN, "WalProposerMain"); snprintf(bgw.bgw_name, BGW_MAXLEN, "WAL proposer"); snprintf(bgw.bgw_type, BGW_MAXLEN, "WAL proposer"); - bgw.bgw_restart_time = 5; + bgw.bgw_restart_time = 1; bgw.bgw_notify_pid = 0; bgw.bgw_main_arg = (Datum) 0; @@ -1639,6 +1671,18 @@ walprop_pg_wait_event_set(WalProposer *wp, long timeout, Safekeeper **sk, uint32 late_cv_trigger = ConditionVariableCancelSleep(); #endif + /* + * Process config if requested. This restarts walproposer if safekeepers + * list changed. Don't do that for sync-safekeepers because quite probably + * it (re-reading config) won't work without some effort, and + * sync-safekeepers should be quick to finish anyway. + */ + if (!wp->config->syncSafekeepers && ConfigReloadPending) + { + ConfigReloadPending = false; + ProcessConfigFile(PGC_SIGHUP); + } + /* * If wait is terminated by latch set (walsenders' latch is set on each * wal flush). 
(no need for pm death check due to WL_EXIT_ON_PM_DEATH) diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index 4d0f8006aaa4..c46539485c1f 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -323,7 +323,7 @@ impl ComputeHook { if endpoint.tenant_id == *tenant_id && endpoint.status() == EndpointStatus::Running { tracing::info!("Reconfiguring endpoint {}", endpoint_name,); endpoint - .reconfigure(compute_pageservers.clone(), *stripe_size) + .reconfigure(compute_pageservers.clone(), *stripe_size, None) .await .map_err(NotifyError::NeonLocal)?; } diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 6bfe1afd1f2a..a3f83abd3eed 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -1933,6 +1933,7 @@ def endpoint_reconfigure( endpoint_id: str, tenant_id: Optional[TenantId] = None, pageserver_id: Optional[int] = None, + safekeepers: Optional[List[int]] = None, check_return_code=True, ) -> "subprocess.CompletedProcess[str]": args = ["endpoint", "reconfigure", endpoint_id] @@ -1940,6 +1941,8 @@ def endpoint_reconfigure( args.extend(["--tenant-id", str(tenant_id)]) if pageserver_id is not None: args.extend(["--pageserver-id", str(pageserver_id)]) + if safekeepers is not None: + args.extend(["--safekeepers", (",".join(map(str, safekeepers)))]) return self.raw_cli(args, check_return_code=check_return_code) def endpoint_stop( @@ -3484,6 +3487,7 @@ def __init__( self.pg_port = pg_port self.http_port = http_port self.check_stop_result = check_stop_result + # passed to endpoint create and endpoint reconfigure self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers)) # path to conf is /endpoints//pgdata/postgresql.conf @@ -3552,6 +3556,7 @@ def start( self, remote_ext_config: Optional[str] = None, pageserver_id: Optional[int] = None, + safekeepers: Optional[List[int]] = None, allow_multiple: bool = False, ) -> "Endpoint": """ @@ -3561,6 +3566,11 @@ def start( assert self.endpoint_id is not None + # If `safekeepers` is not None, they are remember them as active and use + # in the following commands. + if safekeepers is not None: + self.active_safekeepers = safekeepers + log.info(f"Starting postgres endpoint {self.endpoint_id}") self.env.neon_cli.endpoint_start( @@ -3624,9 +3634,17 @@ def edit_hba(self, hba: List[str]): def is_running(self): return self._running._value > 0 - def reconfigure(self, pageserver_id: Optional[int] = None): + def reconfigure( + self, pageserver_id: Optional[int] = None, safekeepers: Optional[List[int]] = None + ): assert self.endpoint_id is not None - self.env.neon_cli.endpoint_reconfigure(self.endpoint_id, self.tenant_id, pageserver_id) + # If `safekeepers` is not None, they are remember them as active and use + # in the following commands. + if safekeepers is not None: + self.active_safekeepers = safekeepers + self.env.neon_cli.endpoint_reconfigure( + self.endpoint_id, self.tenant_id, pageserver_id, self.active_safekeepers + ) def respec(self, **kwargs): """Update the endpoint.json file used by control_plane.""" diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index ac1a3bef67bd..febfc102930a 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -1725,7 +1725,10 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): # Basic pull_timeline test. 
-def test_pull_timeline(neon_env_builder: NeonEnvBuilder): +# When live_sk_change is False, compute is restarted to change set of +# safekeepers; otherwise it is live reload. +@pytest.mark.parametrize("live_sk_change", [False, True]) +def test_pull_timeline(neon_env_builder: NeonEnvBuilder, live_sk_change: bool): neon_env_builder.auth_enabled = True def execute_payload(endpoint: Endpoint): @@ -1758,8 +1761,7 @@ def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_i log.info("Use only first 3 safekeepers") env.safekeepers[3].stop() endpoint = env.endpoints.create("main") - endpoint.active_safekeepers = [1, 2, 3] - endpoint.start() + endpoint.start(safekeepers=[1, 2, 3]) execute_payload(endpoint) show_statuses(env.safekeepers, tenant_id, timeline_id) @@ -1771,29 +1773,22 @@ def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_i log.info("Initialize new safekeeper 4, pull data from 1 & 3") env.safekeepers[3].start() - res = ( - env.safekeepers[3] - .http_client(auth_token=env.auth_keys.generate_safekeeper_token()) - .pull_timeline( - { - "tenant_id": str(tenant_id), - "timeline_id": str(timeline_id), - "http_hosts": [ - f"http://localhost:{env.safekeepers[0].port.http}", - f"http://localhost:{env.safekeepers[2].port.http}", - ], - } - ) + res = env.safekeepers[3].pull_timeline( + [env.safekeepers[0], env.safekeepers[2]], tenant_id, timeline_id ) log.info("Finished pulling timeline") log.info(res) show_statuses(env.safekeepers, tenant_id, timeline_id) - log.info("Restarting compute with new config to verify that it works") - endpoint.stop_and_destroy().create("main") - endpoint.active_safekeepers = [1, 3, 4] - endpoint.start() + action = "reconfiguing" if live_sk_change else "restarting" + log.info(f"{action} compute with new config to verify that it works") + new_sks = [1, 3, 4] + if not live_sk_change: + endpoint.stop_and_destroy().create("main") + endpoint.start(safekeepers=new_sks) + else: + endpoint.reconfigure(safekeepers=new_sks) execute_payload(endpoint) show_statuses(env.safekeepers, tenant_id, timeline_id) From 54a06de4b5ef7dc0ceadfe3cc553e164490e6ae4 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Thu, 27 Jun 2024 13:56:03 +0100 Subject: [PATCH 40/57] CI: Use `runner.arch` in cache keys along with `runner.os` (#8175) ## Problem The cache keys that we use on CI are the same for X64 and ARM64 (`runner.arch`) ## Summary of changes - Include `runner.arch` along with `runner.os` into cache keys --- .../actions/allure-report-generate/action.yml | 2 +- .../actions/run-python-test-set/action.yml | 6 ++--- .github/workflows/benchmarking.yml | 12 +++++----- .github/workflows/build_and_test.yml | 22 +++++++++---------- .github/workflows/pg_clients.yml | 4 ++-- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/actions/allure-report-generate/action.yml b/.github/actions/allure-report-generate/action.yml index f84beff20c52..11adc8df86ec 100644 --- a/.github/actions/allure-report-generate/action.yml +++ b/.github/actions/allure-report-generate/action.yml @@ -183,7 +183,7 @@ runs: uses: actions/cache@v4 with: path: ~/.cache/pypoetry/virtualenvs - key: v2-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} + key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-${{ hashFiles('poetry.lock') }} - name: Store Allure test stat in the DB (new) if: ${{ !cancelled() && inputs.store-test-results-into-db == 'true' }} diff --git a/.github/actions/run-python-test-set/action.yml 
b/.github/actions/run-python-test-set/action.yml index d5c1fcf524de..c6ea52ba8812 100644 --- a/.github/actions/run-python-test-set/action.yml +++ b/.github/actions/run-python-test-set/action.yml @@ -56,14 +56,14 @@ runs: if: inputs.build_type != 'remote' uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact path: /tmp/neon - name: Download Neon binaries for the previous release if: inputs.build_type != 'remote' uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-${{ inputs.build_type }}-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build_type }}-artifact path: /tmp/neon-previous prefix: latest @@ -89,7 +89,7 @@ runs: uses: actions/cache@v4 with: path: ~/.cache/pypoetry/virtualenvs - key: v2-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} + key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-${{ hashFiles('poetry.lock') }} - name: Install Python deps shell: bash -euxo pipefail {0} diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index 9eff4836809d..db4209500ff5 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -77,7 +77,7 @@ jobs: - name: Download Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-release-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact path: /tmp/neon/ prefix: latest @@ -235,7 +235,7 @@ jobs: - name: Download Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-release-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact path: /tmp/neon/ prefix: latest @@ -373,7 +373,7 @@ jobs: - name: Download Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-release-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact path: /tmp/neon/ prefix: latest @@ -473,7 +473,7 @@ jobs: - name: Download Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-release-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact path: /tmp/neon/ prefix: latest @@ -576,7 +576,7 @@ jobs: - name: Download Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-release-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact path: /tmp/neon/ prefix: latest @@ -677,7 +677,7 @@ jobs: - name: Download Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-release-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact path: /tmp/neon/ prefix: latest diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 87f04996fd81..9cea9f41485d 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -109,7 +109,7 @@ jobs: uses: actions/cache@v4 with: path: ~/.cache/pypoetry/virtualenvs - key: v2-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} + key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-${{ hashFiles('poetry.lock') }} - name: Install Python deps run: ./scripts/pysync @@ -149,7 +149,7 @@ jobs: # !~/.cargo/registry/src # ~/.cargo/git/ # target/ -# key: v1-${{ runner.os }}-cargo-clippy-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }} +# key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-clippy-${{ hashFiles('rust-toolchain.toml') }}-${{ 
hashFiles('Cargo.lock') }} # Some of our rust modules use FFI and need those to be checked - name: Get postgres headers @@ -291,29 +291,29 @@ jobs: # target/ # # Fall back to older versions of the key, if no cache for current Cargo.lock was found # key: | -# v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }} -# v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}- +# v1-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }} +# v1-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}- - name: Cache postgres v14 build id: cache_pg_14 uses: actions/cache@v4 with: path: pg_install/v14 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Cache postgres v15 build id: cache_pg_15 uses: actions/cache@v4 with: path: pg_install/v15 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Cache postgres v16 build id: cache_pg_16 uses: actions/cache@v4 with: path: pg_install/v16 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Build postgres v14 if: steps.cache_pg_14.outputs.cache-hit != 'true' @@ -411,7 +411,7 @@ jobs: - name: Upload Neon artifact uses: ./.github/actions/upload with: - name: neon-${{ runner.os }}-${{ matrix.build_type }}-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact path: /tmp/neon # XXX: keep this after the binaries.list is formed, so the coverage can properly work later @@ -490,7 +490,7 @@ jobs: uses: actions/cache@v4 with: path: ~/.cache/pypoetry/virtualenvs - key: v1-${{ runner.os }}-python-deps-${{ hashFiles('poetry.lock') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-python-deps-${{ hashFiles('poetry.lock') }} - name: Install Python deps run: ./scripts/pysync @@ -639,7 +639,7 @@ jobs: - name: Get Neon artifact uses: ./.github/actions/download with: - name: neon-${{ runner.os }}-${{ matrix.build_type }}-artifact + name: neon-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build_type }}-artifact path: /tmp/neon - name: Get coverage artifact @@ -1340,7 +1340,7 @@ jobs: # Update Neon artifact for the release (reuse already uploaded artifact) for build_type in debug release; do OLD_PREFIX=artifacts/${GITHUB_RUN_ID} - FILENAME=neon-${{ runner.os }}-${build_type}-artifact.tar.zst + FILENAME=neon-${{ runner.os }}-${{ runner.arch }}-${build_type}-artifact.tar.zst S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${OLD_PREFIX} | jq -r '.Contents[]?.Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true) if [ -z "${S3_KEY}" ]; then diff --git a/.github/workflows/pg_clients.yml 
b/.github/workflows/pg_clients.yml index fef3aec754b2..dd09abddb848 100644 --- a/.github/workflows/pg_clients.yml +++ b/.github/workflows/pg_clients.yml @@ -41,7 +41,7 @@ jobs: uses: actions/cache@v4 with: path: ~/.cache/pypoetry/virtualenvs - key: v2-${{ runner.os }}-python-deps-ubunutu-latest-${{ hashFiles('poetry.lock') }} + key: v2-${{ runner.os }}-${{ runner.arch }}-python-deps-ubunutu-latest-${{ hashFiles('poetry.lock') }} - name: Install Python deps shell: bash -euxo pipefail {0} @@ -85,7 +85,7 @@ jobs: uses: actions/upload-artifact@v4 with: retention-days: 7 - name: python-test-pg_clients-${{ runner.os }}-stage-logs + name: python-test-pg_clients-${{ runner.os }}-${{ runner.arch }}-stage-logs path: ${{ env.TEST_OUTPUT }} - name: Post to a Slack channel From 89cf8df93bae771e92b65a510ce8ff33801437a6 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Thu, 27 Jun 2024 14:16:41 +0100 Subject: [PATCH 41/57] stocon: bump number of concurrent reconciles per operation (#8179) ## Problem Background node operations take a long time for loaded nodes. ## Summary of changes Increase number of concurrent reconciles an operation is allowed to spawn. This should make drain and fill operations faster and the new value is still well below the total limit of concurrent reconciles. --- storage_controller/src/background_node_operations.rs | 2 +- test_runner/regress/test_storage_controller.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage_controller/src/background_node_operations.rs b/storage_controller/src/background_node_operations.rs index 74b7e7c84955..6f1355eb6848 100644 --- a/storage_controller/src/background_node_operations.rs +++ b/storage_controller/src/background_node_operations.rs @@ -3,7 +3,7 @@ use std::{borrow::Cow, fmt::Debug, fmt::Display}; use tokio_util::sync::CancellationToken; use utils::id::NodeId; -pub(crate) const MAX_RECONCILES_PER_OPERATION: usize = 10; +pub(crate) const MAX_RECONCILES_PER_OPERATION: usize = 32; #[derive(Copy, Clone)] pub(crate) struct Drain { diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 9cc13ecfdbca..139a100872fb 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -1636,7 +1636,7 @@ def test_background_operation_cancellation(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_configs() env.start() - tenant_count = 5 + tenant_count = 10 shard_count_per_tenant = 8 tenant_ids = [] From 66b0bf41a1c9ac140a5af8ab61a94f66e821ae8d Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 27 Jun 2024 15:58:28 +0200 Subject: [PATCH 42/57] fix: shutdown does not kill walredo processes (#8150) While investigating Pageserver logs from the cases where systemd hangs during shutdown (https://github.com/neondatabase/cloud/issues/11387), I noticed that even if Pageserver shuts down cleanly[^1], there are lingering walredo processes. [^1]: Meaning, pageserver finishes its shutdown procedure and calls `exit(0)` on its own terms, instead of hitting the systemd unit's `TimeoutSec=` limit and getting SIGKILLed. While systemd should never lock up like it does, maybe we can avoid hitting that bug by cleaning up properly. Changes ------- This PR adds a shutdown method to `WalRedoManager` and hooks it up to tenant shutdown. We keep track of intent to shutdown through the new `enum ProcessOnceCell` stored inside the pre-existing `redo_process` field. 
A gate is added to keep track of running processes, using the new type `struct Process`. Future Work ----------- Requests that don't need the redo process will not observe the shutdown (see doc comment). Doing so would be nice for completeness' sake, but doesn't provide much benefit because `Tenant` and `Timeline` already shut down all walredo users. Testing ------- I did manual testing to confirm that the problem exists before this PR and that it's gone after. Setup: * `neon_local` with a single tenant, create some data using `pgbench` * ensure a walredo process is running, note its pid * watch `strace -e kill,wait4 -f -p "$(pgrep pageserver)"` * `neon_local pageserver stop` With this PR, we always observe ``` $ strace -e kill,wait4 -f -p "$(pgrep pageserver)" ... [pid 591120] --- SIGTERM {si_signo=SIGTERM, si_code=SI_USER, si_pid=591215, si_uid=1000} --- [pid 591134] kill(591174, SIGKILL) = 0 [pid 591134] wait4(591174, [pid 591142] --- SIGCHLD {si_signo=SIGCHLD, si_code=CLD_KILLED, si_pid=591174, si_uid=1000, si_status=SIGKILL, si_utime=0, si_stime=0} --- [pid 591134] <... wait4 resumed>[{WIFSIGNALED(s) && WTERMSIG(s) == SIGKILL}], 0, NULL) = 591174 ... +++ exited with 0 +++ ``` Before this PR, we'd usually observe just ``` ... [pid 596239] --- SIGTERM {si_signo=SIGTERM, si_code=SI_USER, si_pid=596455, si_uid=1000} --- ... +++ exited with 0 +++ ``` Refs ---- refs https://github.com/neondatabase/cloud/issues/11387 --- pageserver/benches/bench_walredo.rs | 2 + pageserver/src/tenant.rs | 19 ++- pageserver/src/walredo.rs | 190 ++++++++++++++++++++++------ 3 files changed, 169 insertions(+), 42 deletions(-) diff --git a/pageserver/benches/bench_walredo.rs b/pageserver/benches/bench_walredo.rs index 5aab10e5d9c0..edc09d0bf22a 100644 --- a/pageserver/benches/bench_walredo.rs +++ b/pageserver/benches/bench_walredo.rs @@ -48,6 +48,7 @@ //! medium/128 time: [8.8311 ms 8.9849 ms 9.1263 ms] //!
``` +use anyhow::Context; use bytes::{Buf, Bytes}; use criterion::{BenchmarkId, Criterion}; use pageserver::{config::PageServerConf, walrecord::NeonWalRecord, walredo::PostgresRedoManager}; @@ -188,6 +189,7 @@ impl Request { manager .request_redo(*key, *lsn, base_img.clone(), records.clone(), *pg_version) .await + .context("request_redo") } fn pg_record(will_init: bool, bytes: &'static [u8]) -> NeonWalRecord { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 45e542a3367c..22d680486184 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -88,6 +88,7 @@ use crate::tenant::remote_timeline_client::MaybeDeletedIndexPart; use crate::tenant::remote_timeline_client::INITDB_PATH; use crate::tenant::storage_layer::DeltaLayer; use crate::tenant::storage_layer::ImageLayer; +use crate::walredo; use crate::InitializationOrder; use std::collections::hash_map::Entry; use std::collections::BTreeSet; @@ -323,6 +324,16 @@ impl From for WalRedoManager { } impl WalRedoManager { + pub(crate) async fn shutdown(&self) { + match self { + Self::Prod(mgr) => mgr.shutdown().await, + #[cfg(test)] + Self::Test(_) => { + // Not applicable to test redo manager + } + } + } + pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) { match self { Self::Prod(mgr) => mgr.maybe_quiesce(idle_timeout), @@ -343,7 +354,7 @@ impl WalRedoManager { base_img: Option<(Lsn, bytes::Bytes)>, records: Vec<(Lsn, crate::walrecord::NeonWalRecord)>, pg_version: u32, - ) -> anyhow::Result { + ) -> Result { match self { Self::Prod(mgr) => { mgr.request_redo(key, lsn, base_img, records, pg_version) @@ -1853,6 +1864,10 @@ impl Tenant { tracing::debug!("Waiting for tasks..."); task_mgr::shutdown_tasks(None, Some(self.tenant_shard_id), None).await; + if let Some(walredo_mgr) = self.walredo_mgr.as_ref() { + walredo_mgr.shutdown().await; + } + // Wait for any in-flight operations to complete self.gate.close().await; @@ -3854,7 +3869,7 @@ pub(crate) mod harness { base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, _pg_version: u32, - ) -> anyhow::Result { + ) -> Result { let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1)); if records_neon { // For Neon wal records, we can decode without spawning postgres, so do so. diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index d562540bde9b..5095beefd755 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -40,6 +40,7 @@ use std::time::Duration; use std::time::Instant; use tracing::*; use utils::lsn::Lsn; +use utils::sync::gate::GateError; use utils::sync::heavier_once_cell; /// @@ -53,10 +54,18 @@ pub struct PostgresRedoManager { tenant_shard_id: TenantShardId, conf: &'static PageServerConf, last_redo_at: std::sync::Mutex>, - /// The current [`process::WalRedoProcess`] that is used by new redo requests. - /// We use [`heavier_once_cell`] for coalescing the spawning, but the redo - /// requests don't use the [`heavier_once_cell::Guard`] to keep ahold of the + /// We use [`heavier_once_cell`] for + /// + /// 1. coalescing the lazy spawning of walredo processes ([`ProcessOnceCell::Spawned`]) + /// 2. prevent new processes from being spawned on [`Self::shutdown`] (=> [`ProcessOnceCell::ManagerShutDown`]). + /// + /// # Spawning + /// + /// Redo requests use the once cell to coalesce onto one call to [`process::WalRedoProcess::launch`]. + /// + /// Notably, requests don't use the [`heavier_once_cell::Guard`] to keep ahold of the /// their process object; we use [`Arc::clone`] for that. 
+ /// /// This is primarily because earlier implementations that didn't use [`heavier_once_cell`] /// had that behavior; it's probably unnecessary. /// The only merit of it is that if one walredo process encounters an error, @@ -65,7 +74,63 @@ pub struct PostgresRedoManager { /// still be using the old redo process. But, those other tasks will most likely /// encounter an error as well, and errors are an unexpected condition anyway. /// So, probably we could get rid of the `Arc` in the future. - redo_process: heavier_once_cell::OnceCell>, + /// + /// # Shutdown + /// + /// See [`Self::launched_processes`]. + redo_process: heavier_once_cell::OnceCell, + + /// Gate that is entered when launching a walredo process and held open + /// until the process has been `kill()`ed and `wait()`ed upon. + /// + /// Manager shutdown waits for this gate to close after setting the + /// [`ProcessOnceCell::ManagerShutDown`] state in [`Self::redo_process`]. + /// + /// This type of usage is a bit unusual because gates usually keep track of + /// concurrent operations, e.g., every [`Self::request_redo`] that is inflight. + /// But we use it here to keep track of the _processes_ that we have launched, + /// which may outlive any individual redo request because + /// - we keep walredo process around until its quiesced to amortize spawn cost and + /// - the Arc may be held by multiple concurrent redo requests, so, just because + /// you replace the [`Self::redo_process`] cell's content doesn't mean the + /// process gets killed immediately. + /// + /// We could simplify this by getting rid of the [`Arc`]. + /// See the comment on [`Self::redo_process`] for more details. + launched_processes: utils::sync::gate::Gate, +} + +/// See [`PostgresRedoManager::redo_process`]. +enum ProcessOnceCell { + Spawned(Arc), + ManagerShutDown, +} + +struct Process { + _launched_processes_guard: utils::sync::gate::GateGuard, + process: process::WalRedoProcess, +} + +impl std::ops::Deref for Process { + type Target = process::WalRedoProcess; + + fn deref(&self) -> &Self::Target { + &self.process + } +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("cancelled")] + Cancelled, + #[error(transparent)] + Other(#[from] anyhow::Error), +} + +macro_rules! bail { + ($($arg:tt)*) => { + return Err($crate::walredo::Error::Other(::anyhow::anyhow!($($arg)*))); + } } /// @@ -88,9 +153,9 @@ impl PostgresRedoManager { base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, pg_version: u32, - ) -> anyhow::Result { + ) -> Result { if records.is_empty() { - anyhow::bail!("invalid WAL redo request with no records"); + bail!("invalid WAL redo request with no records"); } let base_img_lsn = base_img.as_ref().map(|p| p.0).unwrap_or(Lsn::INVALID); @@ -148,10 +213,10 @@ impl PostgresRedoManager { chrono::Utc::now().checked_sub_signed(chrono::Duration::from_std(age).ok()?) }) }, - process: self - .redo_process - .get() - .map(|p| WalRedoManagerProcessStatus { pid: p.id() }), + process: self.redo_process.get().and_then(|p| match &*p { + ProcessOnceCell::Spawned(p) => Some(WalRedoManagerProcessStatus { pid: p.id() }), + ProcessOnceCell::ManagerShutDown => None, + }), } } } @@ -170,9 +235,39 @@ impl PostgresRedoManager { conf, last_redo_at: std::sync::Mutex::default(), redo_process: heavier_once_cell::OnceCell::default(), + launched_processes: utils::sync::gate::Gate::default(), } } + /// Shut down the WAL redo manager. 
+ /// + /// After this future completes + /// - no redo process is running + /// - no new redo process will be spawned + /// - redo requests that need walredo process will fail with [`Error::Cancelled`] + /// - [`apply_neon`]-only redo requests may still work, but this may change in the future + /// + /// # Cancel-Safety + /// + /// This method is cancellation-safe. + pub async fn shutdown(&self) { + // prevent new processes from being spawned + let permit = match self.redo_process.get_or_init_detached().await { + Ok(guard) => { + let (proc, permit) = guard.take_and_deinit(); + drop(proc); // this just drops the Arc, its refcount may not be zero yet + permit + } + Err(permit) => permit, + }; + self.redo_process + .set(ProcessOnceCell::ManagerShutDown, permit); + // wait for ongoing requests to drain and the refcounts of all Arc that + // we ever launched to drop to zero, which when it happens synchronously kill()s & wait()s + // for the underlying process. + self.launched_processes.close().await; + } + /// This type doesn't have its own background task to check for idleness: we /// rely on our owner calling this function periodically in its own housekeeping /// loops. @@ -203,38 +298,48 @@ impl PostgresRedoManager { records: &[(Lsn, NeonWalRecord)], wal_redo_timeout: Duration, pg_version: u32, - ) -> anyhow::Result { + ) -> Result { *(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); let (rel, blknum) = key.to_rel_block().context("invalid record")?; const MAX_RETRY_ATTEMPTS: u32 = 1; let mut n_attempts = 0u32; loop { - let proc: Arc = - match self.redo_process.get_or_init_detached().await { - Ok(guard) => Arc::clone(&guard), - Err(permit) => { - // don't hold poison_guard, the launch code can bail - let start = Instant::now(); - let proc = Arc::new( - process::WalRedoProcess::launch( + let proc: Arc = match self.redo_process.get_or_init_detached().await { + Ok(guard) => match &*guard { + ProcessOnceCell::Spawned(proc) => Arc::clone(proc), + ProcessOnceCell::ManagerShutDown => { + return Err(Error::Cancelled); + } + }, + Err(permit) => { + let start = Instant::now(); + let proc = Arc::new(Process { + _launched_processes_guard: match self.launched_processes.enter() { + Ok(guard) => guard, + Err(GateError::GateClosed) => unreachable!( + "shutdown sets the once cell to `ManagerShutDown` state before closing the gate" + ), + }, + process: process::WalRedoProcess::launch( self.conf, self.tenant_shard_id, pg_version, ) .context("launch walredo process")?, - ); - let duration = start.elapsed(); - WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64()); - info!( - duration_ms = duration.as_millis(), - pid = proc.id(), - "launched walredo process" - ); - self.redo_process.set(Arc::clone(&proc), permit); - proc - } - }; + }); + let duration = start.elapsed(); + WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64()); + info!( + duration_ms = duration.as_millis(), + pid = proc.id(), + "launched walredo process" + ); + self.redo_process + .set(ProcessOnceCell::Spawned(Arc::clone(&proc)), permit); + proc + } + }; let started_at = std::time::Instant::now(); @@ -299,12 +404,17 @@ impl PostgresRedoManager { match self.redo_process.get() { None => (), Some(guard) => { - if Arc::ptr_eq(&proc, &*guard) { - // We're the first to observe an error from `proc`, it's our job to take it out of rotation. - guard.take_and_deinit(); - } else { - // Another task already spawned another redo process (further up in this method) - // and put it into `redo_process`. 
Do nothing, our view of the world is behind. + match &*guard { + ProcessOnceCell::ManagerShutDown => {} + ProcessOnceCell::Spawned(guard_proc) => { + if Arc::ptr_eq(&proc, guard_proc) { + // We're the first to observe an error from `proc`, it's our job to take it out of rotation. + guard.take_and_deinit(); + } else { + // Another task already spawned another redo process (further up in this method) + // and put it into `redo_process`. Do nothing, our view of the world is behind. + } + } } } } @@ -315,7 +425,7 @@ impl PostgresRedoManager { } n_attempts += 1; if n_attempts > MAX_RETRY_ATTEMPTS || result.is_ok() { - return result; + return result.map_err(Error::Other); } } } @@ -329,7 +439,7 @@ impl PostgresRedoManager { lsn: Lsn, base_img: Option, records: &[(Lsn, NeonWalRecord)], - ) -> anyhow::Result { + ) -> Result { let start_time = Instant::now(); let mut page = BytesMut::new(); @@ -338,7 +448,7 @@ impl PostgresRedoManager { page.extend_from_slice(&fpi[..]); } else { // All the current WAL record types that we can handle require a base image. - anyhow::bail!("invalid neon WAL redo request with no base image"); + bail!("invalid neon WAL redo request with no base image"); } // Apply all the WAL records in the batch From 23827c6b0d400cbb9a972d4d05d49834816c40d1 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 27 Jun 2024 12:03:48 -0400 Subject: [PATCH 43/57] feat(pageserver): add delta layer iterator (#8064) part of https://github.com/neondatabase/neon/issues/8002 ## Summary of changes Add delta layer iterator and tests. --------- Signed-off-by: Alex Chi Z --- .../src/tenant/storage_layer/delta_layer.rs | 197 ++++++++++++++++++ pageserver/src/tenant/timeline.rs | 8 +- 2 files changed, 201 insertions(+), 4 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index bf5d9249ebb5..c2d4a2776b1d 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -1492,6 +1492,24 @@ impl DeltaLayerInner { ); offset } + + #[cfg(test)] + pub(crate) fn iter<'a>(&'a self, ctx: &'a RequestContext) -> DeltaLayerIterator<'a> { + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let tree_reader = + DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, block_reader); + DeltaLayerIterator { + delta_layer: self, + ctx, + index_iter: tree_reader.iter(&[0; DELTA_KEY_SIZE], ctx), + key_values_batch: std::collections::VecDeque::new(), + is_end: false, + planner: crate::tenant::vectored_blob_io::StreamingVectoredReadPlanner::new( + 1024 * 8192, // The default value. Unit tests might use a different value. 1024 * 8K = 8MB buffer. + 1024, // The default value. Unit tests might use a different value + ), + } + } } /// A set of data associated with a delta layer key and its value @@ -1551,6 +1569,70 @@ impl<'a> pageserver_compaction::interface::CompactionDeltaEntry<'a, Key> for Del } } +#[cfg(test)] +pub struct DeltaLayerIterator<'a> { + delta_layer: &'a DeltaLayerInner, + ctx: &'a RequestContext, + planner: crate::tenant::vectored_blob_io::StreamingVectoredReadPlanner, + index_iter: crate::tenant::disk_btree::DiskBtreeIterator<'a>, + key_values_batch: std::collections::VecDeque<(Key, Lsn, Value)>, + is_end: bool, +} + +#[cfg(test)] +impl<'a> DeltaLayerIterator<'a> { + /// Retrieve a batch of key-value pairs into the iterator buffer. 
+ async fn next_batch(&mut self) -> anyhow::Result<()> { + assert!(self.key_values_batch.is_empty()); + assert!(!self.is_end); + + let plan = loop { + if let Some(res) = self.index_iter.next().await { + let (raw_key, value) = res?; + let key = Key::from_slice(&raw_key[..KEY_SIZE]); + let lsn = DeltaKey::extract_lsn_from_buf(&raw_key); + let blob_ref = BlobRef(value); + let offset = blob_ref.pos(); + if let Some(batch_plan) = self.planner.handle(key, lsn, offset, BlobFlag::None) { + break batch_plan; + } + } else { + self.is_end = true; + let data_end_offset = self.delta_layer.index_start_offset(); + break self.planner.handle_range_end(data_end_offset); + } + }; + let vectored_blob_reader = VectoredBlobReader::new(&self.delta_layer.file); + let mut next_batch = std::collections::VecDeque::new(); + let buf_size = plan.size(); + let buf = BytesMut::with_capacity(buf_size); + let blobs_buf = vectored_blob_reader + .read_blobs(&plan, buf, self.ctx) + .await?; + let frozen_buf = blobs_buf.buf.freeze(); + for meta in blobs_buf.blobs.iter() { + let value = Value::des(&frozen_buf[meta.start..meta.end])?; + next_batch.push_back((meta.meta.key, meta.meta.lsn, value)); + } + self.key_values_batch = next_batch; + Ok(()) + } + + pub async fn next(&mut self) -> anyhow::Result> { + if self.key_values_batch.is_empty() { + if self.is_end { + return Ok(None); + } + self.next_batch().await?; + } + Ok(Some( + self.key_values_batch + .pop_front() + .expect("should not be empty"), + )) + } +} + #[cfg(test)] mod test { use std::collections::BTreeMap; @@ -1560,6 +1642,9 @@ mod test { use rand::RngCore; use super::*; + use crate::tenant::harness::TIMELINE_ID; + use crate::tenant::vectored_blob_io::StreamingVectoredReadPlanner; + use crate::tenant::Tenant; use crate::{ context::DownloadBehavior, task_mgr::TaskKind, @@ -2126,4 +2211,116 @@ mod test { assert_eq!(utils::Hex(&scratch_left), utils::Hex(&scratch_right)); } } + + async fn produce_delta_layer( + tenant: &Tenant, + tline: &Arc, + mut deltas: Vec<(Key, Lsn, Value)>, + ctx: &RequestContext, + ) -> anyhow::Result { + deltas.sort_by(|(k1, l1, _), (k2, l2, _)| (k1, l1).cmp(&(k2, l2))); + let (key_start, _, _) = deltas.first().unwrap(); + let (key_max, _, _) = deltas.first().unwrap(); + let lsn_min = deltas.iter().map(|(_, lsn, _)| lsn).min().unwrap(); + let lsn_max = deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap(); + let lsn_end = Lsn(lsn_max.0 + 1); + let mut writer = DeltaLayerWriter::new( + tenant.conf, + tline.timeline_id, + tenant.tenant_shard_id, + *key_start, + (*lsn_min)..lsn_end, + ctx, + ) + .await?; + let key_end = key_max.next(); + + for (key, lsn, value) in deltas { + writer.put_value(key, lsn, value, ctx).await?; + } + let delta_layer = writer.finish(key_end, tline, ctx).await?; + + Ok::<_, anyhow::Error>(delta_layer) + } + + async fn assert_delta_iter_equal( + delta_iter: &mut DeltaLayerIterator<'_>, + expect: &[(Key, Lsn, Value)], + ) { + let mut expect_iter = expect.iter(); + loop { + let o1 = delta_iter.next().await.unwrap(); + let o2 = expect_iter.next(); + assert_eq!(o1.is_some(), o2.is_some()); + if o1.is_none() && o2.is_none() { + break; + } + let (k1, l1, v1) = o1.unwrap(); + let (k2, l2, v2) = o2.unwrap(); + assert_eq!(&k1, k2); + assert_eq!(l1, *l2); + assert_eq!(&v1, v2); + } + } + + #[tokio::test] + async fn delta_layer_iterator() { + use crate::repository::Value; + use bytes::Bytes; + + let harness = TenantHarness::create("delta_layer_iterator").unwrap(); + let (tenant, ctx) = harness.load().await; + + let tline = tenant + 
.create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + fn get_key(id: u32) -> Key { + let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + const N: usize = 1000; + let test_deltas = (0..N) + .map(|idx| { + ( + get_key(idx as u32 / 10), + Lsn(0x10 * ((idx as u64) % 10 + 1)), + Value::Image(Bytes::from(format!("img{idx:05}"))), + ) + }) + .collect_vec(); + let resident_layer = produce_delta_layer(&tenant, &tline, test_deltas.clone(), &ctx) + .await + .unwrap(); + let delta_layer = resident_layer.get_as_delta(&ctx).await.unwrap(); + for max_read_size in [1, 1024] { + for batch_size in [1, 2, 4, 8, 3, 7, 13] { + println!("running with batch_size={batch_size} max_read_size={max_read_size}"); + // Test if the batch size is correctly determined + let mut iter = delta_layer.iter(&ctx); + iter.planner = StreamingVectoredReadPlanner::new(max_read_size, batch_size); + let mut num_items = 0; + for _ in 0..3 { + iter.next_batch().await.unwrap(); + num_items += iter.key_values_batch.len(); + if max_read_size == 1 { + // every key should be a batch b/c the value is larger than max_read_size + assert_eq!(iter.key_values_batch.len(), 1); + } else { + assert_eq!(iter.key_values_batch.len(), batch_size); + } + if num_items >= N { + break; + } + iter.key_values_batch.clear(); + } + // Test if the result is correct + let mut iter = delta_layer.iter(&ctx); + iter.planner = StreamingVectoredReadPlanner::new(max_read_size, batch_size); + assert_delta_iter_equal(&mut iter, &test_deltas).await; + } + } + } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 1175b750179d..8dd0a23f4637 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -5481,12 +5481,12 @@ impl Timeline { } images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb)); let min_key = *images.first().map(|(k, _)| k).unwrap(); - let max_key = images.last().map(|(k, _)| k).unwrap().next(); + let end_key = images.last().map(|(k, _)| k).unwrap().next(); let mut image_layer_writer = ImageLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, - &(min_key..max_key), + &(min_key..end_key), lsn, ctx, ) @@ -5518,7 +5518,7 @@ impl Timeline { let last_record_lsn = self.get_last_record_lsn(); deltas.sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb))); let min_key = *deltas.first().map(|(k, _, _)| k).unwrap(); - let max_key = deltas.last().map(|(k, _, _)| k).unwrap().next(); + let end_key = deltas.last().map(|(k, _, _)| k).unwrap().next(); let min_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).min().unwrap(); let max_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap(); assert!( @@ -5541,7 +5541,7 @@ impl Timeline { for (key, lsn, val) in deltas { delta_layer_writer.put_value(key, lsn, val, ctx).await?; } - let delta_layer = delta_layer_writer.finish(max_key, self, ctx).await?; + let delta_layer = delta_layer_writer.finish(end_key, self, ctx).await?; { let mut guard = self.layers.write().await; From 1d66ca79a9d50ae6423349c6c184c43c78a27113 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Thu, 27 Jun 2024 18:39:43 +0100 Subject: [PATCH 44/57] Improve slow operations observability in safekeepers (#8188) After https://github.com/neondatabase/neon/pull/8022 was deployed to staging, I noticed many cases of timeouts. After inspecting the logs, I realized that some operations are taking ~20 seconds and they're doing while holding shared state lock. 
Usually it happens right after redeploy, because compute reconnections put high load on disks. This commit tries to improve observability around slow operations. Non-observability changes: - `TimelineState::finish_change` now skips update if nothing has changed - `wal_residence_guard()` timeout is set to 30s --- libs/metrics/src/lib.rs | 7 +++--- safekeeper/src/metrics.rs | 32 +++++++++++++++++++++------- safekeeper/src/safekeeper.rs | 5 +++++ safekeeper/src/state.rs | 7 +++++- safekeeper/src/timeline.rs | 34 ++++++++++++++++++++---------- safekeeper/src/timeline_manager.rs | 6 +++++- safekeeper/src/wal_storage.rs | 16 +++++++++++++- 7 files changed, 82 insertions(+), 25 deletions(-) diff --git a/libs/metrics/src/lib.rs b/libs/metrics/src/lib.rs index 141d8a6d0198..0ff8ec8be3c0 100644 --- a/libs/metrics/src/lib.rs +++ b/libs/metrics/src/lib.rs @@ -103,9 +103,10 @@ static MAXRSS_KB: Lazy = Lazy::new(|| { .expect("Failed to register maxrss_kb int gauge") }); -pub const DISK_WRITE_SECONDS_BUCKETS: &[f64] = &[ - 0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5, -]; +/// Most common fsync latency is 50 µs - 100 µs, but it can be much higher, +/// especially during many concurrent disk operations. +pub const DISK_FSYNC_SECONDS_BUCKETS: &[f64] = + &[0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 30.0]; pub struct BuildInfo { pub revision: &'static str, diff --git a/safekeeper/src/metrics.rs b/safekeeper/src/metrics.rs index 1e965393e397..a484c45af8d7 100644 --- a/safekeeper/src/metrics.rs +++ b/safekeeper/src/metrics.rs @@ -5,15 +5,15 @@ use std::{ time::{Instant, SystemTime}, }; -use ::metrics::{register_histogram, GaugeVec, Histogram, IntGauge, DISK_WRITE_SECONDS_BUCKETS}; +use ::metrics::{register_histogram, GaugeVec, Histogram, IntGauge, DISK_FSYNC_SECONDS_BUCKETS}; use anyhow::Result; use futures::Future; use metrics::{ core::{AtomicU64, Collector, Desc, GenericCounter, GenericGaugeVec, Opts}, proto::MetricFamily, - register_int_counter, register_int_counter_pair, register_int_counter_pair_vec, - register_int_counter_vec, Gauge, IntCounter, IntCounterPair, IntCounterPairVec, IntCounterVec, - IntGaugeVec, + register_histogram_vec, register_int_counter, register_int_counter_pair, + register_int_counter_pair_vec, register_int_counter_vec, Gauge, HistogramVec, IntCounter, + IntCounterPair, IntCounterPairVec, IntCounterVec, IntGaugeVec, }; use once_cell::sync::Lazy; @@ -48,7 +48,7 @@ pub static WRITE_WAL_SECONDS: Lazy = Lazy::new(|| { register_histogram!( "safekeeper_write_wal_seconds", "Seconds spent writing and syncing WAL to a disk in a single request", - DISK_WRITE_SECONDS_BUCKETS.to_vec() + DISK_FSYNC_SECONDS_BUCKETS.to_vec() ) .expect("Failed to register safekeeper_write_wal_seconds histogram") }); @@ -56,7 +56,7 @@ pub static FLUSH_WAL_SECONDS: Lazy = Lazy::new(|| { register_histogram!( "safekeeper_flush_wal_seconds", "Seconds spent syncing WAL to a disk", - DISK_WRITE_SECONDS_BUCKETS.to_vec() + DISK_FSYNC_SECONDS_BUCKETS.to_vec() ) .expect("Failed to register safekeeper_flush_wal_seconds histogram") }); @@ -64,10 +64,26 @@ pub static PERSIST_CONTROL_FILE_SECONDS: Lazy = Lazy::new(|| { register_histogram!( "safekeeper_persist_control_file_seconds", "Seconds to persist and sync control file", - DISK_WRITE_SECONDS_BUCKETS.to_vec() + DISK_FSYNC_SECONDS_BUCKETS.to_vec() ) .expect("Failed to register safekeeper_persist_control_file_seconds histogram vec") }); +pub static WAL_STORAGE_OPERATION_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( + 
"safekeeper_wal_storage_operation_seconds", + "Seconds spent on WAL storage operations", + &["operation"] + ) + .expect("Failed to register safekeeper_wal_storage_operation_seconds histogram vec") +}); +pub static MISC_OPERATION_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "safekeeper_misc_operation_seconds", + "Seconds spent on miscellaneous operations", + &["operation"] + ) + .expect("Failed to register safekeeper_misc_operation_seconds histogram vec") +}); pub static PG_IO_BYTES: Lazy = Lazy::new(|| { register_int_counter_vec!( "safekeeper_pg_io_bytes_total", @@ -126,7 +142,7 @@ pub static BROKER_PUSH_ALL_UPDATES_SECONDS: Lazy = Lazy::new(|| { register_histogram!( "safekeeper_broker_push_update_seconds", "Seconds to push all timeline updates to the broker", - DISK_WRITE_SECONDS_BUCKETS.to_vec() + DISK_FSYNC_SECONDS_BUCKETS.to_vec() ) .expect("Failed to register safekeeper_broker_push_update_seconds histogram vec") }); diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 4d0992e8bda9..33ec39b852f4 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -15,6 +15,7 @@ use storage_broker::proto::SafekeeperTimelineInfo; use tracing::*; use crate::control_file; +use crate::metrics::MISC_OPERATION_SECONDS; use crate::send_wal::HotStandbyFeedback; use crate::state::TimelineState; @@ -696,6 +697,10 @@ where &mut self, msg: &ProposerElected, ) -> Result> { + let _timer = MISC_OPERATION_SECONDS + .with_label_values(&["handle_elected"]) + .start_timer(); + info!("received ProposerElected {:?}", msg); if self.state.acceptor_state.term < msg.term { let mut state = self.state.start_change(); diff --git a/safekeeper/src/state.rs b/safekeeper/src/state.rs index e0f7b65aef84..dca64140827f 100644 --- a/safekeeper/src/state.rs +++ b/safekeeper/src/state.rs @@ -189,7 +189,12 @@ where /// Persist given state. c.f. start_change. pub async fn finish_change(&mut self, s: &TimelinePersistentState) -> Result<()> { - self.pers.persist(s).await?; + if s.eq(&*self.pers) { + // nothing to do if state didn't change + } else { + self.pers.persist(s).await?; + } + // keep in memory values up to date self.inmem.commit_lsn = s.commit_lsn; self.inmem.backup_lsn = s.backup_lsn; diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index f632cd6fb3ec..6b83270c181b 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -39,7 +39,7 @@ use crate::wal_backup::{self}; use crate::wal_backup_partial::PartialRemoteSegment; use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION}; -use crate::metrics::{FullTimelineInfo, WalStorageMetrics}; +use crate::metrics::{FullTimelineInfo, WalStorageMetrics, MISC_OPERATION_SECONDS}; use crate::wal_storage::{Storage as wal_storage_iface, WalReader}; use crate::{debug_dump, timeline_manager, wal_storage}; use crate::{GlobalTimelines, SafeKeeperConf}; @@ -856,28 +856,40 @@ impl Timeline { } debug!("requesting WalResidentTimeline guard"); - - // Wait 5 seconds for the guard to be acquired, should be enough for uneviction. - // If it times out, most likely there is a deadlock in the manager task. - let res = tokio::time::timeout( - Duration::from_secs(5), + let started_at = Instant::now(); + let status_before = self.mgr_status.get(); + + // Wait 30 seconds for the guard to be acquired. It can time out if someone is + // holding the lock (e.g. during `SafeKeeper::process_msg()`) or manager task + // is stuck. 
+ let res = tokio::time::timeout_at( + started_at + Duration::from_secs(30), self.manager_ctl.wal_residence_guard(), ) .await; let guard = match res { - Ok(Ok(guard)) => guard, + Ok(Ok(guard)) => { + let finished_at = Instant::now(); + let elapsed = finished_at - started_at; + MISC_OPERATION_SECONDS + .with_label_values(&["wal_residence_guard"]) + .observe(elapsed.as_secs_f64()); + + guard + } Ok(Err(e)) => { warn!( - "error while acquiring WalResidentTimeline guard (current state {:?}): {}", - self.mgr_status.get(), - e + "error while acquiring WalResidentTimeline guard, statuses {:?} => {:?}", + status_before, + self.mgr_status.get() ); return Err(e); } Err(_) => { warn!( - "timeout while acquiring WalResidentTimeline guard (current state {:?})", + "timeout while acquiring WalResidentTimeline guard, statuses {:?} => {:?}", + status_before, self.mgr_status.get() ); anyhow::bail!("timeout while acquiring WalResidentTimeline guard"); diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index c3abeac6449f..66c62ce19785 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -22,7 +22,7 @@ use utils::lsn::Lsn; use crate::{ control_file::{FileStorage, Storage}, - metrics::{MANAGER_ACTIVE_CHANGES, MANAGER_ITERATIONS_TOTAL}, + metrics::{MANAGER_ACTIVE_CHANGES, MANAGER_ITERATIONS_TOTAL, MISC_OPERATION_SECONDS}, recovery::recovery_main, remove_wal::calc_horizon_lsn, safekeeper::Term, @@ -357,6 +357,10 @@ impl Manager { /// Get a snapshot of the timeline state. async fn state_snapshot(&self) -> StateSnapshot { + let _timer = MISC_OPERATION_SECONDS + .with_label_values(&["state_snapshot"]) + .start_timer(); + StateSnapshot::new( self.tli.read_shared_state().await, self.conf.heartbeat_timeout, diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 74c4693ccd9b..ded8571a3e27 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -23,7 +23,9 @@ use tokio::io::{AsyncReadExt, AsyncSeekExt}; use tracing::*; use utils::crashsafe::durable_rename; -use crate::metrics::{time_io_closure, WalStorageMetrics, REMOVED_WAL_SEGMENTS}; +use crate::metrics::{ + time_io_closure, WalStorageMetrics, REMOVED_WAL_SEGMENTS, WAL_STORAGE_OPERATION_SECONDS, +}; use crate::state::TimelinePersistentState; use crate::wal_backup::{read_object, remote_timeline_path}; use crate::SafeKeeperConf; @@ -331,6 +333,10 @@ impl Storage for PhysicalStorage { } async fn initialize_first_segment(&mut self, init_lsn: Lsn) -> Result<()> { + let _timer = WAL_STORAGE_OPERATION_SECONDS + .with_label_values(&["initialize_first_segment"]) + .start_timer(); + let segno = init_lsn.segment_number(self.wal_seg_size); let (mut file, _) = self.open_or_create(segno).await?; let major_pg_version = self.pg_version / 10000; @@ -422,6 +428,10 @@ impl Storage for PhysicalStorage { /// Truncate written WAL by removing all WAL segments after the given LSN. /// end_pos must point to the end of the WAL record. 
async fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> { + let _timer = WAL_STORAGE_OPERATION_SECONDS + .with_label_values(&["truncate_wal"]) + .start_timer(); + // Streaming must not create a hole, so truncate cannot be called on non-written lsn if self.write_lsn != Lsn(0) && end_pos > self.write_lsn { bail!( @@ -497,6 +507,10 @@ async fn remove_segments_from_disk( wal_seg_size: usize, remove_predicate: impl Fn(XLogSegNo) -> bool, ) -> Result<()> { + let _timer = WAL_STORAGE_OPERATION_SECONDS + .with_label_values(&["remove_segments_from_disk"]) + .start_timer(); + let mut n_removed = 0; let mut min_removed = u64::MAX; let mut max_removed = u64::MIN; From 5700233a47ffc2fb040d862976873e273ae180a7 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 27 Jun 2024 10:27:56 -0500 Subject: [PATCH 45/57] Add application_name to compute activity monitor connection string This was missed in my previous attempt to mark every connection string with an application name. See 0c3e3a8667294a3dc345b0f03364aa359a5154de. --- compute_tools/src/monitor.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/compute_tools/src/monitor.rs b/compute_tools/src/monitor.rs index 872a3f775070..d7127aac32d4 100644 --- a/compute_tools/src/monitor.rs +++ b/compute_tools/src/monitor.rs @@ -17,7 +17,11 @@ const MONITOR_CHECK_INTERVAL: Duration = Duration::from_millis(500); // should be handled gracefully. fn watch_compute_activity(compute: &ComputeNode) { // Suppose that `connstr` doesn't change - let connstr = compute.connstr.as_str(); + let mut connstr = compute.connstr.clone(); + connstr + .query_pairs_mut() + .append_pair("application_name", "compute_activity_monitor"); + let connstr = connstr.as_str(); // During startup and configuration we connect to every Postgres database, // but we don't want to count this as some user activity. So wait until From 063553a51b2b866c97fff6a818d2c35d8a9ee13c Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 28 Jun 2024 09:14:19 +0100 Subject: [PATCH 46/57] pageserver: remove tenant create API (#8135) ## Problem For some time, we have created tenants with calls to location_conf. The legacy "POST /v1/tenant" path was only used in some tests. 
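For context, creating (attaching) a tenant through the location_conf path looks roughly like the sketch below. The JSON body mirrors the `tenant_location_conf` call this patch adds to the Python test fixture; the endpoint path, port, and the `requests`-based client are illustrative assumptions, not code taken from this diff.

```
# Sketch only: upsert the tenant's location config instead of calling the
# removed "POST /v1/tenant" endpoint. Field names mirror the fixture change
# in test_runner/fixtures/neon_fixtures.py; URL shape and port are assumed.
import requests


def create_tenant_via_location_config(
    pageserver_http: str,  # e.g. "http://127.0.0.1:9898" (assumed mgmt address)
    tenant_id: str,
    generation: int,
) -> None:
    body = {
        "mode": "AttachedSingle",  # attach as the single location for this tenant
        "generation": generation,  # generation issued by the storage controller
        "tenant_conf": {},         # empty dict = default tenant config
        "secondary_conf": None,    # no secondary download config
    }
    resp = requests.put(
        f"{pageserver_http}/v1/tenant/{tenant_id}/location_config",
        json=body,
    )
    resp.raise_for_status()
```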
## Summary of changes - Remove the API - Relocate TenantCreateRequest to the controller API file (this used to be used in both pageserver and controller APIs) - Rewrite tenant_create test helper to use location_config API, as control plane and storage controller do - Update docker-compose test script to create tenants with location_config API (this small commit is also present in https://github.com/neondatabase/neon/pull/7947) --- control_plane/src/bin/neon_local.rs | 6 +- control_plane/src/pageserver.rs | 25 +----- control_plane/src/storage_controller.rs | 7 +- control_plane/storcon_cli/src/main.rs | 26 ++++--- libs/pageserver_api/src/controller_api.rs | 36 +++++++++ libs/pageserver_api/src/models.rs | 39 ---------- pageserver/client/src/mgmt_api.rs | 9 --- pageserver/src/http/routes.rs | 76 +------------------ pageserver/src/metrics.rs | 3 - pageserver/src/tenant.rs | 25 ++---- storage_controller/src/http.rs | 3 +- storage_controller/src/service.rs | 15 ++-- test_runner/fixtures/neon_fixtures.py | 14 +++- test_runner/fixtures/pageserver/http.py | 28 ------- test_runner/regress/test_pageserver_api.py | 6 +- .../regress/test_storage_controller.py | 2 +- 16 files changed, 91 insertions(+), 229 deletions(-) diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 2c05938f4410..4bf1b29785e8 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -21,10 +21,8 @@ use pageserver_api::config::{ DEFAULT_HTTP_LISTEN_PORT as DEFAULT_PAGESERVER_HTTP_PORT, DEFAULT_PG_LISTEN_PORT as DEFAULT_PAGESERVER_PG_PORT, }; -use pageserver_api::controller_api::PlacementPolicy; -use pageserver_api::models::{ - ShardParameters, TenantCreateRequest, TimelineCreateRequest, TimelineInfo, -}; +use pageserver_api::controller_api::{PlacementPolicy, TenantCreateRequest}; +use pageserver_api::models::{ShardParameters, TimelineCreateRequest, TimelineInfo}; use pageserver_api::shard::{ShardCount, ShardStripeSize, TenantShardId}; use postgres_backend::AuthType; use postgres_connection::parse_host_port; diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index da4b98784915..983f78577ce4 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -17,8 +17,7 @@ use anyhow::{bail, Context}; use camino::Utf8PathBuf; use futures::SinkExt; use pageserver_api::models::{ - self, AuxFilePolicy, LocationConfig, ShardParameters, TenantHistorySize, TenantInfo, - TimelineInfo, + self, AuxFilePolicy, LocationConfig, TenantHistorySize, TenantInfo, TimelineInfo, }; use pageserver_api::shard::TenantShardId; use pageserver_client::mgmt_api; @@ -397,28 +396,6 @@ impl PageServerNode { } } - pub async fn tenant_create( - &self, - new_tenant_id: TenantId, - generation: Option, - settings: HashMap<&str, &str>, - ) -> anyhow::Result { - let config = Self::parse_config(settings.clone())?; - - let request = models::TenantCreateRequest { - new_tenant_id: TenantShardId::unsharded(new_tenant_id), - generation, - config, - shard_parameters: ShardParameters::default(), - // Placement policy is not meaningful for creations not done via storage controller - placement_policy: None, - }; - if !settings.is_empty() { - bail!("Unrecognized tenant settings: {settings:?}") - } - Ok(self.http_client.tenant_create(&request).await?) 
- } - pub async fn tenant_config( &self, tenant_id: TenantId, diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index 1c56d5f80fe4..5ca1b13b2a35 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -5,12 +5,11 @@ use crate::{ use camino::{Utf8Path, Utf8PathBuf}; use pageserver_api::{ controller_api::{ - NodeConfigureRequest, NodeRegisterRequest, TenantCreateResponse, TenantLocateResponse, - TenantShardMigrateRequest, TenantShardMigrateResponse, + NodeConfigureRequest, NodeRegisterRequest, TenantCreateRequest, TenantCreateResponse, + TenantLocateResponse, TenantShardMigrateRequest, TenantShardMigrateResponse, }, models::{ - TenantCreateRequest, TenantShardSplitRequest, TenantShardSplitResponse, - TimelineCreateRequest, TimelineInfo, + TenantShardSplitRequest, TenantShardSplitResponse, TimelineCreateRequest, TimelineInfo, }, shard::{ShardStripeSize, TenantShardId}, }; diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 775aedb60001..b2c5dfe58a7f 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -4,13 +4,13 @@ use std::{str::FromStr, time::Duration}; use clap::{Parser, Subcommand}; use pageserver_api::{ controller_api::{ - NodeAvailabilityWrapper, NodeDescribeResponse, ShardSchedulingPolicy, + NodeAvailabilityWrapper, NodeDescribeResponse, ShardSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest, }, models::{ EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary, - ShardParameters, TenantConfig, TenantConfigRequest, TenantCreateRequest, - TenantShardSplitRequest, TenantShardSplitResponse, + ShardParameters, TenantConfig, TenantConfigRequest, TenantShardSplitRequest, + TenantShardSplitResponse, }, shard::{ShardStripeSize, TenantShardId}, }; @@ -336,14 +336,18 @@ async fn main() -> anyhow::Result<()> { .await?; } Command::TenantCreate { tenant_id } => { - vps_client - .tenant_create(&TenantCreateRequest { - new_tenant_id: TenantShardId::unsharded(tenant_id), - generation: None, - shard_parameters: ShardParameters::default(), - placement_policy: Some(PlacementPolicy::Attached(1)), - config: TenantConfig::default(), - }) + storcon_client + .dispatch( + Method::POST, + "v1/tenant".to_string(), + Some(TenantCreateRequest { + new_tenant_id: TenantShardId::unsharded(tenant_id), + generation: None, + shard_parameters: ShardParameters::default(), + placement_policy: Some(PlacementPolicy::Attached(1)), + config: TenantConfig::default(), + }), + ) .await?; } Command::TenantDelete { tenant_id } => { diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index a0d10dc665dc..f05c1315eafa 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -11,6 +11,27 @@ use crate::{ shard::{ShardStripeSize, TenantShardId}, }; +#[derive(Serialize, Deserialize, Debug)] +#[serde(deny_unknown_fields)] +pub struct TenantCreateRequest { + pub new_tenant_id: TenantShardId, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub generation: Option, + + // If omitted, create a single shard with TenantShardId::unsharded() + #[serde(default)] + #[serde(skip_serializing_if = "ShardParameters::is_unsharded")] + pub shard_parameters: ShardParameters, + + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub placement_policy: Option, + + #[serde(flatten)] + pub 
config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it +} + #[derive(Serialize, Deserialize)] pub struct TenantCreateResponseShard { pub shard_id: TenantShardId, @@ -280,4 +301,19 @@ mod test { assert_eq!(serde_json::from_str::(&encoded)?, v); Ok(()) } + + #[test] + fn test_reject_unknown_field() { + let id = TenantId::generate(); + let create_request = serde_json::json!({ + "new_tenant_id": id.to_string(), + "unknown_field": "unknown_value".to_string(), + }); + let err = serde_json::from_value::(create_request).unwrap_err(); + assert!( + err.to_string().contains("unknown field `unknown_field`"), + "expect unknown field `unknown_field` error, got: {}", + err + ); + } } diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 4875f4949522..61a255cdbc80 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -25,7 +25,6 @@ use utils::{ serde_system_time, }; -use crate::controller_api::PlacementPolicy; use crate::{ reltag::RelTag, shard::{ShardCount, ShardStripeSize, TenantShardId}, @@ -271,28 +270,6 @@ impl Default for ShardParameters { } } -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields)] -pub struct TenantCreateRequest { - pub new_tenant_id: TenantShardId, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub generation: Option, - - // If omitted, create a single shard with TenantShardId::unsharded() - #[serde(default)] - #[serde(skip_serializing_if = "ShardParameters::is_unsharded")] - pub shard_parameters: ShardParameters, - - // This parameter is only meaningful in requests sent to the storage controller - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub placement_policy: Option, - - #[serde(flatten)] - pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it -} - /// An alternative representation of `pageserver::tenant::TenantConf` with /// simpler types. #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)] @@ -547,10 +524,6 @@ pub struct LocationConfigListResponse { pub tenant_shards: Vec<(TenantShardId, Option)>, } -#[derive(Serialize, Deserialize)] -#[serde(transparent)] -pub struct TenantCreateResponse(pub TenantId); - #[derive(Serialize)] pub struct StatusResponse { pub id: NodeId, @@ -1507,18 +1480,6 @@ mod tests { #[test] fn test_reject_unknown_field() { - let id = TenantId::generate(); - let create_request = json!({ - "new_tenant_id": id.to_string(), - "unknown_field": "unknown_value".to_string(), - }); - let err = serde_json::from_value::(create_request).unwrap_err(); - assert!( - err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err - ); - let id = TenantId::generate(); let config_request = json!({ "tenant_id": id.to_string(), diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index 69b86d9c466a..48b27775cb91 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -205,15 +205,6 @@ impl Client { Ok(()) } - pub async fn tenant_create(&self, req: &TenantCreateRequest) -> Result { - let uri = format!("{}/v1/tenant", self.mgmt_api_endpoint); - self.request(Method::POST, &uri, req) - .await? - .json() - .await - .map_err(Error::ReceiveBody) - } - /// The tenant deletion API can return 202 if deletion is incomplete, or /// 404 if it is complete. 
Callers are responsible for checking the status /// code and retrying. Error codes other than 404 will return Err(). diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 5ebd34a40690..1fda2eaa854e 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -53,7 +53,6 @@ use utils::http::request::{get_request_param, must_get_query_param, parse_query_ use crate::context::{DownloadBehavior, RequestContext}; use crate::deletion_queue::DeletionQueueClient; -use crate::metrics::{StorageTimeOperation, STORAGE_TIME_GLOBAL}; use crate::pgdatadir_mapping::LsnForTimestamp; use crate::task_mgr::TaskKind; use crate::tenant::config::{LocationConf, TenantConfOpt}; @@ -75,13 +74,12 @@ use crate::tenant::timeline::CompactFlags; use crate::tenant::timeline::CompactionError; use crate::tenant::timeline::Timeline; use crate::tenant::GetTimelineError; -use crate::tenant::SpawnMode; use crate::tenant::{LogicalSizeCalculationCause, PageReconstructError}; use crate::{config::PageServerConf, tenant::mgr}; use crate::{disk_usage_eviction_task, tenant}; use pageserver_api::models::{ - StatusResponse, TenantConfigRequest, TenantCreateRequest, TenantCreateResponse, TenantInfo, - TimelineCreateRequest, TimelineGcRequest, TimelineInfo, + StatusResponse, TenantConfigRequest, TenantInfo, TimelineCreateRequest, TimelineGcRequest, + TimelineInfo, }; use utils::{ auth::SwappableJwtAuth, @@ -1237,75 +1235,6 @@ pub fn html_response(status: StatusCode, data: String) -> Result, Ok(response) } -/// Helper for requests that may take a generation, which is mandatory -/// when control_plane_api is set, but otherwise defaults to Generation::none() -fn get_request_generation(state: &State, req_gen: Option) -> Result { - if state.conf.control_plane_api.is_some() { - req_gen - .map(Generation::new) - .ok_or(ApiError::BadRequest(anyhow!( - "generation attribute missing" - ))) - } else { - // Legacy mode: all tenants operate with no generation - Ok(Generation::none()) - } -} - -async fn tenant_create_handler( - mut request: Request, - _cancel: CancellationToken, -) -> Result, ApiError> { - let request_data: TenantCreateRequest = json_request(&mut request).await?; - let target_tenant_id = request_data.new_tenant_id; - check_permission(&request, None)?; - - let _timer = STORAGE_TIME_GLOBAL - .get_metric_with_label_values(&[StorageTimeOperation::CreateTenant.into()]) - .expect("bug") - .start_timer(); - - let tenant_conf = - TenantConfOpt::try_from(&request_data.config).map_err(ApiError::BadRequest)?; - - let state = get_state(&request); - - let generation = get_request_generation(state, request_data.generation)?; - - let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn); - - let location_conf = - LocationConf::attached_single(tenant_conf, generation, &request_data.shard_parameters); - - let new_tenant = state - .tenant_manager - .upsert_location( - target_tenant_id, - location_conf, - None, - SpawnMode::Create, - &ctx, - ) - .await?; - - let Some(new_tenant) = new_tenant else { - // This should never happen: indicates a bug in upsert_location - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "Upsert succeeded but didn't return tenant!" - ))); - }; - // We created the tenant. Existing API semantics are that the tenant - // is Active when this function returns. 
- new_tenant - .wait_to_become_active(ACTIVE_TENANT_TIMEOUT) - .await?; - - json_response( - StatusCode::CREATED, - TenantCreateResponse(new_tenant.tenant_shard_id().tenant_id), - ) -} - async fn get_tenant_config_handler( request: Request, _cancel: CancellationToken, @@ -2611,7 +2540,6 @@ pub fn make_router( api_handler(r, reload_auth_validation_keys_handler) }) .get("/v1/tenant", |r| api_handler(r, tenant_list_handler)) - .post("/v1/tenant", |r| api_handler(r, tenant_create_handler)) .get("/v1/tenant/:tenant_shard_id", |r| { api_handler(r, tenant_status) }) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index ca697afcf640..f5aca6dfb36c 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -53,9 +53,6 @@ pub(crate) enum StorageTimeOperation { #[strum(serialize = "find gc cutoffs")] FindGcCutoffs, - - #[strum(serialize = "create tenant")] - CreateTenant, } pub(crate) static STORAGE_TIME_SUM_PER_TIMELINE: Lazy = Lazy::new(|| { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 22d680486184..92d9c5b1432a 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -213,8 +213,6 @@ pub(crate) enum SpawnMode { Eager, /// Lazy activation in the background, with the option to skip the queue if the need comes up Lazy, - /// Tenant has been created during the lifetime of this process - Create, } /// @@ -808,9 +806,6 @@ impl Tenant { }; let preload = match &mode { - SpawnMode::Create => { - None - }, SpawnMode::Eager | SpawnMode::Lazy => { let _preload_timer = TENANT.preload.start_timer(); let res = tenant_clone @@ -832,11 +827,8 @@ impl Tenant { // We will time the duration of the attach phase unless this is a creation (attach will do no work) let attached = { - let _attach_timer = match mode { - SpawnMode::Create => None, - SpawnMode::Eager | SpawnMode::Lazy => Some(TENANT.attach.start_timer()), - }; - tenant_clone.attach(preload, mode, &ctx).await + let _attach_timer = Some(TENANT.attach.start_timer()); + tenant_clone.attach(preload, &ctx).await }; match attached { @@ -912,21 +904,14 @@ impl Tenant { async fn attach( self: &Arc, preload: Option, - mode: SpawnMode, ctx: &RequestContext, ) -> anyhow::Result<()> { span::debug_assert_current_span_has_tenant_id(); failpoint_support::sleep_millis_async!("before-attaching-tenant"); - let preload = match (preload, mode) { - (Some(p), _) => p, - (None, SpawnMode::Create) => TenantPreload { - timelines: HashMap::new(), - }, - (None, _) => { - anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624"); - } + let Some(preload) = preload else { + anyhow::bail!("local-only deployment is no longer supported, https://github.com/neondatabase/neon/issues/5624"); }; let mut timelines_to_resume_deletions = vec![]; @@ -3841,7 +3826,7 @@ pub(crate) mod harness { let preload = tenant .preload(&self.remote_storage, CancellationToken::new()) .await?; - tenant.attach(Some(preload), SpawnMode::Eager, ctx).await?; + tenant.attach(Some(preload), ctx).await?; tenant.state.send_replace(TenantState::Active); for timeline in tenant.timelines.lock().unwrap().values() { diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index 680e6f09c4a0..7446ad53a231 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -10,8 +10,9 @@ use hyper::header::CONTENT_TYPE; use hyper::{Body, Request, Response}; use hyper::{StatusCode, Uri}; use metrics::{BuildInfo, NeonMetrics}; +use 
pageserver_api::controller_api::TenantCreateRequest; use pageserver_api::models::{ - TenantConfigRequest, TenantCreateRequest, TenantLocationConfigRequest, TenantShardSplitRequest, + TenantConfigRequest, TenantLocationConfigRequest, TenantShardSplitRequest, TenantTimeTravelRequest, TimelineCreateRequest, }; use pageserver_api::shard::TenantShardId; diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index a94575b4286d..bcc40c69a25d 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -32,10 +32,10 @@ use itertools::Itertools; use pageserver_api::{ controller_api::{ NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy, - ShardSchedulingPolicy, TenantCreateResponse, TenantCreateResponseShard, - TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse, - TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse, - UtilizationScore, + ShardSchedulingPolicy, TenantCreateRequest, TenantCreateResponse, + TenantCreateResponseShard, TenantDescribeResponse, TenantDescribeResponseShard, + TenantLocateResponse, TenantPolicyRequest, TenantShardMigrateRequest, + TenantShardMigrateResponse, UtilizationScore, }, models::{SecondaryProgress, TenantConfigRequest, TopTenantShardsRequest}, }; @@ -46,10 +46,9 @@ use crate::pageserver_client::PageserverClient; use pageserver_api::{ models::{ self, LocationConfig, LocationConfigListResponse, LocationConfigMode, - PageserverUtilization, ShardParameters, TenantConfig, TenantCreateRequest, - TenantLocationConfigRequest, TenantLocationConfigResponse, TenantShardLocation, - TenantShardSplitRequest, TenantShardSplitResponse, TenantTimeTravelRequest, - TimelineCreateRequest, TimelineInfo, + PageserverUtilization, ShardParameters, TenantConfig, TenantLocationConfigRequest, + TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest, + TenantShardSplitResponse, TenantTimeTravelRequest, TimelineCreateRequest, TimelineInfo, }, shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId}, upcall_api::{ diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index a3f83abd3eed..4911917bf452 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2741,7 +2741,19 @@ def tenant_create( if generation is None: generation = self.env.storage_controller.attach_hook_issue(tenant_id, self.id) client = self.http_client(auth_token=auth_token) - return client.tenant_create(tenant_id, conf, generation=generation) + + conf = conf or {} + + client.tenant_location_conf( + tenant_id, + { + "mode": "AttachedSingle", + "generation": generation, + "tenant_conf": conf, + "secondary_conf": None, + }, + ) + return tenant_id def list_layers( self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 794961271418..3da0be802116 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -220,34 +220,6 @@ def tenant_list(self) -> List[Dict[Any, Any]]: assert isinstance(res_json, list) return res_json - def tenant_create( - self, - new_tenant_id: Union[TenantId, TenantShardId], - conf: Optional[Dict[str, Any]] = None, - generation: Optional[int] = None, - ) -> TenantId: - if conf is not None: - assert "new_tenant_id" not in conf.keys() - - body: Dict[str, Any] = { - "new_tenant_id": str(new_tenant_id), - **(conf or 
{}), - } - - if generation is not None: - body.update({"generation": generation}) - - res = self.post( - f"http://localhost:{self.port}/v1/tenant", - json=body, - ) - self.verbose_error(res) - if res.status_code == 409: - raise Exception(f"could not create tenant: already exists for id {new_tenant_id}") - new_tenant_id = res.json() - assert isinstance(new_tenant_id, str) - return TenantId(new_tenant_id) - def tenant_attach( self, tenant_id: Union[TenantId, TenantShardId], diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index abbea59113f1..caeae7fd15c6 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -85,8 +85,10 @@ def check_client(env: NeonEnv, client: PageserverHttpClient): # create new tenant and check it is also there tenant_id = TenantId.generate() - client.tenant_create( - tenant_id, generation=env.storage_controller.attach_hook_issue(tenant_id, env.pageserver.id) + env.pageserver.tenant_create( + tenant_id, + generation=env.storage_controller.attach_hook_issue(tenant_id, env.pageserver.id), + auth_token=client.auth_token, ) assert tenant_id in {TenantId(t["id"]) for t in client.tenant_list()} diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 139a100872fb..1b294fb2d0aa 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -315,7 +315,7 @@ def test_storage_controller_onboarding(neon_env_builder: NeonEnvBuilder, warm_up # Create a tenant directly via pageserver HTTP API, skipping the storage controller tenant_id = TenantId.generate() generation = 123 - origin_ps.http_client().tenant_create(tenant_id, generation=generation) + origin_ps.tenant_create(tenant_id, generation=generation) # As if doing a live migration, first configure origin into stale mode r = origin_ps.http_client().tenant_location_conf( From deec3bc5789a3daa1bf6c41e1487549e0d1c7dc1 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Fri, 28 Jun 2024 11:20:37 +0200 Subject: [PATCH 47/57] virtual_file: take a `Slice` in the read APIs, eliminate `read_exact_at_n`, fix UB for engine `std-fs` (#8186) part of https://github.com/neondatabase/neon/issues/7418 I reviewed how the VirtualFile API's `read` methods look like and came to the conclusion that we've been using `IoBufMut` / `BoundedBufMut` / `Slice` wrong. This patch rectifies the situation. # Change 1: take `tokio_epoll_uring::Slice` in the read APIs Before, we took an `IoBufMut`, which is too low of a primitive and while it _seems_ convenient to be able to pass in a `Vec` without any fuzz, it's actually very unclear at the callsite that we're going to fill up that `Vec` up to its `capacity()`, because that's what `IoBuf::bytes_total()` returns and that's what `VirtualFile::read_exact_at` fills. By passing a `Slice` instead, a caller that "just wants to read into a `Vec`" is forced to be explicit about it, adding either `slice_full()` or `slice(x..y)`, and these methods panic if the read is outside of the bounds of the `Vec::capacity()`. Last, passing slices is more similar to what the `std::io` APIs look like. # Change 2: fix UB in `virtual_file_io_engine=std-fs` While reviewing call sites, I noticed that the `io_engine::IoEngine::read_at` method for `StdFs` mode has been constructing an `&mut[u8]` from raw parts that were uninitialized. 
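(As an aside, a minimal sketch of why that pattern is a problem, using plain `std::fs::File` and `Vec<u8>` stand-ins rather than the actual `io_engine` code:)

```rust
use std::io::Read;

// UB sketch: hand out a `&mut [u8]` over a Vec's *uninitialized* capacity.
fn read_exact_ub(file: &mut std::fs::File, len: usize) -> std::io::Result<Vec<u8>> {
    let mut buf: Vec<u8> = Vec::with_capacity(len);
    // Undefined behavior: a `&mut [u8]` must only ever refer to initialized bytes,
    // even if we intend to overwrite them immediately afterwards.
    let dst: &mut [u8] = unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr(), len) };
    file.read_exact(dst)?;
    // Easy to forget as well: the Vec's length still has to be fixed up by hand.
    unsafe { buf.set_len(len) };
    Ok(buf)
}

// Sound alternative: zero-fill first, then take an ordinary slice.
fn read_exact_zeroed(file: &mut std::fs::File, len: usize) -> std::io::Result<Vec<u8>> {
    let mut buf = vec![0u8; len];
    file.read_exact(&mut buf)?;
    Ok(buf)
}
```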
We then used `std::fs::File::read_exact` to initialize that memory, but, IIUC we must not even be constructing an `&mut[u8]` where some of the memory isn't initialized. So, stop doing that and add a helper ext trait on `Slice` to do the zero-initialization. # Change 3: eliminate `read_exact_at_n` The `read_exact_at_n` doesn't make sense because the caller can just 1. `slice = buf.slice()` the exact memory it wants to fill 2. `slice = read_exact_at(slice)` 3. `buf = slice.into_inner()` Again, the `std::io` APIs specify the length of the read via the Rust slice length. We should do the same for the owned buffers IO APIs, i.e., via `Slice::bytes_total()`. # Change 4: simplify filling of `PageWriteGuard` The `PageWriteGuardBuf::init_up_to` was never necessary. Remove it. See changes to doc comment for more details. --- Reviewers should probably look at the added test case first, it illustrates my case a bit. --- pageserver/src/tenant/vectored_blob_io.rs | 6 +- pageserver/src/virtual_file.rs | 181 ++++++++---------- pageserver/src/virtual_file/io_engine.rs | 33 ++-- .../virtual_file/owned_buffers_io/slice.rs | 121 ++++++++++++ 4 files changed, 219 insertions(+), 122 deletions(-) create mode 100644 pageserver/src/virtual_file/owned_buffers_io/slice.rs diff --git a/pageserver/src/tenant/vectored_blob_io.rs b/pageserver/src/tenant/vectored_blob_io.rs index 1241a1390209..7ad8446e0411 100644 --- a/pageserver/src/tenant/vectored_blob_io.rs +++ b/pageserver/src/tenant/vectored_blob_io.rs @@ -20,6 +20,7 @@ use std::num::NonZeroUsize; use bytes::BytesMut; use pageserver_api::key::Key; +use tokio_epoll_uring::BoundedBuf; use utils::lsn::Lsn; use utils::vec_map::VecMap; @@ -316,8 +317,9 @@ impl<'a> VectoredBlobReader<'a> { ); let buf = self .file - .read_exact_at_n(buf, read.start, read.size(), ctx) - .await?; + .read_exact_at(buf.slice(0..read.size()), read.start, ctx) + .await? + .into_inner(); let blobs_at = read.blobs_at.as_slice(); let start_offset = blobs_at.first().expect("VectoredRead is never empty").0; diff --git a/pageserver/src/virtual_file.rs b/pageserver/src/virtual_file.rs index 04d9386fab92..51b0c420c346 100644 --- a/pageserver/src/virtual_file.rs +++ b/pageserver/src/virtual_file.rs @@ -13,7 +13,7 @@ use crate::context::RequestContext; use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC}; -use crate::page_cache::PageWriteGuard; +use crate::page_cache::{PageWriteGuard, PAGE_SZ}; use crate::tenant::TENANTS_SEGMENT_NAME; use camino::{Utf8Path, Utf8PathBuf}; use once_cell::sync::OnceCell; @@ -48,6 +48,7 @@ pub(crate) mod owned_buffers_io { //! but for the time being we're proving out the primitives in the neon.git repo //! for faster iteration. + pub(crate) mod slice; pub(crate) mod write; pub(crate) mod util { pub(crate) mod size_tracking_writer; @@ -143,16 +144,17 @@ struct SlotInner { /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`]. struct PageWriteGuardBuf { page: PageWriteGuard<'static>, - init_up_to: usize, } // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot, // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved. +// Page cache pages are zero-initialized, so, wrt uninitialized memory we're good. +// (Page cache tracks separately whether the contents are valid, see `PageWriteGuard::mark_valid`.) 
unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf { fn stable_ptr(&self) -> *const u8 { self.page.as_ptr() } fn bytes_init(&self) -> usize { - self.init_up_to + self.page.len() } fn bytes_total(&self) -> usize { self.page.len() @@ -166,8 +168,8 @@ unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf { } unsafe fn set_init(&mut self, pos: usize) { + // There shouldn't really be any reason to call this API since bytes_init() == bytes_total(). assert!(pos <= self.page.len()); - self.init_up_to = pos; } } @@ -585,37 +587,37 @@ impl VirtualFile { Ok(self.pos) } - pub async fn read_exact_at( + /// Read the file contents in range `offset..(offset + slice.bytes_total())` into `slice[0..slice.bytes_total()]`. + /// + /// The returned `Slice` is equivalent to the input `slice`, i.e., it's the same view into the same buffer. + pub async fn read_exact_at( &self, - buf: B, + slice: Slice, offset: u64, ctx: &RequestContext, - ) -> Result + ) -> Result, Error> where - B: IoBufMut + Send, + Buf: IoBufMut + Send, { - let (buf, res) = read_exact_at_impl(buf, offset, None, |buf, offset| { - self.read_at(buf, offset, ctx) - }) - .await; - res.map(|()| buf) - } + let assert_we_return_original_bounds = if cfg!(debug_assertions) { + Some((slice.stable_ptr() as usize, slice.bytes_total())) + } else { + None + }; - pub async fn read_exact_at_n( - &self, - buf: B, - offset: u64, - count: usize, - ctx: &RequestContext, - ) -> Result - where - B: IoBufMut + Send, - { - let (buf, res) = read_exact_at_impl(buf, offset, Some(count), |buf, offset| { - self.read_at(buf, offset, ctx) - }) - .await; - res.map(|()| buf) + let original_bounds = slice.bounds(); + let (buf, res) = + read_exact_at_impl(slice, offset, |buf, offset| self.read_at(buf, offset, ctx)).await; + let res = res.map(|_| buf.slice(original_bounds)); + + if let Some(original_bounds) = assert_we_return_original_bounds { + if let Ok(slice) = &res { + let returned_bounds = (slice.stable_ptr() as usize, slice.bytes_total()); + assert_eq!(original_bounds, returned_bounds); + } + } + + res } /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`]. @@ -625,13 +627,11 @@ impl VirtualFile { offset: u64, ctx: &RequestContext, ) -> Result, Error> { - let buf = PageWriteGuardBuf { - page, - init_up_to: 0, - }; - let res = self.read_exact_at(buf, offset, ctx).await; - res.map(|PageWriteGuardBuf { page, .. 
}| page) - .map_err(|e| Error::new(ErrorKind::Other, e)) + let buf = PageWriteGuardBuf { page }.slice_full(); + debug_assert_eq!(buf.bytes_total(), PAGE_SZ); + self.read_exact_at(buf, offset, ctx) + .await + .map(|slice| slice.into_inner().page) } // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235 @@ -722,14 +722,14 @@ impl VirtualFile { (buf, Ok(n)) } - pub(crate) async fn read_at( + pub(crate) async fn read_at( &self, - buf: B, + buf: tokio_epoll_uring::Slice, offset: u64, _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */ - ) -> (B, Result) + ) -> (tokio_epoll_uring::Slice, Result) where - B: tokio_epoll_uring::BoundedBufMut + Send, + Buf: tokio_epoll_uring::IoBufMut + Send, { let file_guard = match self.lock_file().await { Ok(file_guard) => file_guard, @@ -781,26 +781,16 @@ impl VirtualFile { } // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135 -pub async fn read_exact_at_impl( - buf: B, +pub async fn read_exact_at_impl( + mut buf: tokio_epoll_uring::Slice, mut offset: u64, - count: Option, mut read_at: F, -) -> (B, std::io::Result<()>) +) -> (Buf, std::io::Result<()>) where - B: IoBufMut + Send, - F: FnMut(tokio_epoll_uring::Slice, u64) -> Fut, - Fut: std::future::Future, std::io::Result)>, + Buf: IoBufMut + Send, + F: FnMut(tokio_epoll_uring::Slice, u64) -> Fut, + Fut: std::future::Future, std::io::Result)>, { - let mut buf: tokio_epoll_uring::Slice = match count { - Some(count) => { - assert!(count <= buf.bytes_total()); - assert!(count > 0); - buf.slice(..count) // may include uninitialized memory - } - None => buf.slice_full(), // includes all the uninitialized memory - }; - while buf.bytes_total() != 0 { let res; (buf, res) = read_at(buf, offset).await; @@ -882,7 +872,7 @@ mod test_read_exact_at_impl { #[tokio::test] async fn test_basic() { - let buf = Vec::with_capacity(5); + let buf = Vec::with_capacity(5).slice_full(); let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt { expectations: VecDeque::from(vec![Expectation { offset: 0, @@ -890,7 +880,7 @@ mod test_read_exact_at_impl { result: Ok(vec![b'a', b'b', b'c', b'd', b'e']), }]), })); - let (buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| { + let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| { let mock_read_at = Arc::clone(&mock_read_at); async move { mock_read_at.lock().await.read_at(buf, offset).await } }) @@ -899,33 +889,13 @@ mod test_read_exact_at_impl { assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']); } - #[tokio::test] - async fn test_with_count() { - let buf = Vec::with_capacity(5); - let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt { - expectations: VecDeque::from(vec![Expectation { - offset: 0, - bytes_total: 3, - result: Ok(vec![b'a', b'b', b'c']), - }]), - })); - - let (buf, res) = read_exact_at_impl(buf, 0, Some(3), |buf, offset| { - let mock_read_at = Arc::clone(&mock_read_at); - async move { mock_read_at.lock().await.read_at(buf, offset).await } - }) - .await; - assert!(res.is_ok()); - assert_eq!(buf, vec![b'a', b'b', b'c']); - } - #[tokio::test] async fn test_empty_buf_issues_no_syscall() { - let buf = Vec::new(); + let buf = Vec::new().slice_full(); let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt { expectations: VecDeque::new(), })); - let (_buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| { + let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| { let mock_read_at = Arc::clone(&mock_read_at); async move { 
mock_read_at.lock().await.read_at(buf, offset).await } }) @@ -935,7 +905,7 @@ mod test_read_exact_at_impl { #[tokio::test] async fn test_two_read_at_calls_needed_until_buf_filled() { - let buf = Vec::with_capacity(4); + let buf = Vec::with_capacity(4).slice_full(); let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt { expectations: VecDeque::from(vec![ Expectation { @@ -950,7 +920,7 @@ mod test_read_exact_at_impl { }, ]), })); - let (buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| { + let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| { let mock_read_at = Arc::clone(&mock_read_at); async move { mock_read_at.lock().await.read_at(buf, offset).await } }) @@ -961,7 +931,7 @@ mod test_read_exact_at_impl { #[tokio::test] async fn test_eof_before_buffer_full() { - let buf = Vec::with_capacity(3); + let buf = Vec::with_capacity(3).slice_full(); let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt { expectations: VecDeque::from(vec![ Expectation { @@ -981,7 +951,7 @@ mod test_read_exact_at_impl { }, ]), })); - let (_buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| { + let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| { let mock_read_at = Arc::clone(&mock_read_at); async move { mock_read_at.lock().await.read_at(buf, offset).await } }) @@ -1051,27 +1021,29 @@ impl VirtualFile { ctx: &RequestContext, ) -> Result, std::io::Error> { use crate::page_cache::PAGE_SZ; - let buf = vec![0; PAGE_SZ]; - let buf = self - .read_exact_at(buf, blknum as u64 * (PAGE_SZ as u64), ctx) + let slice = Vec::with_capacity(PAGE_SZ).slice_full(); + assert_eq!(slice.bytes_total(), PAGE_SZ); + let slice = self + .read_exact_at(slice, blknum as u64 * (PAGE_SZ as u64), ctx) .await?; - Ok(crate::tenant::block_io::BlockLease::Vec(buf)) + Ok(crate::tenant::block_io::BlockLease::Vec(slice.into_inner())) } async fn read_to_end(&mut self, buf: &mut Vec, ctx: &RequestContext) -> Result<(), Error> { let mut tmp = vec![0; 128]; loop { - let res; - (tmp, res) = self.read_at(tmp, self.pos, ctx).await; + let slice = tmp.slice(..128); + let (slice, res) = self.read_at(slice, self.pos, ctx).await; match res { Ok(0) => return Ok(()), Ok(n) => { self.pos += n as u64; - buf.extend_from_slice(&tmp[..n]); + buf.extend_from_slice(&slice[..n]); } Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {} Err(e) => return Err(e), } + tmp = slice.into_inner(); } } } @@ -1185,6 +1157,7 @@ mod tests { use crate::task_mgr::TaskKind; use super::*; + use owned_buffers_io::slice::SliceExt; use rand::seq::SliceRandom; use rand::thread_rng; use rand::Rng; @@ -1206,13 +1179,16 @@ mod tests { impl MaybeVirtualFile { async fn read_exact_at( &self, - mut buf: Vec, + mut slice: tokio_epoll_uring::Slice>, offset: u64, ctx: &RequestContext, - ) -> Result, Error> { + ) -> Result>, Error> { match self { - MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(buf, offset, ctx).await, - MaybeVirtualFile::File(file) => file.read_exact_at(&mut buf, offset).map(|()| buf), + MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(slice, offset, ctx).await, + MaybeVirtualFile::File(file) => { + let rust_slice: &mut [u8] = slice.as_mut_rust_slice_full_zeroed(); + file.read_exact_at(rust_slice, offset).map(|()| slice) + } } } async fn write_all_at, Buf: IoBuf + Send>( @@ -1286,9 +1262,12 @@ mod tests { len: usize, ctx: &RequestContext, ) -> Result { - let buf = vec![0; len]; - let buf = self.read_exact_at(buf, pos, ctx).await?; - Ok(String::from_utf8(buf).unwrap()) + let slice = 
Vec::with_capacity(len).slice_full(); + assert_eq!(slice.bytes_total(), len); + let slice = self.read_exact_at(slice, pos, ctx).await?; + let vec = slice.into_inner(); + assert_eq!(vec.len(), len); + Ok(String::from_utf8(vec).unwrap()) } } @@ -1507,7 +1486,11 @@ mod tests { let mut rng = rand::rngs::OsRng; for _ in 1..1000 { let f = &files[rng.gen_range(0..files.len())]; - buf = f.read_exact_at(buf, 0, &ctx).await.unwrap(); + buf = f + .read_exact_at(buf.slice_full(), 0, &ctx) + .await + .unwrap() + .into_inner(); assert!(buf == SAMPLE); } }); diff --git a/pageserver/src/virtual_file/io_engine.rs b/pageserver/src/virtual_file/io_engine.rs index 7a27be2ca108..2820cea097d1 100644 --- a/pageserver/src/virtual_file/io_engine.rs +++ b/pageserver/src/virtual_file/io_engine.rs @@ -107,7 +107,7 @@ use std::{ sync::atomic::{AtomicU8, Ordering}, }; -use super::{FileGuard, Metadata}; +use super::{owned_buffers_io::slice::SliceExt, FileGuard, Metadata}; #[cfg(target_os = "linux")] fn epoll_uring_error_to_std(e: tokio_epoll_uring::Error) -> std::io::Error { @@ -120,38 +120,29 @@ fn epoll_uring_error_to_std(e: tokio_epoll_uring::Error) -> std: } impl IoEngine { - pub(super) async fn read_at( + pub(super) async fn read_at( &self, file_guard: FileGuard, offset: u64, - mut buf: B, - ) -> ((FileGuard, B), std::io::Result) + mut slice: tokio_epoll_uring::Slice, + ) -> ( + (FileGuard, tokio_epoll_uring::Slice), + std::io::Result, + ) where - B: tokio_epoll_uring::BoundedBufMut + Send, + Buf: tokio_epoll_uring::IoBufMut + Send, { match self { IoEngine::NotSet => panic!("not initialized"), IoEngine::StdFs => { - // SAFETY: `dst` only lives at most as long as this match arm, during which buf remains valid memory. - let dst = unsafe { - std::slice::from_raw_parts_mut(buf.stable_mut_ptr(), buf.bytes_total()) - }; - let res = file_guard.with_std_file(|std_file| std_file.read_at(dst, offset)); - if let Ok(nbytes) = &res { - assert!(*nbytes <= buf.bytes_total()); - // SAFETY: see above assertion - unsafe { - buf.set_init(*nbytes); - } - } - #[allow(dropping_references)] - drop(dst); - ((file_guard, buf), res) + let rust_slice = slice.as_mut_rust_slice_full_zeroed(); + let res = file_guard.with_std_file(|std_file| std_file.read_at(rust_slice, offset)); + ((file_guard, slice), res) } #[cfg(target_os = "linux")] IoEngine::TokioEpollUring => { let system = tokio_epoll_uring_ext::thread_local_system().await; - let (resources, res) = system.read(file_guard, offset, buf).await; + let (resources, res) = system.read(file_guard, offset, slice).await; (resources, res.map_err(epoll_uring_error_to_std)) } } diff --git a/pageserver/src/virtual_file/owned_buffers_io/slice.rs b/pageserver/src/virtual_file/owned_buffers_io/slice.rs new file mode 100644 index 000000000000..d19e5ddffefb --- /dev/null +++ b/pageserver/src/virtual_file/owned_buffers_io/slice.rs @@ -0,0 +1,121 @@ +use tokio_epoll_uring::BoundedBuf; +use tokio_epoll_uring::BoundedBufMut; +use tokio_epoll_uring::IoBufMut; +use tokio_epoll_uring::Slice; + +pub(crate) trait SliceExt { + /// Get a `&mut[0..self.bytes_total()`] slice, for when you need to do borrow-based IO. 
+ /// + /// See the test case `test_slice_full_zeroed` for the difference to just doing `&slice[..]` + fn as_mut_rust_slice_full_zeroed(&mut self) -> &mut [u8]; +} + +impl SliceExt for Slice +where + B: IoBufMut, +{ + #[inline(always)] + fn as_mut_rust_slice_full_zeroed(&mut self) -> &mut [u8] { + // zero-initialize the uninitialized parts of the buffer so we can create a Rust slice + // + // SAFETY: we own `slice`, don't write outside the bounds + unsafe { + let to_init = self.bytes_total() - self.bytes_init(); + self.stable_mut_ptr() + .add(self.bytes_init()) + .write_bytes(0, to_init); + self.set_init(self.bytes_total()); + }; + let bytes_total = self.bytes_total(); + &mut self[0..bytes_total] + } +} + +#[cfg(test)] +mod tests { + use std::io::Read; + + use super::*; + use bytes::Buf; + use tokio_epoll_uring::Slice; + + #[test] + fn test_slice_full_zeroed() { + let make_fake_file = || bytes::BytesMut::from(&b"12345"[..]).reader(); + + // before we start the test, let's make sure we have a shared understanding of what slice_full does + { + let buf = Vec::with_capacity(3); + let slice: Slice<_> = buf.slice_full(); + assert_eq!(slice.bytes_init(), 0); + assert_eq!(slice.bytes_total(), 3); + let rust_slice = &slice[..]; + assert_eq!( + rust_slice.len(), + 0, + "Slice only derefs to a &[u8] of the initialized part" + ); + } + + // and also let's establish a shared understanding of .slice() + { + let buf = Vec::with_capacity(3); + let slice: Slice<_> = buf.slice(0..2); + assert_eq!(slice.bytes_init(), 0); + assert_eq!(slice.bytes_total(), 2); + let rust_slice = &slice[..]; + assert_eq!( + rust_slice.len(), + 0, + "Slice only derefs to a &[u8] of the initialized part" + ); + } + + // the above leads to the easy mistake of using slice[..] for borrow-based IO like so: + { + let buf = Vec::with_capacity(3); + let mut slice: Slice<_> = buf.slice_full(); + assert_eq!(slice[..].len(), 0); + let mut file = make_fake_file(); + file.read_exact(&mut slice[..]).unwrap(); // one might think this reads 3 bytes but it reads 0 + assert_eq!(&slice[..] as &[u8], &[][..] as &[u8]); + } + + // With owned buffers IO like with VirtualFilem, you could totally + // pass in a `Slice` with bytes_init()=0 but bytes_total()=5 + // and it will read 5 bytes into the slice, and return a slice that has bytes_init()=5. + { + // TODO: demo + } + + // + // Ok, now that we have a shared understanding let's demo how to use the extension trait. + // + + // slice_full() + { + let buf = Vec::with_capacity(3); + let mut slice: Slice<_> = buf.slice_full(); + let rust_slice = slice.as_mut_rust_slice_full_zeroed(); + assert_eq!(rust_slice.len(), 3); + assert_eq!(rust_slice, &[0, 0, 0]); + let mut file = make_fake_file(); + file.read_exact(rust_slice).unwrap(); + assert_eq!(rust_slice, b"123"); + assert_eq!(&slice[..], b"123"); + } + + // .slice(..) + { + let buf = Vec::with_capacity(3); + let mut slice: Slice<_> = buf.slice(0..2); + let rust_slice = slice.as_mut_rust_slice_full_zeroed(); + assert_eq!(rust_slice.len(), 2); + assert_eq!(rust_slice, &[0, 0]); + let mut file = make_fake_file(); + file.read_exact(rust_slice).unwrap(); + assert_eq!(rust_slice, b"12"); + assert_eq!(&slice[..], b"12"); + } + } +} From c22c6a6c9ece6c1067d75402f6161c6758289484 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Fri, 28 Jun 2024 11:09:11 +0100 Subject: [PATCH 48/57] Add buckets to safekeeper ops metrics (#8194) In #8188 I forgot to specify buckets for new operations metrics. This commit fixes that. 
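As a side note, a sketch of what the explicit-buckets form looks like. The metric name, label value, and bucket boundaries below are made up for illustration, and the snippet assumes the same prometheus-style `register_histogram_vec!` / `HistogramVec` re-exports this file already uses; the real constant is `DISK_FSYNC_SECONDS_BUCKETS`.

```rust
use metrics::{register_histogram_vec, HistogramVec};
use once_cell::sync::Lazy;

// Illustrative bucket boundaries in seconds, not the actual DISK_FSYNC_SECONDS_BUCKETS values.
const EXAMPLE_SECONDS_BUCKETS: &[f64] = &[0.001, 0.01, 0.1, 1.0, 10.0];

static EXAMPLE_OPERATION_SECONDS: Lazy<HistogramVec> = Lazy::new(|| {
    register_histogram_vec!(
        "example_operation_seconds",
        "Seconds spent on example operations",
        &["operation"],
        EXAMPLE_SECONDS_BUCKETS.to_vec()
    )
    .expect("Failed to register example_operation_seconds histogram vec")
});

fn timed_operation() {
    // Without the explicit bucket argument the histogram falls back to the
    // prometheus defaults, which may not match the latency range being measured.
    let _timer = EXAMPLE_OPERATION_SECONDS
        .with_label_values(&["flush"])
        .start_timer();
    // ... the timed work; the elapsed seconds are observed when `_timer` drops.
}
```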
--- safekeeper/src/metrics.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/safekeeper/src/metrics.rs b/safekeeper/src/metrics.rs index a484c45af8d7..539ecf826bf8 100644 --- a/safekeeper/src/metrics.rs +++ b/safekeeper/src/metrics.rs @@ -72,7 +72,8 @@ pub static WAL_STORAGE_OPERATION_SECONDS: Lazy = Lazy::new(|| { register_histogram_vec!( "safekeeper_wal_storage_operation_seconds", "Seconds spent on WAL storage operations", - &["operation"] + &["operation"], + DISK_FSYNC_SECONDS_BUCKETS.to_vec() ) .expect("Failed to register safekeeper_wal_storage_operation_seconds histogram vec") }); @@ -80,7 +81,8 @@ pub static MISC_OPERATION_SECONDS: Lazy = Lazy::new(|| { register_histogram_vec!( "safekeeper_misc_operation_seconds", "Seconds spent on miscellaneous operations", - &["operation"] + &["operation"], + DISK_FSYNC_SECONDS_BUCKETS.to_vec() ) .expect("Failed to register safekeeper_misc_operation_seconds histogram vec") }); From ca2f7d06b230525df62864aa0cc9ebc8ee67aeaf Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 28 Jun 2024 16:47:05 +0300 Subject: [PATCH 49/57] Cherry-pick upstream fix for TruncateMultiXact assertion (#8195) We hit that bug in a new test being added in PR #6528. We'd get the fix from upstream with the next minor release anyway, but cherry-pick it now to unblock PR #6528. Upstream commit b1ffe3ff0b. See https://github.com/neondatabase/neon/pull/6528#issuecomment-2167367910 --- vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 7845c122d51d..223dd925959f 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 7845c122d51d3ebb547a984a640ac0310a2fadce +Subproject commit 223dd925959f8124711dd3d867dc8ba6629d52c0 diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index 2ff5ecc67c64..f54d7373eb0d 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit 2ff5ecc67c64e5fe44b7dde598e64e4538e0c373 +Subproject commit f54d7373eb0de5a54bce2becdb1c801026c7edff diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index d55e0aca104a..e06bebc75306 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit d55e0aca104af0b611cf5565f1033b2acd2dcc1c +Subproject commit e06bebc75306b583e758b52c95946d41109239b2 diff --git a/vendor/revisions.json b/vendor/revisions.json index e755cf2e9dfa..574e3719340e 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "d55e0aca104af0b611cf5565f1033b2acd2dcc1c"], - "v15": ["15.7", "2ff5ecc67c64e5fe44b7dde598e64e4538e0c373"], - "v14": ["14.12", "7845c122d51d3ebb547a984a640ac0310a2fadce"] + "v16": ["16.3", "e06bebc75306b583e758b52c95946d41109239b2"], + "v15": ["15.7", "f54d7373eb0de5a54bce2becdb1c801026c7edff"], + "v14": ["14.12", "223dd925959f8124711dd3d867dc8ba6629d52c0"] } From babbe125dabdd528843d78c97874833ae67c314e Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 28 Jun 2024 18:05:09 +0100 Subject: [PATCH 50/57] pageserver: drop out of secondary download if iteration time has passed (#8198) ## Problem Very long running downloads can be wasteful, because the heatmap they're using is outdated after a few minutes. 
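The general shape of the fix is sketched below with placeholder names and types; the real change (see the diff) threads a deadline through the secondary downloader and surfaces it as a dedicated `UpdateError::Restart`.

```rust
use std::time::{Duration, Instant};

// Placeholder for the downloader's error enum; the real one has more variants.
enum UpdateError {
    Restart, // not a failure: ask the scheduler to run again with a fresh heatmap
}

fn download_layers(layers: &[String], period: Duration) -> Result<(), UpdateError> {
    // Allow roughly two scheduling periods per pass, so we never keep working
    // from a heatmap that is many minutes out of date.
    let deadline = Instant::now() + period * 2;

    for layer in layers {
        if Instant::now() > deadline {
            // Drop out; the next scheduled pass runs against the latest heatmap.
            return Err(UpdateError::Restart);
        }
        download_one(layer);
    }
    Ok(())
}

fn download_one(_layer: &str) {
    // per-layer download elided
}
```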
Closes: https://github.com/neondatabase/neon/issues/8182 ## Summary of changes - Impose a deadline on timeline downloads, using the same period as we use for scheduling, and returning an UpdateError::Restart when it is reached. This restart will involve waiting for a scheduling interval, but that's a good thing: it helps let other tenants proceed. - Refactor download_timeline so that the part where we update the state for local layers is done even if we fall out of the layer download loop with an error: this is important, especially for big tenants, because only layers in the SecondaryDetail state will be considered for eviction. --- pageserver/src/tenant/secondary/downloader.rs | 128 ++++++++++++++---- 1 file changed, 98 insertions(+), 30 deletions(-) diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 24176ecf1956..f6f30641dbbb 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -262,6 +262,7 @@ impl scheduler::RunningJob for RunningDownload { struct CompleteDownload { secondary_state: Arc, completed_at: Instant, + result: Result<(), UpdateError>, } impl scheduler::Completion for CompleteDownload { @@ -286,21 +287,33 @@ impl JobGenerator { + // Start downloading again as soon as we can. This will involve waiting for the scheduler's + // scheduling interval. This slightly reduces the peak download speed of tenants that hit their + // deadline and keep restarting, but that also helps give other tenants a chance to execute rather + // that letting one big tenant dominate for a long time. + detail.next_download = Some(Instant::now()); + } + _ => { + let period = detail + .last_download + .as_ref() + .map(|d| d.upload_period) + .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL); + + // We advance next_download irrespective of errors: we don't want error cases to result in + // expensive busy-polling. + detail.next_download = Some(Instant::now() + period_jitter(period, 5)); + } + } } async fn schedule(&mut self) -> SchedulingResult { @@ -396,9 +409,10 @@ impl JobGenerator { tracing::info!("No heatmap found for tenant. This is fine if it is new."); @@ -415,6 +429,9 @@ impl JobGenerator { tracing::error!("Error while downloading tenant: {e}"); }, + Err(UpdateError::Restart) => { + tracing::info!("Download reached deadline & will restart to update heatmap") + } Ok(()) => {} }; @@ -436,6 +453,7 @@ impl JobGenerator { /// Errors that may be encountered while updating a tenant #[derive(thiserror::Error, Debug)] enum UpdateError { + /// This is not a true failure, but it's how a download indicates that it would like to be restarted by + /// the scheduler, to pick up the latest heatmap + #[error("Reached deadline, restarting downloads")] + Restart, + #[error("No remote data found")] NoData, #[error("Insufficient local storage space")] @@ -603,6 +626,26 @@ impl<'a> TenantDownloader<'a> { self.prepare_timelines(&heatmap, heatmap_mtime).await?; } + // Calculate a deadline for downloads: if downloading takes longer than this, it is useful to drop out and start again, + // so that we are always using reasonably a fresh heatmap. Otherwise, if we had really huge content to download, we might + // spend 10s of minutes downloading layers we don't need. 
+ // (see https://github.com/neondatabase/neon/issues/8182) + let deadline = { + let period = self + .secondary_state + .detail + .lock() + .unwrap() + .last_download + .as_ref() + .map(|d| d.upload_period) + .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL); + + // Use double the period: we are not promising to complete within the period, this is just a heuristic + // to keep using a "reasonably fresh" heatmap. + Instant::now() + period * 2 + }; + // Download the layers in the heatmap for timeline in heatmap.timelines { let timeline_state = timeline_states @@ -618,7 +661,7 @@ impl<'a> TenantDownloader<'a> { } let timeline_id = timeline.timeline_id; - self.download_timeline(timeline, timeline_state, ctx) + self.download_timeline(timeline, timeline_state, deadline, ctx) .instrument(tracing::info_span!( "secondary_download_timeline", tenant_id=%tenant_shard_id.tenant_id, @@ -827,26 +870,28 @@ impl<'a> TenantDownloader<'a> { .and_then(|x| x) } - async fn download_timeline( + /// Download heatmap layers that are not present on local disk, or update their + /// access time if they are already present. + async fn download_timeline_layers( &self, + tenant_shard_id: &TenantShardId, timeline: HeatMapTimeline, timeline_state: SecondaryDetailTimeline, + deadline: Instant, ctx: &RequestContext, - ) -> Result<(), UpdateError> { - debug_assert_current_span_has_tenant_and_timeline_id(); - let tenant_shard_id = self.secondary_state.get_tenant_shard_id(); - + ) -> (Result<(), UpdateError>, Vec) { // Accumulate updates to the state let mut touched = Vec::new(); - tracing::debug!(timeline_id=%timeline.timeline_id, "Downloading layers, {} in heatmap", timeline.layers.len()); - - // Download heatmap layers that are not present on local disk, or update their - // access time if they are already present. for layer in timeline.layers { if self.secondary_state.cancel.is_cancelled() { tracing::debug!("Cancelled -- dropping out of layer loop"); - return Err(UpdateError::Cancelled); + return (Err(UpdateError::Cancelled), touched); + } + + if Instant::now() > deadline { + // We've been running downloads for a while, restart to download latest heatmap. + return (Err(UpdateError::Restart), touched); } // Existing on-disk layers: just update their access time. @@ -916,20 +961,43 @@ impl<'a> TenantDownloader<'a> { match self .download_layer(tenant_shard_id, &timeline.timeline_id, layer, ctx) - .await? + .await { - Some(layer) => touched.push(layer), - None => { + Ok(Some(layer)) => touched.push(layer), + Ok(None) => { // Not an error but we didn't download it: remote layer is missing. Don't add it to the list of // things to consider touched. } + Err(e) => { + return (Err(e), touched); + } } } - // Write updates to state to record layers we just downloaded or touched. 
+ (Ok(()), touched) + } + + async fn download_timeline( + &self, + timeline: HeatMapTimeline, + timeline_state: SecondaryDetailTimeline, + deadline: Instant, + ctx: &RequestContext, + ) -> Result<(), UpdateError> { + debug_assert_current_span_has_tenant_and_timeline_id(); + let tenant_shard_id = self.secondary_state.get_tenant_shard_id(); + let timeline_id = timeline.timeline_id; + + tracing::debug!(timeline_id=%timeline_id, "Downloading layers, {} in heatmap", timeline.layers.len()); + + let (result, touched) = self + .download_timeline_layers(tenant_shard_id, timeline, timeline_state, deadline, ctx) + .await; + + // Write updates to state to record layers we just downloaded or touched, irrespective of whether the overall result was successful { let mut detail = self.secondary_state.detail.lock().unwrap(); - let timeline_detail = detail.timelines.entry(timeline.timeline_id).or_default(); + let timeline_detail = detail.timelines.entry(timeline_id).or_default(); tracing::info!("Wrote timeline_detail for {} touched layers", touched.len()); @@ -943,14 +1011,14 @@ impl<'a> TenantDownloader<'a> { let local_path = local_layer_path( self.conf, tenant_shard_id, - &timeline.timeline_id, + &timeline_id, &t.name, &t.metadata.generation, ); e.insert(OnDiskState::new( self.conf, tenant_shard_id, - &timeline.timeline_id, + &timeline_id, t.name, t.metadata.clone(), t.access_time, @@ -961,7 +1029,7 @@ impl<'a> TenantDownloader<'a> { } } - Ok(()) + result } /// Call this during timeline download if a layer will _not_ be downloaded, to update progress statistics From e1a06b40b7690e4d622b4588d946eacd5b601ce2 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Fri, 28 Jun 2024 18:16:21 +0100 Subject: [PATCH 51/57] Add rate limiter for partial uploads (#8203) Too many concurrect partial uploads can hurt disk performance, this commit adds a limiter. Context: https://neondb.slack.com/archives/C04KGFVUWUQ/p1719489018814669?thread_ts=1719440183.134739&cid=C04KGFVUWUQ --- safekeeper/src/bin/safekeeper.rs | 7 +++- safekeeper/src/lib.rs | 3 ++ safekeeper/src/timeline.rs | 7 ++-- safekeeper/src/timeline_manager.rs | 16 +++++++-- safekeeper/src/timelines_global_map.rs | 34 +++++++++++++----- safekeeper/src/wal_backup_partial.rs | 35 ++++++++++++++++++- .../tests/walproposer_sim/safekeeper.rs | 1 + 7 files changed, 89 insertions(+), 14 deletions(-) diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 20650490b1ae..c81373c77c7d 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -29,7 +29,8 @@ use utils::pid_file; use metrics::set_build_info_metric; use safekeeper::defaults::{ DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR, - DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, + DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_CONCURRENCY, + DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, }; use safekeeper::http; use safekeeper::wal_service; @@ -191,6 +192,9 @@ struct Args { /// Pending updates to control file will be automatically saved after this interval. #[arg(long, value_parser = humantime::parse_duration, default_value = DEFAULT_CONTROL_FILE_SAVE_INTERVAL)] control_file_save_interval: Duration, + /// Number of allowed concurrent uploads of partial segments to remote storage. + #[arg(long, default_value = DEFAULT_PARTIAL_BACKUP_CONCURRENCY)] + partial_backup_concurrency: usize, } // Like PathBufValueParser, but allows empty string. 
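As an aside on the mechanism this patch adds: a standalone sketch of the semaphore-based limiter, simplified from the `RateLimiter` introduced in `safekeeper/src/wal_backup_partial.rs` further down (which additionally records a wait-time metric). The names here are illustrative.

```rust
use std::sync::Arc;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};

#[derive(Clone)]
struct Limiter {
    semaphore: Arc<Semaphore>,
}

impl Limiter {
    fn new(permits: usize) -> Self {
        Self {
            semaphore: Arc::new(Semaphore::new(permits)),
        }
    }

    // `acquire_owned` ties the permit to the returned guard; the slot is freed on drop.
    async fn acquire(&self) -> OwnedSemaphorePermit {
        self.semaphore
            .clone()
            .acquire_owned()
            .await
            .expect("semaphore is never closed")
    }
}

async fn upload_with_limit(limiter: &Limiter) {
    let _permit = limiter.acquire().await;
    // ... perform one partial-segment upload while the permit is held ...
}
```

Cloning the limiter is cheap (it clones the `Arc`), which is why a single instance can be handed to every timeline's upload task.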
@@ -344,6 +348,7 @@ async fn main() -> anyhow::Result<()> { enable_offload: args.enable_offload, delete_offloaded_wal: args.delete_offloaded_wal, control_file_save_interval: args.control_file_save_interval, + partial_backup_concurrency: args.partial_backup_concurrency, }; // initialize sentry if SENTRY_DSN is provided diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index 067e425570e7..5cd676d8570c 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -52,6 +52,7 @@ pub mod defaults { pub const DEFAULT_MAX_OFFLOADER_LAG_BYTES: u64 = 128 * (1 << 20); pub const DEFAULT_PARTIAL_BACKUP_TIMEOUT: &str = "15m"; pub const DEFAULT_CONTROL_FILE_SAVE_INTERVAL: &str = "300s"; + pub const DEFAULT_PARTIAL_BACKUP_CONCURRENCY: &str = "5"; } #[derive(Debug, Clone)] @@ -91,6 +92,7 @@ pub struct SafeKeeperConf { pub enable_offload: bool, pub delete_offloaded_wal: bool, pub control_file_save_interval: Duration, + pub partial_backup_concurrency: usize, } impl SafeKeeperConf { @@ -133,6 +135,7 @@ impl SafeKeeperConf { enable_offload: false, delete_offloaded_wal: false, control_file_save_interval: Duration::from_secs(1), + partial_backup_concurrency: 1, } } } diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 6b83270c181b..132e5ec32f4f 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -36,7 +36,7 @@ use crate::timeline_guard::ResidenceGuard; use crate::timeline_manager::{AtomicStatus, ManagerCtl}; use crate::timelines_set::TimelinesSet; use crate::wal_backup::{self}; -use crate::wal_backup_partial::PartialRemoteSegment; +use crate::wal_backup_partial::{PartialRemoteSegment, RateLimiter}; use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION}; use crate::metrics::{FullTimelineInfo, WalStorageMetrics, MISC_OPERATION_SECONDS}; @@ -587,6 +587,7 @@ impl Timeline { shared_state: &mut WriteGuardSharedState<'_>, conf: &SafeKeeperConf, broker_active_set: Arc, + partial_backup_rate_limiter: RateLimiter, ) -> Result<()> { match fs::metadata(&self.timeline_dir).await { Ok(_) => { @@ -617,7 +618,7 @@ impl Timeline { return Err(e); } - self.bootstrap(conf, broker_active_set); + self.bootstrap(conf, broker_active_set, partial_backup_rate_limiter); Ok(()) } @@ -626,6 +627,7 @@ impl Timeline { self: &Arc, conf: &SafeKeeperConf, broker_active_set: Arc, + partial_backup_rate_limiter: RateLimiter, ) { let (tx, rx) = self.manager_ctl.bootstrap_manager(); @@ -637,6 +639,7 @@ impl Timeline { broker_active_set, tx, rx, + partial_backup_rate_limiter, )); } diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 66c62ce19785..62142162de8c 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -32,7 +32,7 @@ use crate::{ timeline_guard::{AccessService, GuardId, ResidenceGuard}, timelines_set::{TimelineSetGuard, TimelinesSet}, wal_backup::{self, WalBackupTaskHandle}, - wal_backup_partial::{self, PartialRemoteSegment}, + wal_backup_partial::{self, PartialRemoteSegment, RateLimiter}, SafeKeeperConf, }; @@ -185,6 +185,7 @@ pub(crate) struct Manager { // misc pub(crate) access_service: AccessService, + pub(crate) partial_backup_rate_limiter: RateLimiter, } /// This task gets spawned alongside each timeline and is responsible for managing the timeline's @@ -197,6 +198,7 @@ pub async fn main_task( broker_active_set: Arc, manager_tx: tokio::sync::mpsc::UnboundedSender, mut manager_rx: tokio::sync::mpsc::UnboundedReceiver, + partial_backup_rate_limiter: RateLimiter, ) { 
tli.set_status(Status::Started); @@ -209,7 +211,14 @@ pub async fn main_task( } }; - let mut mgr = Manager::new(tli, conf, broker_active_set, manager_tx).await; + let mut mgr = Manager::new( + tli, + conf, + broker_active_set, + manager_tx, + partial_backup_rate_limiter, + ) + .await; // Start recovery task which always runs on the timeline. if !mgr.is_offloaded && mgr.conf.peer_recovery_enabled { @@ -321,6 +330,7 @@ impl Manager { conf: SafeKeeperConf, broker_active_set: Arc, manager_tx: tokio::sync::mpsc::UnboundedSender, + partial_backup_rate_limiter: RateLimiter, ) -> Manager { let (is_offloaded, partial_backup_uploaded) = tli.bootstrap_mgr().await; Manager { @@ -339,6 +349,7 @@ impl Manager { partial_backup_uploaded, access_service: AccessService::new(manager_tx), tli, + partial_backup_rate_limiter, } } @@ -525,6 +536,7 @@ impl Manager { self.partial_backup_task = Some(tokio::spawn(wal_backup_partial::main_task( self.wal_resident_timeline(), self.conf.clone(), + self.partial_backup_rate_limiter.clone(), ))); } diff --git a/safekeeper/src/timelines_global_map.rs b/safekeeper/src/timelines_global_map.rs index 45e08ede3c0a..9ce1112cec43 100644 --- a/safekeeper/src/timelines_global_map.rs +++ b/safekeeper/src/timelines_global_map.rs @@ -5,6 +5,7 @@ use crate::safekeeper::ServerInfo; use crate::timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError}; use crate::timelines_set::TimelinesSet; +use crate::wal_backup_partial::RateLimiter; use crate::SafeKeeperConf; use anyhow::{bail, Context, Result}; use camino::Utf8PathBuf; @@ -23,6 +24,7 @@ struct GlobalTimelinesState { conf: Option, broker_active_set: Arc, load_lock: Arc>, + partial_backup_rate_limiter: RateLimiter, } // Used to prevent concurrent timeline loading. @@ -37,8 +39,12 @@ impl GlobalTimelinesState { } /// Get dependencies for a timeline constructor. - fn get_dependencies(&self) -> (SafeKeeperConf, Arc) { - (self.get_conf().clone(), self.broker_active_set.clone()) + fn get_dependencies(&self) -> (SafeKeeperConf, Arc, RateLimiter) { + ( + self.get_conf().clone(), + self.broker_active_set.clone(), + self.partial_backup_rate_limiter.clone(), + ) } /// Insert timeline into the map. Returns error if timeline with the same id already exists. @@ -66,6 +72,7 @@ static TIMELINES_STATE: Lazy> = Lazy::new(|| { conf: None, broker_active_set: Arc::new(TimelinesSet::default()), load_lock: Arc::new(tokio::sync::Mutex::new(TimelineLoadLock)), + partial_backup_rate_limiter: RateLimiter::new(1), }) }); @@ -79,6 +86,7 @@ impl GlobalTimelines { // lock, so use explicit block let tenants_dir = { let mut state = TIMELINES_STATE.lock().unwrap(); + state.partial_backup_rate_limiter = RateLimiter::new(conf.partial_backup_concurrency); state.conf = Some(conf); // Iterate through all directories and load tenants for all directories @@ -122,7 +130,7 @@ impl GlobalTimelines { /// this function is called during init when nothing else is running, so /// this is fine. async fn load_tenant_timelines(tenant_id: TenantId) -> Result<()> { - let (conf, broker_active_set) = { + let (conf, broker_active_set, partial_backup_rate_limiter) = { let state = TIMELINES_STATE.lock().unwrap(); state.get_dependencies() }; @@ -145,7 +153,11 @@ impl GlobalTimelines { .unwrap() .timelines .insert(ttid, tli.clone()); - tli.bootstrap(&conf, broker_active_set.clone()); + tli.bootstrap( + &conf, + broker_active_set.clone(), + partial_backup_rate_limiter.clone(), + ); } // If we can't load a timeline, it's most likely because of a corrupted // directory. 
We will log an error and won't allow to delete/recreate @@ -178,7 +190,8 @@ impl GlobalTimelines { _guard: &tokio::sync::MutexGuard<'a, TimelineLoadLock>, ttid: TenantTimelineId, ) -> Result> { - let (conf, broker_active_set) = TIMELINES_STATE.lock().unwrap().get_dependencies(); + let (conf, broker_active_set, partial_backup_rate_limiter) = + TIMELINES_STATE.lock().unwrap().get_dependencies(); match Timeline::load_timeline(&conf, ttid) { Ok(timeline) => { @@ -191,7 +204,7 @@ impl GlobalTimelines { .timelines .insert(ttid, tli.clone()); - tli.bootstrap(&conf, broker_active_set); + tli.bootstrap(&conf, broker_active_set, partial_backup_rate_limiter); Ok(tli) } @@ -222,7 +235,7 @@ impl GlobalTimelines { commit_lsn: Lsn, local_start_lsn: Lsn, ) -> Result> { - let (conf, broker_active_set) = { + let (conf, broker_active_set, partial_backup_rate_limiter) = { let state = TIMELINES_STATE.lock().unwrap(); if let Ok(timeline) = state.get(&ttid) { // Timeline already exists, return it. @@ -257,7 +270,12 @@ impl GlobalTimelines { // Bootstrap is transactional, so if it fails, the timeline will be deleted, // and the state on disk should remain unchanged. if let Err(e) = timeline - .init_new(&mut shared_state, &conf, broker_active_set) + .init_new( + &mut shared_state, + &conf, + broker_active_set, + partial_backup_rate_limiter, + ) .await { // Note: the most likely reason for init failure is that the timeline diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index 9c7cd0888d83..825851c97c9a 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -18,6 +18,8 @@ //! This way control file stores information about all potentially existing //! remote partial segments and can clean them up after uploading a newer version. +use std::sync::Arc; + use camino::Utf8PathBuf; use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI}; use remote_storage::RemotePath; @@ -27,7 +29,7 @@ use tracing::{debug, error, info, instrument, warn}; use utils::lsn::Lsn; use crate::{ - metrics::{PARTIAL_BACKUP_UPLOADED_BYTES, PARTIAL_BACKUP_UPLOADS}, + metrics::{MISC_OPERATION_SECONDS, PARTIAL_BACKUP_UPLOADED_BYTES, PARTIAL_BACKUP_UPLOADS}, safekeeper::Term, timeline::WalResidentTimeline, timeline_manager::StateSnapshot, @@ -35,6 +37,30 @@ use crate::{ SafeKeeperConf, }; +#[derive(Clone)] +pub struct RateLimiter { + semaphore: Arc, +} + +impl RateLimiter { + pub fn new(permits: usize) -> Self { + Self { + semaphore: Arc::new(tokio::sync::Semaphore::new(permits)), + } + } + + async fn acquire_owned(&self) -> tokio::sync::OwnedSemaphorePermit { + let _timer = MISC_OPERATION_SECONDS + .with_label_values(&["partial_permit_acquire"]) + .start_timer(); + self.semaphore + .clone() + .acquire_owned() + .await + .expect("semaphore is closed") + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum UploadStatus { /// Upload is in progress. This status should be used only for garbage collection, @@ -208,6 +234,9 @@ impl PartialBackup { /// Upload the latest version of the partial segment and garbage collect older versions. 
#[instrument(name = "upload", skip_all, fields(name = %prepared.name))] async fn do_upload(&mut self, prepared: &PartialRemoteSegment) -> anyhow::Result<()> { + let _timer = MISC_OPERATION_SECONDS + .with_label_values(&["partial_do_upload"]) + .start_timer(); info!("starting upload {:?}", prepared); let state_0 = self.state.clone(); @@ -307,6 +336,7 @@ pub(crate) fn needs_uploading( pub async fn main_task( tli: WalResidentTimeline, conf: SafeKeeperConf, + limiter: RateLimiter, ) -> Option { debug!("started"); let await_duration = conf.partial_backup_timeout; @@ -411,6 +441,9 @@ pub async fn main_task( continue 'outer; } + // limit concurrent uploads + let _upload_permit = limiter.acquire_owned().await; + let prepared = backup.prepare_upload().await; if let Some(seg) = &uploaded_segment { if seg.eq_without_status(&prepared) { diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 43835c7f4411..6bbf96d71df4 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -187,6 +187,7 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { enable_offload: false, delete_offloaded_wal: false, control_file_save_interval: Duration::from_secs(1), + partial_backup_concurrency: 1, }; let mut global = GlobalMap::new(disk, conf.clone())?; From b8bbaafc0352237ffd90b91f646df886739593b2 Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 28 Jun 2024 18:27:13 +0100 Subject: [PATCH 52/57] storage controller: fix heatmaps getting disabled during shard split (#8197) ## Problem At the start of do_tenant_shard_split, we drop any secondary location for the parent shards. The reconciler uses presence of secondary locations as a condition for enabling heatmaps. On the pageserver, child shards inherit their configuration from parents, but the storage controller assumes the child's ObservedState is the same as the parent's config from the prepare phase. The result is that some child shards end up with inaccurate ObservedState, and until something next migrates or restarts, those tenant shards aren't uploading heatmaps, so their secondary locations are downloading everything that was resident at the moment of the split (including ancestor layers which are often cleaned up shortly after the split). Closes: https://github.com/neondatabase/neon/issues/8189 ## Summary of changes - Use PlacementPolicy to control enablement of heatmap upload, rather than the literal presence of secondaries in IntentState: this way we avoid switching them off during shard split - test: during tenant split test, assert that the child shards have heatmap uploads enabled. --- storage_controller/src/reconciler.rs | 13 +++++++++++-- storage_controller/src/service.rs | 4 ++-- storage_controller/src/tenant_shard.rs | 9 +++------ test_runner/regress/test_sharding.py | 7 +++++++ 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/storage_controller/src/reconciler.rs b/storage_controller/src/reconciler.rs index fe97f724c132..886ceae90fbf 100644 --- a/storage_controller/src/reconciler.rs +++ b/storage_controller/src/reconciler.rs @@ -1,6 +1,7 @@ use crate::pageserver_client::PageserverClient; use crate::persistence::Persistence; use crate::service; +use pageserver_api::controller_api::PlacementPolicy; use pageserver_api::models::{ LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig, }; @@ -29,6 +30,7 @@ pub(super) struct Reconciler { /// of a tenant's state from when we spawned a reconcile task. 
pub(super) tenant_shard_id: TenantShardId, pub(crate) shard: ShardIdentity, + pub(crate) placement_policy: PlacementPolicy, pub(crate) generation: Option, pub(crate) intent: TargetState, @@ -641,7 +643,7 @@ impl Reconciler { generation, &self.shard, &self.config, - !self.intent.secondary.is_empty(), + &self.placement_policy, ); match self.observed.locations.get(&node.get_id()) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { @@ -801,8 +803,15 @@ pub(crate) fn attached_location_conf( generation: Generation, shard: &ShardIdentity, config: &TenantConfig, - has_secondaries: bool, + policy: &PlacementPolicy, ) -> LocationConfig { + let has_secondaries = match policy { + PlacementPolicy::Attached(0) | PlacementPolicy::Detached | PlacementPolicy::Secondary => { + false + } + PlacementPolicy::Attached(_) => true, + }; + LocationConfig { mode: LocationConfigMode::AttachedSingle, generation: generation.into(), diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index bcc40c69a25d..3965d7453d49 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -1390,7 +1390,7 @@ impl Service { tenant_shard.generation.unwrap(), &tenant_shard.shard, &tenant_shard.config, - false, + &PlacementPolicy::Attached(0), )), }, )]); @@ -3321,7 +3321,7 @@ impl Service { generation, &child_shard, &config, - matches!(policy, PlacementPolicy::Attached(n) if n > 0), + &policy, )), }, ); diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index 45295bc59be8..3fcf31ac1028 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -908,12 +908,8 @@ impl TenantShard { .generation .expect("Attempted to enter attached state without a generation"); - let wanted_conf = attached_location_conf( - generation, - &self.shard, - &self.config, - !self.intent.secondary.is_empty(), - ); + let wanted_conf = + attached_location_conf(generation, &self.shard, &self.config, &self.policy); match self.observed.locations.get(&node_id) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {} Some(_) | None => { @@ -1099,6 +1095,7 @@ impl TenantShard { let mut reconciler = Reconciler { tenant_shard_id: self.tenant_shard_id, shard: self.shard, + placement_policy: self.policy.clone(), generation: self.generation, intent: reconciler_intent, detach, diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 62a9f422ee4d..8267d3f36c0b 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -542,6 +542,13 @@ def check_effective_tenant_config(): for k, v in non_default_tenant_config.items(): assert config.effective_config[k] == v + # Check that heatmap uploads remain enabled after shard split + # (https://github.com/neondatabase/neon/issues/8189) + assert ( + config.effective_config["heatmap_period"] + and config.effective_config["heatmap_period"] != "0s" + ) + # Validate pageserver state: expect every child shard to have an attached and secondary location (total, attached) = get_node_shard_counts(env, tenant_ids=[tenant_id]) assert sum(attached.values()) == split_shard_count From bc704917a38b824e683f8f1a3c05f1ae496ddf53 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Fri, 28 Jun 2024 15:13:25 -0400 Subject: [PATCH 53/57] fix(pageserver): ensure tenant harness has different names (#8205) rename the tenant test harness name Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 92d9c5b1432a..3ffbaf98c69f 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -6264,7 +6264,7 @@ mod tests { #[tokio::test] async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?; + let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?; let (tenant, ctx) = harness.load().await; let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap(); From 30027d94a26ad6624e1b0f55d3819a1c4cb8f59d Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 1 Jul 2024 01:49:49 +0300 Subject: [PATCH 54/57] Fix tracking of the nextMulti in the pageserver's copy of CheckPoint (#6528) Whenever we see an XLOG_MULTIXACT_CREATE_ID WAL record, we need to update the nextMulti and NextMultiOffset fields in the pageserver's copy of the CheckPoint struct, to cover the new multi-XID. In PostgreSQL, this is done by updating an in-memory struct during WAL replay, but because in Neon you can start a compute node at any LSN, we need to have an up-to-date value pre-calculated in the pageserver at all times. We do the same for nextXid. However, we had a bug in WAL ingestion code that does that: the multi-XIDs will wrap around at 2^32, just like XIDs, so we need to do the comparisons in a wraparound-aware fashion. Fix that, and add tests. Fixes issue #6520 Co-authored-by: Konstantin Knizhnik --- libs/postgres_ffi/src/xlog_utils.rs | 22 ++ .../wal_craft/src/xlog_utils_test.rs | 47 +++ pageserver/src/walingest.rs | 29 +- test_runner/regress/test_next_xid.py | 273 ++++++++++++++++++ 4 files changed, 365 insertions(+), 6 deletions(-) diff --git a/libs/postgres_ffi/src/xlog_utils.rs b/libs/postgres_ffi/src/xlog_utils.rs index 0bbb91afc282..d25b23663bf6 100644 --- a/libs/postgres_ffi/src/xlog_utils.rs +++ b/libs/postgres_ffi/src/xlog_utils.rs @@ -356,6 +356,28 @@ impl CheckPoint { } false } + + /// Advance next multi-XID/offset to those given in arguments. + /// + /// It's important that this handles wraparound correctly. This should match the + /// MultiXactAdvanceNextMXact() logic in PostgreSQL's xlog_redo() function. + /// + /// Returns 'true' if the Checkpoint was updated. 
+ pub fn update_next_multixid(&mut self, multi_xid: u32, multi_offset: u32) -> bool { + let mut modified = false; + + if multi_xid.wrapping_sub(self.nextMulti) as i32 > 0 { + self.nextMulti = multi_xid; + modified = true; + } + + if multi_offset.wrapping_sub(self.nextMultiOffset) as i32 > 0 { + self.nextMultiOffset = multi_offset; + modified = true; + } + + modified + } } /// Generate new, empty WAL segment, with correct block headers at the first diff --git a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs index 496458b2e42d..750affc94eed 100644 --- a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs +++ b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs @@ -202,6 +202,53 @@ pub fn test_update_next_xid() { assert_eq!(checkpoint.nextXid.value, 2048); } +#[test] +pub fn test_update_next_multixid() { + let checkpoint_buf = [0u8; std::mem::size_of::()]; + let mut checkpoint = CheckPoint::decode(&checkpoint_buf).unwrap(); + + // simple case + checkpoint.nextMulti = 20; + checkpoint.nextMultiOffset = 20; + checkpoint.update_next_multixid(1000, 2000); + assert_eq!(checkpoint.nextMulti, 1000); + assert_eq!(checkpoint.nextMultiOffset, 2000); + + // No change + checkpoint.update_next_multixid(500, 900); + assert_eq!(checkpoint.nextMulti, 1000); + assert_eq!(checkpoint.nextMultiOffset, 2000); + + // Close to wraparound, but not wrapped around yet + checkpoint.nextMulti = 0xffff0000; + checkpoint.nextMultiOffset = 0xfffe0000; + checkpoint.update_next_multixid(0xffff00ff, 0xfffe00ff); + assert_eq!(checkpoint.nextMulti, 0xffff00ff); + assert_eq!(checkpoint.nextMultiOffset, 0xfffe00ff); + + // Wraparound + checkpoint.update_next_multixid(1, 900); + assert_eq!(checkpoint.nextMulti, 1); + assert_eq!(checkpoint.nextMultiOffset, 900); + + // Wraparound nextMulti to 0. + // + // It's a bit surprising that nextMulti can be 0, because that's a special value + // (InvalidMultiXactId). However, that's how Postgres does it at multi-xid wraparound: + // nextMulti wraps around to 0, but then when the next multi-xid is assigned, it skips + // the 0 and the next multi-xid actually assigned is 1. + checkpoint.nextMulti = 0xffff0000; + checkpoint.nextMultiOffset = 0xfffe0000; + checkpoint.update_next_multixid(0, 0xfffe00ff); + assert_eq!(checkpoint.nextMulti, 0); + assert_eq!(checkpoint.nextMultiOffset, 0xfffe00ff); + + // Wraparound nextMultiOffset to 0 + checkpoint.update_next_multixid(0, 0); + assert_eq!(checkpoint.nextMulti, 0); + assert_eq!(checkpoint.nextMultiOffset, 0); +} + #[test] pub fn test_encode_logical_message() { let expected = [ diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index 4f26f2f6d1f5..fb10bca5a6ba 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -1384,14 +1384,31 @@ impl WalIngest { // Note: The multixact members can wrap around, even within one WAL record. 
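// [Editorial aside, illustrative sketch -- not part of the patch.] The wraparound-aware
// comparison described in the commit message: interpreting the u32 difference as i32
// keeps "is newer" well-defined across the 2^32 boundary, which a plain `>=` misses.
fn is_newer(candidate: u32, current: u32) -> bool {
    candidate.wrapping_sub(current) as i32 > 0
}

#[test]
fn wraparound_comparison_sketch() {
    assert!(is_newer(2, 1)); // ordinary case
    assert!(is_newer(1, u32::MAX - 5)); // candidate wrapped past 2^32 yet is still "ahead"
    assert!(!is_newer(u32::MAX - 5, 1)); // ...and the reverse direction is not newer
    assert!(!is_newer(7, 7)); // equal values are not newer
}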
offset = offset.wrapping_add(n_this_page as u32); } - if xlrec.mid >= self.checkpoint.nextMulti { - self.checkpoint.nextMulti = xlrec.mid + 1; - self.checkpoint_modified = true; - } - if xlrec.moff + xlrec.nmembers > self.checkpoint.nextMultiOffset { - self.checkpoint.nextMultiOffset = xlrec.moff + xlrec.nmembers; + let next_offset = offset; + assert!(xlrec.moff.wrapping_add(xlrec.nmembers) == next_offset); + + // Update next-multi-xid and next-offset + // + // NB: In PostgreSQL, the next-multi-xid stored in the control file is allowed to + // go to 0, and it's fixed up by skipping to FirstMultiXactId in functions that + // read it, like GetNewMultiXactId(). This is different from how nextXid is + // incremented! nextXid skips over < FirstNormalTransactionId when the the value + // is stored, so it's never 0 in a checkpoint. + // + // I don't know why it's done that way, it seems less error-prone to skip over 0 + // when the value is stored rather than when it's read. But let's do it the same + // way here. + let next_multi_xid = xlrec.mid.wrapping_add(1); + + if self + .checkpoint + .update_next_multixid(next_multi_xid, next_offset) + { self.checkpoint_modified = true; } + + // Also update the next-xid with the highest member. According to the comments in + // multixact_redo(), this shouldn't be necessary, but let's do the same here. let max_mbr_xid = xlrec.members.iter().fold(None, |acc, mbr| { if let Some(max_xid) = acc { if mbr.xid.wrapping_sub(max_xid) as i32 > 0 { diff --git a/test_runner/regress/test_next_xid.py b/test_runner/regress/test_next_xid.py index b9e7e642b51c..51e847135efd 100644 --- a/test_runner/regress/test_next_xid.py +++ b/test_runner/regress/test_next_xid.py @@ -7,6 +7,7 @@ from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, + VanillaPostgres, import_timeline_from_vanilla_postgres, wait_for_wal_insert_lsn, ) @@ -182,3 +183,275 @@ def test_import_at_2bil( cur = conn.cursor() cur.execute("SELECT count(*) from t") assert cur.fetchone() == (10000 + 1 + 1,) + + +# Constants and macros copied from PostgreSQL multixact.c and headers. These are needed to +# calculate the SLRU segments that a particular multixid or multixid-offsets falls into. +BLCKSZ = 8192 +MULTIXACT_OFFSETS_PER_PAGE = int(BLCKSZ / 4) +SLRU_PAGES_PER_SEGMENT = int(32) +MXACT_MEMBER_BITS_PER_XACT = 8 +MXACT_MEMBER_FLAGS_PER_BYTE = 1 +MULTIXACT_FLAGBYTES_PER_GROUP = 4 +MULTIXACT_MEMBERS_PER_MEMBERGROUP = MULTIXACT_FLAGBYTES_PER_GROUP * MXACT_MEMBER_FLAGS_PER_BYTE +MULTIXACT_MEMBERGROUP_SIZE = 4 * MULTIXACT_MEMBERS_PER_MEMBERGROUP + MULTIXACT_FLAGBYTES_PER_GROUP +MULTIXACT_MEMBERGROUPS_PER_PAGE = int(BLCKSZ / MULTIXACT_MEMBERGROUP_SIZE) +MULTIXACT_MEMBERS_PER_PAGE = MULTIXACT_MEMBERGROUPS_PER_PAGE * MULTIXACT_MEMBERS_PER_MEMBERGROUP + + +def MultiXactIdToOffsetSegment(xid: int): + return int(xid / (SLRU_PAGES_PER_SEGMENT * MULTIXACT_OFFSETS_PER_PAGE)) + + +def MXOffsetToMemberSegment(off: int): + return int(off / (SLRU_PAGES_PER_SEGMENT * MULTIXACT_MEMBERS_PER_PAGE)) + + +def advance_multixid_to( + pg_bin: PgBin, vanilla_pg: VanillaPostgres, next_multi_xid: int, next_multi_offset: int +): + """ + Use pg_resetwal to advance the nextMulti and nextMultiOffset values in a stand-alone + Postgres cluster. This is useful to get close to wraparound or some other interesting + value, without having to burn a lot of time consuming the (multi-)XIDs one by one. + + The new values should be higher than the old ones, in a wraparound-aware sense. + + On entry, the server should be running. 
It will be shut down and restarted. + """ + + # Read old values from the last checkpoint. We will pass the old oldestMultiXid value + # back to pg_resetwal; there's no option to leave it alone. + with vanilla_pg.connect() as conn: + with conn.cursor() as cur: + # Make sure the oldest-multi-xid value in the control file is up-to-date + cur.execute("checkpoint") + cur.execute("select oldest_multi_xid, next_multixact_id from pg_control_checkpoint()") + rec = cur.fetchone() + assert rec is not None + (ckpt_oldest_multi_xid, ckpt_next_multi_xid) = rec + log.info(f"oldestMultiXid was {ckpt_oldest_multi_xid}, nextMultiXid was {ckpt_next_multi_xid}") + log.info(f"Resetting to {next_multi_xid}") + + # Use pg_resetwal to reset the next multiXid and multiOffset to given values. + vanilla_pg.stop() + pg_resetwal_path = os.path.join(pg_bin.pg_bin_path, "pg_resetwal") + cmd = [ + pg_resetwal_path, + f"--multixact-ids={next_multi_xid},{ckpt_oldest_multi_xid}", + f"--multixact-offset={next_multi_offset}", + "-D", + str(vanilla_pg.pgdatadir), + ] + pg_bin.run_capture(cmd) + + # Because we skip over a lot of values, Postgres hasn't created the SLRU segments for + # the new values yet. Create them manually, to allow Postgres to start up. + # + # This leaves "gaps" in the SLRU where segments between old value and new value are + # missing. That's OK for our purposes. Autovacuum will print some warnings about the + # missing segments, but will clean it up by truncating the SLRUs up to the new value, + # closing the gap. + segname = "%04X" % MultiXactIdToOffsetSegment(next_multi_xid) + log.info(f"Creating dummy segment pg_multixact/offsets/{segname}") + with open(vanilla_pg.pgdatadir / "pg_multixact" / "offsets" / segname, "w") as of: + of.write("\0" * SLRU_PAGES_PER_SEGMENT * BLCKSZ) + of.flush() + + segname = "%04X" % MXOffsetToMemberSegment(next_multi_offset) + log.info(f"Creating dummy segment pg_multixact/members/{segname}") + with open(vanilla_pg.pgdatadir / "pg_multixact" / "members" / segname, "w") as of: + of.write("\0" * SLRU_PAGES_PER_SEGMENT * BLCKSZ) + of.flush() + + # Start Postgres again and wait until autovacuum has processed all the databases + # + # This allows truncating the SLRUs, fixing the gaps with missing segments. + vanilla_pg.start() + with vanilla_pg.connect().cursor() as cur: + for _ in range(1000): + datminmxid = int( + query_scalar(cur, "select min(datminmxid::text::int8) from pg_database") + ) + log.info(f"datminmxid {datminmxid}") + if next_multi_xid - datminmxid < 1_000_000: # not wraparound-aware! + break + time.sleep(0.5) + + +def test_multixid_wraparound_import( + neon_env_builder: NeonEnvBuilder, + test_output_dir: Path, + pg_bin: PgBin, + vanilla_pg, +): + """ + Test that the wraparound of the "next-multi-xid" counter is handled correctly in + the pageserver, and multi-offsets as well. + """ + env = neon_env_builder.init_start() + + # In order to test multixid wraparound, we need to first advance the counter to + # within spitting distance of the wraparound, that is 2^32 multi-XIDs. We could simply + # run a workload that consumes a lot of multi-XIDs until we approach that, but that + # takes a very long time. So we cheat. + # + # Our strategy is to create a vanilla Postgres cluster, and use pg_resetwal to + # directly set the multi-xid counter to a higher value. However, we cannot directly set + # it to just before 2^32 (~ 4 billion), because that would make the existing + # 'relminmxid' values look like they're in the future.
It's not clear how the + # system would behave in that situation. So instead, we bump it up ~ 1 billion + # multi-XIDs at a time, and let autovacuum process all the relations and update + # 'relminmxid' between each run. + # + # XXX: For the multi-offsets, most of the bump is done in the last call. This is + # because advancing it ~ 1 billion at a time hit a pathological case in the + # MultiXactMemberFreezeThreshold() function, causing autovacuum not to trigger multixid + # freezing. See + # https://www.postgresql.org/message-id/85fb354c-f89f-4d47-b3a2-3cbd461c90a3%40iki.fi + # Multi-offsets don't have the same wraparound problems at the 2 billion mark as + # multi-xids do, so one big jump is fine. + vanilla_pg.configure( + [ + "log_autovacuum_min_duration = 0", + # Perform anti-wraparound vacuuming aggressively + "autovacuum_naptime='1 s'", + "autovacuum_freeze_max_age = 1000000", + "autovacuum_multixact_freeze_max_age = 1000000", + ], + ) + vanilla_pg.start() + advance_multixid_to(pg_bin, vanilla_pg, 0x40000000, 0x10000000) + advance_multixid_to(pg_bin, vanilla_pg, 0x80000000, 0x20000000) + advance_multixid_to(pg_bin, vanilla_pg, 0xC0000000, 0x30000000) + advance_multixid_to(pg_bin, vanilla_pg, 0xFFFFFF00, 0xFFFFFF00) + + vanilla_pg.safe_psql("create user cloud_admin with password 'postgres' superuser") + vanilla_pg.safe_psql("create table tt as select g as id from generate_series(1, 10) g") + vanilla_pg.safe_psql("CHECKPOINT") + + # Import the cluster to the pageserver + tenant_id = TenantId.generate() + env.pageserver.tenant_create(tenant_id) + timeline_id = TimelineId.generate() + import_timeline_from_vanilla_postgres( + test_output_dir, + env, + pg_bin, + tenant_id, + timeline_id, + "imported_multixid_wraparound_test", + vanilla_pg.connstr(), + ) + vanilla_pg.stop() + + endpoint = env.endpoints.create_start( + "imported_multixid_wraparound_test", + tenant_id=tenant_id, + config_lines=[ + "log_autovacuum_min_duration = 0", + "autovacuum_naptime='5 s'", + "autovacuum=off", + ], + ) + conn = endpoint.connect() + cur = conn.cursor() + assert query_scalar(cur, "select count(*) from tt") == 10 # sanity check + + # Install extension containing function needed for test + cur.execute("CREATE EXTENSION neon_test_utils") + + # Consume a lot of XIDs, just to advance the XIDs to a different range than the + # multi-xids. That avoids confusion while debugging. + cur.execute("select test_consume_xids(100000)") + cur.execute("select pg_switch_wal()") + cur.execute("checkpoint") + + # Use subtransactions so that each row in 'tt' is stamped with a different XID. Leave + # the transaction open. + cur.execute("BEGIN") + cur.execute( + """ +do $$ +declare + idvar int; +begin + for idvar in select id from tt loop + begin + update tt set id = idvar where id = idvar; + exception when others then + raise 'didn''t expect an error: %', sqlerrm; + end; + end loop; +end; +$$; +""" + ) + + # In a different transaction, acquire a FOR KEY SHARE lock on each row. This generates + # a new multixid for each row, with the previous xmax and this transaction's XID as the + # members. + # + # Repeat this until the multi-xid counter wraps around. + conn3 = endpoint.connect() + cur3 = conn3.cursor() + next_multixact_id_before_restart = 0 + observed_before_wraparound = False + while True: + cur3.execute("BEGIN") + cur3.execute("SELECT * FROM tt FOR KEY SHARE") + + # Get the xmax of one of the rows we locked. It should be a multi-xid. It might + # not be the latest one, but close enough.
+ row_xmax = int(query_scalar(cur3, "SELECT xmax FROM tt LIMIT 1")) + cur3.execute("COMMIT") + log.info(f"observed a row with xmax {row_xmax}") + + # High value means not wrapped around yet + if row_xmax >= 0xFFFFFF00: + observed_before_wraparound = True + continue + + # xmax should not be a regular XID. (We bumped up the regular XID range earlier + # to around 100000 and above.) + assert row_xmax < 100 + + # xmax values < FirstNormalTransactionId (== 3) could be special XID values, or + # multixid values after wraparound. We don't know for sure which, so keep going to + # be sure we see value that's unambiguously a wrapped-around multixid + if row_xmax < 3: + continue + + next_multixact_id_before_restart = row_xmax + log.info( + f"next_multixact_id is now at {next_multixact_id_before_restart} or a little higher" + ) + break + + # We should have observed the state before wraparound + assert observed_before_wraparound + + cur.execute("COMMIT") + + # Wait until pageserver has received all the data, and restart the endpoint + wait_for_wal_insert_lsn(env, endpoint, tenant_id, timeline_id) + endpoint.stop(mode="immediate") # 'immediate' to avoid writing shutdown checkpoint + endpoint.start() + + # Check that the next-multixid value wrapped around correctly + conn = endpoint.connect() + cur = conn.cursor() + cur.execute("select next_multixact_id from pg_control_checkpoint()") + next_multixact_id_after_restart = int( + query_scalar(cur, "select next_multixact_id from pg_control_checkpoint()") + ) + log.info(f"next_multixact_id after restart: {next_multixact_id_after_restart}") + assert next_multixact_id_after_restart >= next_multixact_id_before_restart + + # The multi-offset should wrap around as well + cur.execute("select next_multi_offset from pg_control_checkpoint()") + next_multi_offset_after_restart = int( + query_scalar(cur, "select next_multi_offset from pg_control_checkpoint()") + ) + log.info(f"next_multi_offset after restart: {next_multi_offset_after_restart}") + assert next_multi_offset_after_restart < 100000 From be598f1bf41f93cec0f3462687f4c75a63b44c16 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 1 Jul 2024 11:23:31 +0300 Subject: [PATCH 55/57] tests: remove a leftover 'running' flag (#8216) The 'running' boolean was replaced with a semaphore in commit f0e2bb79b2, but this initialization was missed. Remove it so that if a test tries to access it, you get an error rather than always claiming that the endpoint is not running. Spotted by Arseny at https://github.com/neondatabase/neon/pull/7288#discussion_r1660068657 --- test_runner/fixtures/neon_fixtures.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 4911917bf452..a1cb1b51953c 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3491,7 +3491,6 @@ def __init__( ): super().__init__(host="localhost", port=pg_port, user="cloud_admin", dbname="postgres") self.env = env - self.running = False self.branch_name: Optional[str] = None # dubious self.endpoint_id: Optional[str] = None # dubious, see asserts below self.pgdata_dir: Optional[str] = None # Path to computenode PGDATA From 7ee2bebdb78d199c69745ee2d65285acfbe2eba9 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 1 Jul 2024 12:58:08 +0300 Subject: [PATCH 56/57] tests: Make neon_xlogflush() flush all WAL, if you omit the LSN arg This makes it much more convenient to use in the common case that you want to flush all the WAL. 
(Passing pg_current_wal_insert_lsn() as the argument doesn't work for the same reasons as explained in the comments: we need to back off to the beginning of a page if the previous record ended at a page boundary.) I plan to use this to fix the issue that Arseny Sher called out at https://github.com/neondatabase/neon/pull/7288#discussion_r1660063852 --- pgxn/neon_test_utils/Makefile | 2 +- ...tils--1.1.sql => neon_test_utils--1.2.sql} | 2 +- pgxn/neon_test_utils/neon_test_utils.control | 2 +- pgxn/neon_test_utils/neontest.c | 38 ++++++++++++++++++- 4 files changed, 40 insertions(+), 4 deletions(-) rename pgxn/neon_test_utils/{neon_test_utils--1.1.sql => neon_test_utils--1.2.sql} (96%) diff --git a/pgxn/neon_test_utils/Makefile b/pgxn/neon_test_utils/Makefile index 1ee87357e5e2..13712724399d 100644 --- a/pgxn/neon_test_utils/Makefile +++ b/pgxn/neon_test_utils/Makefile @@ -7,7 +7,7 @@ OBJS = \ neontest.o EXTENSION = neon_test_utils -DATA = neon_test_utils--1.1.sql +DATA = neon_test_utils--1.2.sql PGFILEDESC = "neon_test_utils - helpers for neon testing and debugging" PG_CONFIG = pg_config diff --git a/pgxn/neon_test_utils/neon_test_utils--1.1.sql b/pgxn/neon_test_utils/neon_test_utils--1.2.sql similarity index 96% rename from pgxn/neon_test_utils/neon_test_utils--1.1.sql rename to pgxn/neon_test_utils/neon_test_utils--1.2.sql index 534784f31912..f84a24ec8d48 100644 --- a/pgxn/neon_test_utils/neon_test_utils--1.1.sql +++ b/pgxn/neon_test_utils/neon_test_utils--1.2.sql @@ -41,7 +41,7 @@ RETURNS bytea AS 'MODULE_PATHNAME', 'get_raw_page_at_lsn_ex' LANGUAGE C PARALLEL UNSAFE; -CREATE FUNCTION neon_xlogflush(lsn pg_lsn) +CREATE FUNCTION neon_xlogflush(lsn pg_lsn DEFAULT NULL) RETURNS VOID AS 'MODULE_PATHNAME', 'neon_xlogflush' LANGUAGE C PARALLEL UNSAFE; diff --git a/pgxn/neon_test_utils/neon_test_utils.control b/pgxn/neon_test_utils/neon_test_utils.control index 5f6d64083591..c7b9191ddc12 100644 --- a/pgxn/neon_test_utils/neon_test_utils.control +++ b/pgxn/neon_test_utils/neon_test_utils.control @@ -1,6 +1,6 @@ # neon_test_utils extension comment = 'helpers for neon testing and debugging' -default_version = '1.1' +default_version = '1.2' module_pathname = '$libdir/neon_test_utils' relocatable = true trusted = true diff --git a/pgxn/neon_test_utils/neontest.c b/pgxn/neon_test_utils/neontest.c index 47f245fbf1af..944936d39517 100644 --- a/pgxn/neon_test_utils/neontest.c +++ b/pgxn/neon_test_utils/neontest.c @@ -15,6 +15,7 @@ #include "access/relation.h" #include "access/xact.h" #include "access/xlog.h" +#include "access/xlog_internal.h" #include "catalog/namespace.h" #include "fmgr.h" #include "funcapi.h" @@ -444,11 +445,46 @@ get_raw_page_at_lsn_ex(PG_FUNCTION_ARGS) /* * Directly calls XLogFlush(lsn) to flush WAL buffers. + * + * If 'lsn' is not specified (is NULL), flush all generated WAL. */ Datum neon_xlogflush(PG_FUNCTION_ARGS) { - XLogRecPtr lsn = PG_GETARG_LSN(0); + XLogRecPtr lsn; + + if (RecoveryInProgress()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("recovery is in progress"), + errhint("cannot flush WAL during recovery."))); + + if (!PG_ARGISNULL(0)) + lsn = PG_GETARG_LSN(0); + else + { + lsn = GetXLogInsertRecPtr(); + + /*--- + * The LSN returned by GetXLogInsertRecPtr() is the position where the + * next inserted record would begin.
If the last record ended just at + * the page boundary, the next record will begin after the page header + * on the next page, and that's what GetXLogInsertRecPtr() returns, + * but the page header has not been written yet. If we tried to flush + * it, XLogFlush() would throw an error: + * + * ERROR: xlog flush request %X/%X is not satisfied --- flushed only to %X/%X + * + * To avoid that, if the insert position points to just after the page + * header, back off to page boundary. + */ + if (lsn % XLOG_BLCKSZ == SizeOfXLogShortPHD && + XLogSegmentOffset(lsn, wal_segment_size) > XLOG_BLCKSZ) + lsn -= SizeOfXLogShortPHD; + else if (lsn % XLOG_BLCKSZ == SizeOfXLogLongPHD && + XLogSegmentOffset(lsn, wal_segment_size) < XLOG_BLCKSZ) + lsn -= SizeOfXLogLongPHD; + } XLogFlush(lsn); PG_RETURN_VOID(); From 57f476ff5a2d5eec4d3585f710b78636ce75f794 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 1 Jul 2024 12:58:12 +0300 Subject: [PATCH 57/57] Restore running xacts from CLOG on replica startup (#7288) We have one pretty serious MVCC visibility bug with hot standby replicas. We incorrectly treat any transactions that are in progress in the primary, when the standby is started, as aborted. That can break MVCC for queries running concurrently in the standby. It can also lead to hint bits being set incorrectly, and that damage can last until the replica is restarted. The fundamental bug was that we treated any replica start as starting from a shut down server. The fix for that is straightforward: we need to set 'wasShutdown = false' in InitWalRecovery() (see changes in the postgres repo). However, that introduces a new problem: with wasShutdown = false, the standby will not open up for queries until it receives a running-xacts WAL record from the primary. That's correct, and that's how Postgres hot standby always works. But it's a problem for Neon, because: * It changes the historical behavior for existing users. Currently, the standby immediately opens up for queries, so if they now need to wait, we can break existing use cases that were working fine (assuming you don't hit the MVCC issues). * The problem is much worse for Neon than it is for standalone PostgreSQL, because in Neon, we can start a replica from an arbitrary LSN. In standalone PostgreSQL, the replica always starts WAL replay from a checkpoint record, and the primary arranges things so that there is always a running-xacts record soon after each checkpoint record. You can still hit this issue with PostgreSQL if you have a transaction with lots of subtransactions running in the primary, but it's pretty rare in practice. To mitigate that, we introduce another way to collect the running-xacts information at startup, without waiting for the running-xacts WAL record: We scan the CLOG for XIDs that haven't been marked as committed or aborted. It has limitations with subtransactions too, but should mitigate the problem for most users. See https://github.com/neondatabase/neon/issues/7236.
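To make the mechanism concrete, the following is a heavily simplified, hypothetical Rust sketch of the CLOG scan described above; the actual implementation is the C function RestoreRunningXactsFromClog added to pgxn/neon/neon.c further down in this patch, which additionally handles prepared transactions, pg_subtrans, and XID-space details that this sketch glosses over:

#[derive(PartialEq)]
enum XidStatus {
    InProgress,
    Committed,
    Aborted,
}

// Walk the XID space from `oldest_active` (inclusive) to `next_xid` (exclusive)
// and collect XIDs still marked in-progress in the (modelled) CLOG. Give up with
// None if more than `max_xids` are found; the caller then falls back to waiting
// for a running-xacts WAL record, as standard hot standby does.
fn scan_clog(
    status_of: impl Fn(u32) -> XidStatus,
    oldest_active: u32,
    next_xid: u32,
    max_xids: usize,
) -> Option<Vec<u32>> {
    let mut running = Vec::new();
    let mut xid = oldest_active;
    while xid != next_xid {
        if status_of(xid) == XidStatus::InProgress {
            if running.len() >= max_xids {
                return None; // too many in-progress XIDs: wait for running-xacts instead
            }
            running.push(xid);
        }
        xid = xid.wrapping_add(1);
    }
    Some(running)
}

fn main() {
    // Pretend XIDs 100..110 were assigned and 103 and 107 never finished.
    let status = |xid: u32| match xid {
        103 | 107 => XidStatus::InProgress,
        _ => XidStatus::Committed,
    };
    let running = scan_clog(status, 100, 110, 64).unwrap();
    assert_eq!(running, vec![103, 107]);
    println!("recovered running xids: {running:?}");
}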
Co-authored-by: Konstantin Knizhnik --- pageserver/src/walingest.rs | 40 +- pgxn/neon/neon.c | 293 ++++++++ test_runner/fixtures/neon_fixtures.py | 4 +- test_runner/fixtures/pageserver/utils.py | 2 +- test_runner/regress/test_replica_start.py | 646 ++++++++++++++++++ test_runner/regress/test_replication_start.py | 32 - vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 +- 10 files changed, 981 insertions(+), 48 deletions(-) create mode 100644 test_runner/regress/test_replica_start.py delete mode 100644 test_runner/regress/test_replication_start.py diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index fb10bca5a6ba..07c90385e654 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -343,7 +343,33 @@ impl WalIngest { xlog_checkpoint.oldestActiveXid, self.checkpoint.oldestActiveXid ); - self.checkpoint.oldestActiveXid = xlog_checkpoint.oldestActiveXid; + + // A shutdown checkpoint has `oldestActiveXid == InvalidTransactionid`, + // because at shutdown, all in-progress transactions will implicitly + // end. Postgres startup code knows that, and allows hot standby to start + // immediately from a shutdown checkpoint. + // + // In Neon, Postgres hot standby startup always behaves as if starting from + // an online checkpoint. It needs a valid `oldestActiveXid` value, so + // instead of overwriting self.checkpoint.oldestActiveXid with + // InvalidTransactionid from the checkpoint WAL record, update it to a + // proper value, knowing that there are no in-progress transactions at this + // point, except for prepared transactions. + // + // See also the neon code changes in the InitWalRecovery() function. + if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID + && info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN + { + let mut oldest_active_xid = self.checkpoint.nextXid.value as u32; + for xid in modification.tline.list_twophase_files(lsn, ctx).await? { + if (xid.wrapping_sub(oldest_active_xid) as i32) < 0 { + oldest_active_xid = xid; + } + } + self.checkpoint.oldestActiveXid = oldest_active_xid; + } else { + self.checkpoint.oldestActiveXid = xlog_checkpoint.oldestActiveXid; + } // Write a new checkpoint key-value pair on every checkpoint record, even // if nothing really changed. Not strictly required, but it seems nice to @@ -375,6 +401,7 @@ impl WalIngest { if info == pg_constants::XLOG_RUNNING_XACTS { let xlrec = crate::walrecord::XlRunningXacts::decode(&mut buf); self.checkpoint.oldestActiveXid = xlrec.oldest_running_xid; + self.checkpoint_modified = true; } } pg_constants::RM_REPLORIGIN_ID => { @@ -1277,13 +1304,10 @@ impl WalIngest { xlrec.pageno, xlrec.oldest_xid, xlrec.oldest_xid_db ); - // Here we treat oldestXid and oldestXidDB - // differently from postgres redo routines. - // In postgres checkpoint.oldestXid lags behind xlrec.oldest_xid - // until checkpoint happens and updates the value. - // Here we can use the most recent value. - // It's just an optimization, though and can be deleted. - // TODO Figure out if there will be any issues with replica. + // In Postgres, oldestXid and oldestXidDB are updated in memory when the CLOG is + // truncated, but a checkpoint record with the updated values isn't written until + // later. In Neon, a server can start at any LSN, not just on a checkpoint record, + // so we keep the oldestXid and oldestXidDB up-to-date. 
self.checkpoint.oldestXid = xlrec.oldest_xid; self.checkpoint.oldestXidDB = xlrec.oldest_xid_db; self.checkpoint_modified = true; diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index b6b2db7e71ad..e4968bdf8991 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -12,6 +12,8 @@ #include "fmgr.h" #include "miscadmin.h" +#include "access/subtrans.h" +#include "access/twophase.h" #include "access/xact.h" #include "access/xlog.h" #include "storage/buf_internals.h" @@ -22,10 +24,12 @@ #include "replication/logical.h" #include "replication/slot.h" #include "replication/walsender.h" +#include "storage/proc.h" #include "storage/procsignal.h" #include "tcop/tcopprot.h" #include "funcapi.h" #include "access/htup_details.h" +#include "utils/builtins.h" #include "utils/pg_lsn.h" #include "utils/guc.h" #include "utils/wait_event.h" @@ -266,6 +270,293 @@ LogicalSlotsMonitorMain(Datum main_arg) } } +/* + * XXX: These are private to procarray.c, but we need them here. + */ +#define PROCARRAY_MAXPROCS (MaxBackends + max_prepared_xacts) +#define TOTAL_MAX_CACHED_SUBXIDS \ + ((PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS) + +/* + * Restore running-xact information by scanning the CLOG at startup. + * + * In PostgreSQL, a standby always has to wait for a running-xacts WAL record + * to arrive before it can start accepting queries. Furthermore, if there are + * transactions with too many subxids (> 64) open to fit in the in-memory + * subxids cache, the running-xacts record will be marked as "suboverflowed", + * and the standby will need to also wait for the currently in-progress + * transactions to finish. + * + * That's not great in PostgreSQL, because a hot standby does not necessarily + * open up for queries immediately as you might expect. But it's worse in + * Neon: A standby in Neon doesn't need to start WAL replay from a checkpoint + * record; it can start at any LSN. Postgres arranges things so that there is + * a running-xacts record soon after every checkpoint record, but when you + * start from an arbitrary LSN, that doesn't help. If the primary is idle, or + * not running at all, it might never write a new running-xacts record, + * leaving the replica in limbo where it can never start accepting queries. + * + * To mitigate that, we have an additional mechanism to find the running-xacts + * information: we scan the CLOG, making note of any XIDs not marked as + * committed or aborted. They are added to the Postgres known-assigned XIDs + * array by calling ProcArrayApplyRecoveryInfo() in the caller of this + * function. + * + * There is one big limitation with that mechanism: The size of the + * known-assigned XIDs array is limited, so if there are a lot of in-progress + * XIDs, we have to give up. Furthermore, we don't know how many of the in-progress + * XIDs are subtransactions, and if we use up all the space in the + * known-assigned XIDs array for subtransactions, we might run out of space in + * the array later during WAL replay, causing the replica to shut down with + * "ERROR: too many KnownAssignedXids". The safe # of XIDs that we can add to + * the known-assigned array without risking that error later is very low, + * merely PGPROC_MAX_CACHED_SUBXIDS == 64, so we take our chances and use up + * to half of the known-assigned XIDs array for the subtransactions, even + * though that risks getting the error later. + * + * Note: It's OK if the recovered list of XIDs includes some transactions that + * have crashed in the primary, and hence will never commit.
They will be seen + * as in-progress, until we see the next running-xacts record with an + * oldestActiveXid that invalidates them. That's how the known-assigned XIDs + * array always works. + * + * If scraping the CLOG doesn't succeed for some reason, like the subxid + * overflow, Postgres will fall back to waiting for a running-xacts record + * like usual. + * + * Returns true if a complete list of in-progress XIDs was scraped. + */ +static bool +RestoreRunningXactsFromClog(CheckPoint *checkpoint, TransactionId **xids, int *nxids) +{ + TransactionId from; + TransactionId till; + int max_xcnt; + TransactionId *prepared_xids = NULL; + int n_prepared_xids; + TransactionId *restored_xids = NULL; + int n_restored_xids; + int next_prepared_idx; + + Assert(*xids == NULL); + + /* + * If the checkpoint doesn't have a valid oldestActiveXid, bail out. We + * don't know where to start the scan. + * + * This shouldn't happen, because the pageserver always maintains a valid + * oldestActiveXid nowadays. Except when starting at an old point in time + * that was ingested before the pageserver was taught to do that. + */ + if (!TransactionIdIsValid(checkpoint->oldestActiveXid)) + { + elog(LOG, "cannot restore running-xacts from CLOG because oldestActiveXid is not set"); + goto fail; + } + + /* + * We will scan the CLOG starting from the oldest active XID. + * + * In some corner cases, the oldestActiveXid from the last checkpoint + * might already have been truncated from the CLOG. That is, + * oldestActiveXid might be older than oldestXid. That's possible because + * oldestActiveXid is only updated at checkpoints. After the last + * checkpoint, the oldest transaction might have committed, and the CLOG + * might also have been already truncated. So if oldestActiveXid is older + * than oldestXid, start at oldestXid instead. (Otherwise we'd try to + * access CLOG segments that have already been truncated away.) + */ + from = TransactionIdPrecedes(checkpoint->oldestXid, checkpoint->oldestActiveXid) + ? checkpoint->oldestActiveXid : checkpoint->oldestXid; + till = XidFromFullTransactionId(checkpoint->nextXid); + + /* + * To avoid a "too many KnownAssignedXids" error later during replay, we + * limit the number of collected transactions. This is a tradeoff: if we are + * willing to consume more of the KnownAssignedXids space for the XIDs + * now, that allows us to start up, but we might run out of space later. + * + * The size of the KnownAssignedXids array is TOTAL_MAX_CACHED_SUBXIDS, + * which is (PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS. In + * PostgreSQL, that's always enough because the primary will always write + * an XLOG_XACT_ASSIGNMENT record if a transaction has more than + * PGPROC_MAX_CACHED_SUBXIDS subtransactions. Seeing that record allows + * the standby to mark the XIDs in pg_subtrans and remove them from the + * KnownAssignedXids array. + * + * Here, we don't know which XIDs belong to subtransactions that have + * already been WAL-logged with an XLOG_XACT_ASSIGNMENT record. If we + * wanted to be totally safe and avoid the possibility of getting a "too + * many KnownAssignedXids" error later, we would have to limit ourselves + * to PGPROC_MAX_CACHED_SUBXIDS, which is not much. And that includes top + * transaction IDs too, because we cannot distinguish between top + * transaction IDs and subtransactions here. + * + * Somewhat arbitrarily, we use up to half of KnownAssignedXids.
That + * strikes a sensible balance between being useful, and risking a "too + * many KnownAssignedXids" error later. + */ + max_xcnt = TOTAL_MAX_CACHED_SUBXIDS / 2; + + /* + * Collect XIDs of prepared transactions in an array. This includes only + * their top-level XIDs. We assume that StandbyRecoverPreparedTransactions + * has already been called, so we can find all the sub-transactions in + * pg_subtrans. + */ + PrescanPreparedTransactions(&prepared_xids, &n_prepared_xids); + qsort(prepared_xids, n_prepared_xids, sizeof(TransactionId), xidLogicalComparator); + + /* + * Scan the CLOG, collecting in-progress XIDs into 'restored_xids'. + */ + elog(DEBUG1, "scanning CLOG between %u and %u for in-progress XIDs", from, till); + restored_xids = (TransactionId *) palloc(max_xcnt * sizeof(TransactionId)); + n_restored_xids = 0; + next_prepared_idx = 0; + for (TransactionId xid = from; xid != till;) + { + XLogRecPtr xidlsn; + XidStatus xidstatus; + + xidstatus = TransactionIdGetStatus(xid, &xidlsn); + + /* + * "Merge" the prepared transactions into the restored_xids array as + * we go. The prepared transactions array is sorted. This is mostly + * a sanity check to ensure that all the prepared transactions are + * seen as in-progress. (There is a check after the loop that we didn't + * miss any.) + */ + if (next_prepared_idx < n_prepared_xids && xid == prepared_xids[next_prepared_idx]) + { + /* + * This is a top-level transaction ID of a prepared transaction. + * Include it in the array. + */ + + /* sanity check */ + if (xidstatus != TRANSACTION_STATUS_IN_PROGRESS) + { + elog(LOG, "prepared transaction %u has unexpected status %X, cannot restore running-xacts from CLOG", + xid, xidstatus); + Assert(false); + goto fail; + } + + elog(DEBUG1, "XID %u: was next prepared xact (%d / %d)", xid, next_prepared_idx, n_prepared_xids); + next_prepared_idx++; + } + else if (xidstatus == TRANSACTION_STATUS_COMMITTED) + { + elog(DEBUG1, "XID %u: was committed", xid); + goto skip; + } + else if (xidstatus == TRANSACTION_STATUS_ABORTED) + { + elog(DEBUG1, "XID %u: was aborted", xid); + goto skip; + } + else if (xidstatus == TRANSACTION_STATUS_IN_PROGRESS) + { + /* + * In-progress transactions are included in the array. + * + * Except subtransactions of the prepared transactions. They are + * already set in pg_subtrans, and hence don't need to be tracked + * in the known-assigned XIDs array. + */ + if (n_prepared_xids > 0) + { + TransactionId parent = SubTransGetParent(xid); + + if (TransactionIdIsValid(parent)) + { + /* + * This is a subtransaction belonging to a prepared + * transaction. + * + * Sanity check that it is in the prepared XIDs array. It + * should be, because StandbyRecoverPreparedTransactions + * populated pg_subtrans, and no other XID should be set + * in it yet. (This also relies on the fact that + * StandbyRecoverPreparedTransactions sets the parent of + * each subxid to point directly to the top-level XID, + * rather than restoring the original subtransaction + * hierarchy.) + */ + if (bsearch(&parent, prepared_xids, next_prepared_idx, + sizeof(TransactionId), xidLogicalComparator) == NULL) + { + elog(LOG, "sub-XID %u has unexpected parent %u, cannot restore running-xacts from CLOG", + xid, parent); + Assert(false); + goto fail; + } + elog(DEBUG1, "XID %u: was a subtransaction of prepared xid %u", xid, parent); + goto skip; + } + } + + /* include it in the array */ + elog(DEBUG1, "XID %u: is in progress", xid); + } + else + { + /* + * SUB_COMMITTED is a transient state used at commit.
We don't + * expect to see that here. + */ + elog(LOG, "XID %u has unexpected status %X in pg_xact, cannot restore running-xacts from CLOG", + xid, xidstatus); + Assert(false); + goto fail; + } + + if (n_restored_xids >= max_xcnt) + { + /* + * Overflowed. We won't be able to install the RunningTransactions + * snapshot. + */ + elog(LOG, "too many running xacts to restore from the CLOG; oldestXid=%u oldestActiveXid=%u nextXid %u", + checkpoint->oldestXid, checkpoint->oldestActiveXid, + XidFromFullTransactionId(checkpoint->nextXid)); + goto fail; + } + + restored_xids[n_restored_xids++] = xid; + + skip: + TransactionIdAdvance(xid); + continue; + } + + /* sanity check */ + if (next_prepared_idx != n_prepared_xids) + { + elog(LOG, "prepared transaction ID %u was not visited in the CLOG scan, cannot restore running-xacts from CLOG", + prepared_xids[next_prepared_idx]); + Assert(false); + goto fail; + } + + elog(LOG, "restored %d running xacts by scanning the CLOG; oldestXid=%u oldestActiveXid=%u nextXid %u", + n_restored_xids, checkpoint->oldestXid, checkpoint->oldestActiveXid, XidFromFullTransactionId(checkpoint->nextXid)); + *nxids = n_restored_xids; + *xids = restored_xids; + return true; + + fail: + *nxids = 0; + *xids = NULL; + if (restored_xids) + pfree(restored_xids); + if (prepared_xids) + pfree(prepared_xids); + return false; +} + void _PG_init(void) { @@ -288,6 +579,8 @@ _PG_init(void) pg_init_extension_server(); + restore_running_xacts_callback = RestoreRunningXactsFromClog; + /* * Important: This must happen after other parts of the extension are * loaded, otherwise any settings to GUCs that were set before the diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index a1cb1b51953c..e1c851435142 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3856,7 +3856,9 @@ def stop_all(self) -> "EndpointFactory": return self - def new_replica(self, origin: Endpoint, endpoint_id: str, config_lines: Optional[List[str]]): + def new_replica( + self, origin: Endpoint, endpoint_id: str, config_lines: Optional[List[str]] = None + ): branch_name = origin.branch_name assert origin in self.endpoints assert branch_name is not None diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 60535b759261..b75a480a637e 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -198,7 +198,7 @@ def wait_for_last_record_lsn( lsn: Lsn, ) -> Lsn: """waits for pageserver to catch up to a certain lsn, returns the last observed lsn.""" - for i in range(100): + for i in range(1000): current_lsn = last_record_lsn(pageserver_http, tenant, timeline) if current_lsn >= lsn: return current_lsn diff --git a/test_runner/regress/test_replica_start.py b/test_runner/regress/test_replica_start.py new file mode 100644 index 000000000000..17d476a8a690 --- /dev/null +++ b/test_runner/regress/test_replica_start.py @@ -0,0 +1,646 @@ +""" +In PostgreSQL, a standby always has to wait for a running-xacts WAL record to +arrive before it can start accepting queries. Furthermore, if there are +transactions with too many subxids (> 64) open to fit in the in-memory subxids +cache, the running-xacts record will be marked as "suboverflowed", and the +standby will need to also wait for the currently in-progress transactions to +finish. 
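The "too many KnownAssignedXids" error that the next paragraph refers to is bounded by the array size quoted in the RestoreRunningXactsFromClog comments above, (PGPROC_MAX_CACHED_SUBXIDS + 1) * (MaxBackends + max_prepared_xacts), of which the startup scan only allows itself half. A small illustrative calculation in Rust (the MaxBackends and max_prepared_xacts figures are assumed example settings, not values taken from these tests):

const PGPROC_MAX_CACHED_SUBXIDS: u32 = 64;

fn known_assigned_xids_capacity(max_backends: u32, max_prepared_xacts: u32) -> u32 {
    // TOTAL_MAX_CACHED_SUBXIDS = (PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS
    (PGPROC_MAX_CACHED_SUBXIDS + 1) * (max_backends + max_prepared_xacts)
}

fn main() {
    let capacity = known_assigned_xids_capacity(104, 5); // assumed settings
    // The CLOG-scanning startup path only uses half of this capacity, betting
    // that the remainder is enough for XIDs replayed later from the WAL.
    println!("capacity = {capacity}, startup budget = {}", capacity / 2);
}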
+ +In Neon, we have an additional mechanism that scans the CLOG at server startup +to determine the list of running transactions, so that the standby can start up +immediately without waiting for the running-xacts record, but that mechanism +only works if the # of active (sub-)transactions is reasonably small. Otherwise +it falls back to waiting. Furthermore, it's somewhat optimistic in using up the +known-assigned XIDs array: if too many transactions with subxids are started in +the primary later, the replay in the replica will crash with "too many +KnownAssignedXids" error. + +This module contains tests for those various cases at standby startup: starting +from shutdown checkpoint, using the CLOG scanning mechanism, waiting for +running-xacts record and for in-progress transactions to finish etc. +""" + +import threading +from contextlib import closing + +import psycopg2 +import pytest +from fixtures.log_helper import log +from fixtures.neon_fixtures import NeonEnv, wait_for_last_flush_lsn, wait_replica_caughtup +from fixtures.pg_version import PgVersion +from fixtures.utils import query_scalar, wait_until + +CREATE_SUBXACTS_FUNC = """ +create or replace function create_subxacts(n integer) returns void as $$ +declare + i integer; +begin + for i in 1..n loop + begin + insert into t (payload) values (0); + exception + when others then + raise exception 'caught something: %', sqlerrm; + end; + end loop; +end; $$ language plpgsql +""" + + +def test_replica_start_scan_clog(neon_simple_env: NeonEnv): + """ + Test the CLOG-scanning mechanism at hot standby startup. There is one + transaction active in the primary when the standby is started. The primary + is killed before it has a chance to write a running-xacts record. The + CLOG-scanning at neon startup allows the standby to start up anyway. + + See the module docstring for background. + """ + + # Initialize the primary, a test table, and a helper function to create lots + # of subtransactions. + env = neon_simple_env + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + primary_cur.execute("CREATE EXTENSION neon_test_utils") + primary_cur.execute("create table t(pk serial primary key, payload integer)") + primary_cur.execute(CREATE_SUBXACTS_FUNC) + primary_cur.execute("select pg_switch_wal()") + + # Start a transaction in the primary. Leave the transaction open. + # + # The transaction has some subtransactions, but not too many to cause the + # CLOG-scanning mechanism to give up. + primary_cur.execute("begin") + primary_cur.execute("select create_subxacts(50)") + + # Wait for the WAL to be flushed, but then immediately kill the primary, + # before it has a chance to generate a running-xacts record. + primary_cur.execute("select neon_xlogflush()") + wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline) + primary.stop(mode="immediate") + + # Create a replica. It should start up normally, thanks to the CLOG-scanning + # mechanism. 
+ secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + + # The transaction did not commit, so it should not be visible in the secondary + secondary_conn = secondary.connect() + secondary_cur = secondary_conn.cursor() + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (0,) + + +def test_replica_start_scan_clog_crashed_xids(neon_simple_env: NeonEnv): + """ + Test the CLOG-scanning mechanism at hot standby startup, after + leaving behind crashed transactions. + + See the module docstring for background. + """ + + # Initialize the primary, a test table, and a helper function to create lots + # of subtransactions. + env = neon_simple_env + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + primary_cur.execute("create table t(pk serial primary key, payload integer)") + primary_cur.execute(CREATE_SUBXACTS_FUNC) + primary_cur.execute("select pg_switch_wal()") + + # Consume a lot of XIDs, then kill Postgres without giving it a + # chance to write abort records for them. + primary_cur.execute("begin") + primary_cur.execute("select create_subxacts(100000)") + primary.stop(mode="immediate") + + # Restart the primary. Do some light work, and shut it down cleanly + primary.start() + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + primary_cur.execute("insert into t (payload) values (0)") + primary.stop(mode="fast") + + # Create a replica. It should start up normally, thanks to the CLOG-scanning + # mechanism. (Restarting the primary writes a checkpoint and/or running-xacts + # record, which allows the standby to know that the crashed XIDs are aborted) + secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + + secondary_conn = secondary.connect() + secondary_cur = secondary_conn.cursor() + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (1,) + + +def test_replica_start_at_running_xacts(neon_simple_env: NeonEnv, pg_version): + """ + Test that starting a replica works right after the primary has + created a running-xacts record. This may seem like a trivial case, + but during development, we had a bug that was triggered by having + oldestActiveXid == nextXid. Starting right after a running-xacts + record is one way to test that case. + + See the module docstring for background. + """ + env = neon_simple_env + + if env.pg_version == PgVersion.V14 or env.pg_version == PgVersion.V15: + pytest.skip("pg_log_standby_snapshot() function is available only in PG16") + + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + + primary_cur.execute("CREATE EXTENSION neon_test_utils") + primary_cur.execute("select pg_log_standby_snapshot()") + primary_cur.execute("select neon_xlogflush()") + wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline) + + secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + + secondary_conn = secondary.connect() + secondary_cur = secondary_conn.cursor() + secondary_cur.execute("select 123") + assert secondary_cur.fetchone() == (123,) + + +def test_replica_start_wait_subxids_finish(neon_simple_env: NeonEnv): + """ + Test replica startup when there are a lot of (sub)transactions active in the + primary. 
That's too many for the CLOG-scanning mechanism to handle, so the + replica has to wait for the large transaction to finish before it starts to + accept queries. + + After replica startup, test MVCC with transactions that were in-progress + when the replica was started. + + See the module docstring for background. + """ + + # Initialize the primary, a test table, and a helper function to create + # lots of subtransactions. + env = neon_simple_env + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + primary_cur.execute("create table t(pk serial primary key, payload integer)") + primary_cur.execute(CREATE_SUBXACTS_FUNC) + + # Start a transaction with 100000 subtransactions, and leave it open. That's + # too many to fit in the "known-assigned XIDs array" in the replica, and + # also too many to fit in the subxid caches so the running-xacts record will + # also overflow. + primary_cur.execute("begin") + primary_cur.execute("select create_subxacts(100000)") + + # Start another, smaller transaction in the primary. We'll come back to this + # later. + primary_conn2 = primary.connect() + primary_cur2 = primary_conn2.cursor() + primary_cur2.execute("begin") + primary_cur2.execute("insert into t (payload) values (0)") + + # Create a replica. but before that, wait for the wal to be flushed to + # safekeepers, so that the replica is started at a point where the large + # transaction is already active. (The whole transaction might not be flushed + # yet, but that's OK.) + # + # Start it in a separate thread, so that we can do other stuff while it's + # blocked waiting for the startup to finish. + wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline) + secondary = env.endpoints.new_replica(origin=primary, endpoint_id="secondary") + start_secondary_thread = threading.Thread(target=secondary.start) + start_secondary_thread.start() + + # Verify that the replica has otherwise started up, but cannot start + # accepting queries yet. + log.info("Waiting 5 s to verify that the secondary does not start") + start_secondary_thread.join(5) + assert secondary.log_contains("consistent recovery state reached") + assert secondary.log_contains("started streaming WAL from primary") + # The "redo starts" message is printed when the first WAL record is + # received. It might or might not be present in the log depending on how + # far exactly the WAL was flushed when the replica was started, and whether + # background activity caused any more WAL records to be flushed on the + # primary afterwards. + # + # assert secondary.log_contains("redo # starts") + + # should not be open for connections yet + assert start_secondary_thread.is_alive() + assert not secondary.is_running() + assert not secondary.log_contains("database system is ready to accept read-only connections") + + # Commit the large transaction in the primary. + # + # Within the next 15 s, the primary should write a new running-xacts record + # to the WAL which shows the transaction as completed. Once the replica + # replays that record, it will start accepting queries. + primary_cur.execute("commit") + start_secondary_thread.join() + + # Verify that the large transaction is correctly visible in the secondary + # (but not the second, small transaction, which is still in-progress!) 
+ secondary_conn = secondary.connect() + secondary_cur = secondary_conn.cursor() + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (100000,) + + # Perform some more MVCC testing using the second transaction that was + # started in the primary before the replica was created + primary_cur2.execute("select create_subxacts(10000)") + + # The second transaction still hasn't committed + wait_replica_caughtup(primary, secondary) + secondary_cur.execute("BEGIN ISOLATION LEVEL REPEATABLE READ") + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (100000,) + + # Commit the second transaction in the primary + primary_cur2.execute("commit") + + # Should still be invisible to the old snapshot + wait_replica_caughtup(primary, secondary) + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (100000,) + + # Commit the REPEATABLE READ transaction in the replica. Both + # primary transactions should now be visible to a new snapshot. + secondary_cur.execute("commit") + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (110001,) + + +def test_replica_too_many_known_assigned_xids(neon_simple_env: NeonEnv): + """ + The CLOG-scanning mechanism fills the known-assigned XIDs array + optimistically at standby startup, betting that it can still fit + upcoming transactions replayed later from the WAL in the + array. This test tests what happens when that bet fails and the + known-assigned XID array fills up after the standby has already + been started. The WAL redo will fail with an error: + + FATAL: too many KnownAssignedXids + CONTEXT: WAL redo at 0/1895CB0 for neon/INSERT: off: 25, flags: 0x08; blkref #0: rel 1663/5/16385, blk 64 + + which causes the standby to shut down. + + See the module docstring for background. + """ + + # Initialize the primary, a test table, and a helper function to create lots + # of subtransactions. + env = neon_simple_env + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + primary_cur.execute("CREATE EXTENSION neon_test_utils") + primary_cur.execute("create table t(pk serial primary key, payload integer)") + primary_cur.execute(CREATE_SUBXACTS_FUNC) + + # Determine how many connections we can use + primary_cur.execute("show max_connections") + max_connections = int(primary_cur.fetchall()[0][0]) + primary_cur.execute("show superuser_reserved_connections") + superuser_reserved_connections = int(primary_cur.fetchall()[0][0]) + n_connections = max_connections - superuser_reserved_connections + n_subxids = 200 + + # Start one top transaction in primary, with lots of subtransactions. This + # uses up much of the known-assigned XIDs space in the standby, but doesn't + # cause it to overflow. 
+ large_p_conn = primary.connect() + large_p_cur = large_p_conn.cursor() + large_p_cur.execute("begin") + large_p_cur.execute(f"select create_subxacts({max_connections} * 30)") + + with closing(primary.connect()) as small_p_conn: + with small_p_conn.cursor() as small_p_cur: + small_p_cur.execute("select create_subxacts(1)") + + # Create a replica at this LSN + primary_cur.execute("select neon_xlogflush()") + wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline) + secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + secondary_conn = secondary.connect() + secondary_cur = secondary_conn.cursor() + + # The transaction in primary has not committed yet. + wait_replica_caughtup(primary, secondary) + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (1,) + + # Start the max number of top transactions in the primary, with a lot of + # subtransactions each. We add the subtransactions to each top transaction + # in a round-robin fashion, instead of adding a lot of subtransactions to + # one top transaction at a time. This way, we will have the max number of + # subtransactions in the in-memory subxid cache of each top transaction, + # until they all overflow. + # + # Currently, PGPROC_MAX_CACHED_SUBXIDS == 64, so this will overflow all + # the subxid caches after creating 64 subxids in each top transaction. The + # point just before the caches have overflowed is the most interesting point + # in time, but we'll keep going beyond that, to ensure that this test is + # robust even if PGPROC_MAX_CACHED_SUBXIDS changes. + p_curs = [] + for _ in range(0, n_connections): + p_cur = primary.connect().cursor() + p_cur.execute("begin") + p_curs.append(p_cur) + + for _subxid in range(0, n_subxids): + for i in range(0, n_connections): + p_curs[i].execute("select create_subxacts(1)") + + # Commit all the transactions in the primary + for i in range(0, n_connections): + p_curs[i].execute("commit") + large_p_cur.execute("commit") + + # Wait until the replica crashes with "too many KnownAssignedXids" error. + def check_replica_crashed(): + try: + secondary.connect() + except psycopg2.Error: + # Once the connection fails, return success + return None + raise RuntimeError("connection succeeded") + + wait_until(20, 0.5, check_replica_crashed) + assert secondary.log_contains("too many KnownAssignedXids") + + # Replica is crashed, so ignore stop result + secondary.check_stop_result = False + + +def test_replica_start_repro_visibility_bug(neon_simple_env: NeonEnv): + """ + Before PR #7288, a hot standby in neon incorrectly started up + immediately, before it had received a running-xacts record. That + led to visibility bugs if there were active transactions in the + primary. This test reproduces the incorrect query results and + incorrectly set hint bits, before that was fixed. + """ + env = neon_simple_env + + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + p_cur = primary.connect().cursor() + + p_cur.execute("begin") + p_cur.execute("create table t(pk integer primary key, payload integer)") + p_cur.execute("insert into t values (generate_series(1,100000), 0)") + + secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + wait_replica_caughtup(primary, secondary) + s_cur = secondary.connect().cursor() + + # Set hint bits for pg_class tuples.
If primary's transaction is + # not marked as in-progress in MVCC snapshot, then XMIN_INVALID + # hint bit will be set for table's 't' tuple, making it invisible + # even after the commit record is replayed later. + s_cur.execute("select * from pg_class") + + p_cur.execute("commit") + wait_replica_caughtup(primary, secondary) + s_cur.execute("select * from t where pk = 1") + assert s_cur.fetchone() == (1, 0) + + +@pytest.mark.parametrize("shutdown", [True, False]) +def test_replica_start_with_prepared_xacts(neon_simple_env: NeonEnv, shutdown: bool): + """ + Test the CLOG-scanning mechanism at hot standby startup in the presence of + prepared transactions. + + This test is run in two variants: one where the primary server is shut down + before starting the secondary, or not. + """ + + # Initialize the primary, a test table, and a helper function to create lots + # of subtransactions. + env = neon_simple_env + primary = env.endpoints.create_start( + branch_name="main", endpoint_id="primary", config_lines=["max_prepared_transactions=5"] + ) + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + primary_cur.execute("CREATE EXTENSION neon_test_utils") + primary_cur.execute("create table t(pk serial primary key, payload integer)") + primary_cur.execute("create table t1(pk integer primary key)") + primary_cur.execute("create table t2(pk integer primary key)") + primary_cur.execute(CREATE_SUBXACTS_FUNC) + + # Prepare a transaction for two-phase commit + primary_cur.execute("begin") + primary_cur.execute("insert into t1 values (1)") + primary_cur.execute("prepare transaction 't1'") + + # Prepare another transaction for two-phase commit, with a subtransaction + primary_cur.execute("begin") + primary_cur.execute("insert into t2 values (2)") + primary_cur.execute("savepoint sp") + primary_cur.execute("insert into t2 values (3)") + primary_cur.execute("prepare transaction 't2'") + + # Start a transaction in the primary. Leave the transaction open. + # + # The transaction has some subtransactions, but not too many to cause the + # CLOG-scanning mechanism to give up. + primary_cur.execute("begin") + primary_cur.execute("select create_subxacts(50)") + + # Wait for the WAL to be flushed + primary_cur.execute("select neon_xlogflush()") + wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline) + + if shutdown: + primary.stop(mode="fast") + + # Create a replica. It should start up normally, thanks to the CLOG-scanning + # mechanism. 
+ secondary = env.endpoints.new_replica_start( + origin=primary, endpoint_id="secondary", config_lines=["max_prepared_transactions=5"] + ) + + # The transaction did not commit, so it should not be visible in the secondary + secondary_conn = secondary.connect() + secondary_cur = secondary_conn.cursor() + secondary_cur.execute("select count(*) from t") + assert secondary_cur.fetchone() == (0,) + secondary_cur.execute("select count(*) from t1") + assert secondary_cur.fetchone() == (0,) + secondary_cur.execute("select count(*) from t2") + assert secondary_cur.fetchone() == (0,) + + if shutdown: + primary.start() + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + else: + primary_cur.execute("commit") + primary_cur.execute("commit prepared 't1'") + primary_cur.execute("commit prepared 't2'") + + wait_replica_caughtup(primary, secondary) + + secondary_cur.execute("select count(*) from t") + if shutdown: + assert secondary_cur.fetchone() == (0,) + else: + assert secondary_cur.fetchone() == (50,) + secondary_cur.execute("select * from t1") + assert secondary_cur.fetchall() == [(1,)] + secondary_cur.execute("select * from t2") + assert secondary_cur.fetchall() == [(2,), (3,)] + + +def test_replica_start_with_prepared_xacts_with_subxacts(neon_simple_env: NeonEnv): + """ + Test the CLOG-scanning mechanism at hot standby startup in the presence of + prepared transactions, with subtransactions. + """ + + # Initialize the primary, a test table, and a helper function to create lots + # of subtransactions. + env = neon_simple_env + primary = env.endpoints.create_start( + branch_name="main", endpoint_id="primary", config_lines=["max_prepared_transactions=5"] + ) + primary_conn = primary.connect() + primary_cur = primary_conn.cursor() + + # Install extension containing function needed for test + primary_cur.execute("CREATE EXTENSION neon_test_utils") + + primary_cur.execute("create table t(pk serial primary key, payload integer)") + primary_cur.execute(CREATE_SUBXACTS_FUNC) + + # Advance nextXid close to the beginning of the next pg_subtrans segment (2^16 XIDs) + # + # This is interesting, because it tests that pg_subtrans is initialized correctly + # at standby startup. (We had a bug where it didn't at one point during development.) + while True: + xid = int(query_scalar(primary_cur, "SELECT txid_current()")) + log.info(f"xid now {xid}") + # Consume 500 transactions at a time until we get close + if xid < 65535 - 600: + primary_cur.execute("select test_consume_xids(500);") + else: + break + primary_cur.execute("checkpoint") + + # Prepare a transaction for two-phase commit + primary_cur.execute("begin") + primary_cur.execute("select create_subxacts(1000)") + primary_cur.execute("prepare transaction 't1'") + + # Wait for the WAL to be flushed, and stop the primary + wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline) + primary.stop(mode="fast") + + # Create a replica. It should start up normally, thanks to the CLOG-scanning + # mechanism. 
+    secondary = env.endpoints.new_replica_start(
+        origin=primary, endpoint_id="secondary", config_lines=["max_prepared_transactions=5"]
+    )
+
+    # The transaction did not commit, so it should not be visible in the secondary
+    secondary_conn = secondary.connect()
+    secondary_cur = secondary_conn.cursor()
+    secondary_cur.execute("select count(*) from t")
+    assert secondary_cur.fetchone() == (0,)
+
+    primary.start()
+
+    # Open a lot of subtransactions in the primary, causing the subxids cache to overflow
+    primary_conn = primary.connect()
+    primary_cur = primary_conn.cursor()
+    primary_cur.execute("select create_subxacts(100000)")
+
+    wait_replica_caughtup(primary, secondary)
+
+    secondary_cur.execute("select count(*) from t")
+    assert secondary_cur.fetchone() == (100000,)
+
+    primary_cur.execute("commit prepared 't1'")
+
+    wait_replica_caughtup(primary, secondary)
+    secondary_cur.execute("select count(*) from t")
+    assert secondary_cur.fetchone() == (101000,)
+
+
+def test_replica_start_with_prepared_xacts_with_many_subxacts(neon_simple_env: NeonEnv):
+    """
+    Test the CLOG-scanning mechanism at hot standby startup in the presence of
+    prepared transactions, with lots of subtransactions.
+
+    Like test_replica_start_with_prepared_xacts_with_subxacts, but with more
+    subxacts, to test that the prepared transaction's subxids don't consume
+    space in the known-assigned XIDs array. (They are set in pg_subtrans
+    instead)
+    """
+
+    # Initialize the primary, a test table, and a helper function to create lots
+    # of subtransactions.
+    env = neon_simple_env
+    primary = env.endpoints.create_start(
+        branch_name="main", endpoint_id="primary", config_lines=["max_prepared_transactions=5"]
+    )
+    primary_conn = primary.connect()
+    primary_cur = primary_conn.cursor()
+
+    # Install extension containing function needed for test
+    primary_cur.execute("CREATE EXTENSION neon_test_utils")
+
+    primary_cur.execute("create table t(pk serial primary key, payload integer)")
+    primary_cur.execute(CREATE_SUBXACTS_FUNC)
+
+    # Prepare a transaction for two-phase commit, with lots of subxids
+    primary_cur.execute("begin")
+    primary_cur.execute("select create_subxacts(50000)")
+
+    # to make things a bit more varied, intersperse a few other XIDs in between
+    # the prepared transaction's sub-XIDs
+    with primary.connect().cursor() as primary_cur2:
+        primary_cur2.execute("insert into t (payload) values (123)")
+        primary_cur2.execute("begin; insert into t (payload) values (-1); rollback")
+
+    primary_cur.execute("select create_subxacts(50000)")
+    primary_cur.execute("prepare transaction 't1'")
+
+    # Wait for the WAL to be flushed
+    wait_for_last_flush_lsn(env, primary, env.initial_tenant, env.initial_timeline)
+
+    primary.stop(mode="fast")
+
+    # Create a replica. It should start up normally, thanks to the CLOG-scanning
+    # mechanism.
+    secondary = env.endpoints.new_replica_start(
+        origin=primary, endpoint_id="secondary", config_lines=["max_prepared_transactions=5"]
+    )
+
+    # The transaction did not commit, so it should not be visible in the secondary
+    secondary_conn = secondary.connect()
+    secondary_cur = secondary_conn.cursor()
+    secondary_cur.execute("select count(*) from t")
+    assert secondary_cur.fetchone() == (1,)
+
+    primary.start()
+
+    # Open a lot of subtransactions in the primary, causing the subxids cache to overflow
+    primary_conn = primary.connect()
+    primary_cur = primary_conn.cursor()
+    primary_cur.execute("select create_subxacts(100000)")
+
+    wait_replica_caughtup(primary, secondary)
+
+    secondary_cur.execute("select count(*) from t")
+    assert secondary_cur.fetchone() == (100001,)
+
+    primary_cur.execute("commit prepared 't1'")
+
+    wait_replica_caughtup(primary, secondary)
+    secondary_cur.execute("select count(*) from t")
+    assert secondary_cur.fetchone() == (200001,)
diff --git a/test_runner/regress/test_replication_start.py b/test_runner/regress/test_replication_start.py
deleted file mode 100644
index 236074599021..000000000000
--- a/test_runner/regress/test_replication_start.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import pytest
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv, wait_replica_caughtup
-
-
-@pytest.mark.xfail
-def test_replication_start(neon_simple_env: NeonEnv):
-    env = neon_simple_env
-
-    with env.endpoints.create_start(branch_name="main", endpoint_id="primary") as primary:
-        with primary.connect() as p_con:
-            with p_con.cursor() as p_cur:
-                p_cur.execute("begin")
-                p_cur.execute("create table t(pk integer primary key, payload integer)")
-                p_cur.execute("insert into t values (generate_series(1,100000), 0)")
-                p_cur.execute("select txid_current()")
-                xid = p_cur.fetchall()[0][0]
-                log.info(f"Master transaction {xid}")
-                with env.endpoints.new_replica_start(
-                    origin=primary, endpoint_id="secondary"
-                ) as secondary:
-                    wait_replica_caughtup(primary, secondary)
-                    with secondary.connect() as s_con:
-                        with s_con.cursor() as s_cur:
-                            # Enforce setting hint bits for pg_class tuples.
-                            # If master's transaction is not marked as in-progress in MVCC snapshot,
-                            # then XMIN_INVALID hint bit will be set for table's 't' tuple makeing it invisible.
-                            s_cur.execute("select * from pg_class")
-                            p_cur.execute("commit")
-                            wait_replica_caughtup(primary, secondary)
-                            s_cur.execute("select * from t where pk = 1")
-                            assert s_cur.fetchone() == (1, 0)
diff --git a/vendor/postgres-v14 b/vendor/postgres-v14
index 223dd925959f..ad73770c446e 160000
--- a/vendor/postgres-v14
+++ b/vendor/postgres-v14
@@ -1 +1 @@
-Subproject commit 223dd925959f8124711dd3d867dc8ba6629d52c0
+Subproject commit ad73770c446ea361f43e4f0404798b7e5e7a62d8
diff --git a/vendor/postgres-v15 b/vendor/postgres-v15
index f54d7373eb0d..4874c8e52ed3 160000
--- a/vendor/postgres-v15
+++ b/vendor/postgres-v15
@@ -1 +1 @@
-Subproject commit f54d7373eb0de5a54bce2becdb1c801026c7edff
+Subproject commit 4874c8e52ed349a9f8290bbdcd91eb92677a5d24
diff --git a/vendor/postgres-v16 b/vendor/postgres-v16
index e06bebc75306..b810fdfcbb59 160000
--- a/vendor/postgres-v16
+++ b/vendor/postgres-v16
@@ -1 +1 @@
-Subproject commit e06bebc75306b583e758b52c95946d41109239b2
+Subproject commit b810fdfcbb59afea7ea7bbe0cf94eaccb55a2ea2
diff --git a/vendor/revisions.json b/vendor/revisions.json
index 574e3719340e..da49ff19c3ec 100644
--- a/vendor/revisions.json
+++ b/vendor/revisions.json
@@ -1,5 +1,5 @@
 {
-    "v16": ["16.3", "e06bebc75306b583e758b52c95946d41109239b2"],
-    "v15": ["15.7", "f54d7373eb0de5a54bce2becdb1c801026c7edff"],
-    "v14": ["14.12", "223dd925959f8124711dd3d867dc8ba6629d52c0"]
+    "v16": ["16.3", "b810fdfcbb59afea7ea7bbe0cf94eaccb55a2ea2"],
+    "v15": ["15.7", "4874c8e52ed349a9f8290bbdcd91eb92677a5d24"],
+    "v14": ["14.12", "ad73770c446ea361f43e4f0404798b7e5e7a62d8"]
 }
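The tests added above all target the same startup path: roughly speaking, instead of waiting for a running-xacts record from the primary, the standby scans the CLOG for every XID between the checkpoint's oldest active XID and next XID and treats any XID that is neither committed nor aborted as still in progress, while prepared transactions are known from their two-phase state and their subtransaction XIDs are recorded in pg_subtrans rather than in the known-assigned XIDs array. The following is a minimal Python sketch of that idea, included only as an aid for reviewing the tests; the names used here (XidStatus, scan_clog_for_running_xacts, clog_status, prepared_xids) are invented for illustration and do not correspond to symbols in the vendored Postgres branches.

# Simplified, illustrative model of the CLOG-scanning startup logic exercised
# by the tests above. All names are invented for this sketch.
from enum import Enum


class XidStatus(Enum):
    IN_PROGRESS = 0
    COMMITTED = 1
    ABORTED = 2


def scan_clog_for_running_xacts(clog_status, oldest_active_xid, next_xid, prepared_xids):
    """Return the set of XIDs a standby should treat as still running at startup.

    Every XID in [oldest_active_xid, next_xid) whose CLOG status is neither
    committed nor aborted is assumed to be in progress, so tuples written by it
    stay invisible until a commit record for it is replayed. Prepared
    transactions are already known from their two-phase state and are tracked
    separately; their subxids live in pg_subtrans, not the known-assigned array.
    """
    running = set()
    for xid in range(oldest_active_xid, next_xid):
        if xid in prepared_xids:
            continue  # tracked via two-phase state, not the known-assigned array
        if clog_status.get(xid, XidStatus.IN_PROGRESS) is XidStatus.IN_PROGRESS:
            running.add(xid)
    return running


# Example: XID 1000 committed, 1001 aborted, 1002 still open, 1003 prepared.
clog = {1000: XidStatus.COMMITTED, 1001: XidStatus.ABORTED}
assert scan_clog_for_running_xacts(clog, 1000, 1004, prepared_xids={1003}) == {1002}

Under this model, the "give up" case mentioned in the first test's comment corresponds to finding more in-progress XIDs than fit in the standby's fixed-size known-assigned XIDs array, which is presumably why that test deliberately keeps the number of open subtransactions small while the later tests push large subxid counts through pg_subtrans instead.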