From 3e63d0f9e0532d0e49668bf41b2534ffcee0ca2a Mon Sep 17 00:00:00 2001
From: Alex Chi Z
Date: Mon, 10 Jun 2024 04:42:13 -0400
Subject: [PATCH 01/41] test(pageserver): quantify compaction outcome (#7867)

A simple API to collect some statistics after compaction to easily
understand the result.

The tool reads the layer map and analyzes it range by range instead of
doing single-key operations, which is more efficient than running a
benchmark to collect the result. It currently computes two key metrics:

* Latest data access efficiency, which finds how many delta layers /
  image layers the system needs to iterate over before returning any key
  in a key range.
* (Approximate) PiTR efficiency, as in
  https://github.com/neondatabase/neon/issues/7770, which is simply the
  number of delta files in the range. The reasoning is that, assuming no
  image layer is created, PiTR efficiency is simply the cost of
  collecting records from the delta layers plus the replay time. The
  number of delta files (or, in the future, the estimated size of reads)
  is a simple yet effective way of estimating how much effort the
  pageserver needs to spend to reconstruct a page.

Signed-off-by: Alex Chi Z
---
 pageserver/src/http/routes.rs               | 23 +++++
 pageserver/src/tenant/timeline.rs           |  1 +
 pageserver/src/tenant/timeline/analysis.rs  | 90 +++++++++++++++++++
 .../src/tenant/timeline/layer_manager.rs    |  5 ++
 test_runner/fixtures/pageserver/http.py     | 15 ++++
 test_runner/performance/test_gc_feedback.py | 17 ++++
 6 files changed, 151 insertions(+)
 create mode 100644 pageserver/src/tenant/timeline/analysis.rs

diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs
index 19bc88fbc7cf..12d02c52fedd 100644
--- a/pageserver/src/http/routes.rs
+++ b/pageserver/src/http/routes.rs
@@ -2429,6 +2429,25 @@ async fn list_aux_files(
     json_response(StatusCode::OK, files)
 }
 
+async fn perf_info(
+    request: Request<Body>,
+    _cancel: CancellationToken,
+) -> Result<Response<Body>, ApiError> {
+    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
+    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
+    check_permission(&request, Some(tenant_shard_id.tenant_id))?;
+
+    let state = get_state(&request);
+
+    let timeline =
+        active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
+            .await?;
+
+    let result = timeline.perf_info().await;
+
+    json_response(StatusCode::OK, result)
+}
+
 async fn ingest_aux_files(
     mut request: Request<Body>,
     _cancel: CancellationToken,
@@ -2856,5 +2875,9 @@ pub fn make_router(
             |r| testing_api_handler("list_aux_files", r, list_aux_files),
         )
         .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants))
+        .post(
+            "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info",
+            |r| testing_api_handler("perf_info", r, perf_info),
+        )
         .any(handler_404))
 }
diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs
index 32cf7be0f7fc..388d5b9d5426 100644
--- a/pageserver/src/tenant/timeline.rs
+++ b/pageserver/src/tenant/timeline.rs
@@ -1,3 +1,4 @@
+pub(crate) mod analysis;
 mod compaction;
 pub mod delete;
 pub(crate) mod detach_ancestor;
diff --git a/pageserver/src/tenant/timeline/analysis.rs b/pageserver/src/tenant/timeline/analysis.rs
new file mode 100644
index 000000000000..cd61418f3d31
--- /dev/null
+++ b/pageserver/src/tenant/timeline/analysis.rs
@@ -0,0 +1,90 @@
+use std::{collections::BTreeSet, ops::Range};
+
+use utils::lsn::Lsn;
+
+use super::Timeline;
+
+#[derive(serde::Serialize)]
+pub(crate) struct RangeAnalysis {
+    start: String,
+    end: String,
+    has_image: 
bool, + num_of_deltas_above_image: usize, + total_num_of_deltas: usize, +} + +impl Timeline { + pub(crate) async fn perf_info(&self) -> Vec { + // First, collect all split points of the layers. + let mut split_points = BTreeSet::new(); + let mut delta_ranges = Vec::new(); + let mut image_ranges = Vec::new(); + + let all_layer_files = { + let guard = self.layers.read().await; + guard.all_persistent_layers() + }; + let lsn = self.get_last_record_lsn(); + + for key in all_layer_files { + split_points.insert(key.key_range.start); + split_points.insert(key.key_range.end); + if key.is_delta { + delta_ranges.push((key.key_range.clone(), key.lsn_range.clone())); + } else { + image_ranges.push((key.key_range.clone(), key.lsn_range.start)); + } + } + + // For each split range, compute the estimated read amplification. + let split_points = split_points.into_iter().collect::>(); + + let mut result = Vec::new(); + + for i in 0..(split_points.len() - 1) { + let start = split_points[i]; + let end = split_points[i + 1]; + // Find the latest image layer that contains the information. + let mut maybe_image_layers = image_ranges + .iter() + // We insert split points for all image layers, and therefore a `contains` check for the start point should be enough. + .filter(|(key_range, img_lsn)| key_range.contains(&start) && img_lsn <= &lsn) + .cloned() + .collect::>(); + maybe_image_layers.sort_by(|a, b| a.1.cmp(&b.1)); + let image_layer = maybe_image_layers.last().cloned(); + let lsn_filter_start = image_layer + .as_ref() + .map(|(_, lsn)| *lsn) + .unwrap_or(Lsn::INVALID); + + fn overlaps_with(lsn_range_a: &Range, lsn_range_b: &Range) -> bool { + !(lsn_range_a.end <= lsn_range_b.start || lsn_range_a.start >= lsn_range_b.end) + } + + let maybe_delta_layers = delta_ranges + .iter() + .filter(|(key_range, lsn_range)| { + key_range.contains(&start) && overlaps_with(&(lsn_filter_start..lsn), lsn_range) + }) + .cloned() + .collect::>(); + + let pitr_delta_layers = delta_ranges + .iter() + .filter(|(key_range, _)| key_range.contains(&start)) + .cloned() + .collect::>(); + + result.push(RangeAnalysis { + start: start.to_string(), + end: end.to_string(), + has_image: image_layer.is_some(), + num_of_deltas_above_image: maybe_delta_layers.len(), + total_num_of_deltas: pitr_delta_layers.len(), + }); + } + + result + } +} diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index b78c98a506d8..0e82dedecb18 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -1,4 +1,5 @@ use anyhow::{bail, ensure, Context, Result}; +use itertools::Itertools; use pageserver_api::shard::TenantShardId; use std::{collections::HashMap, sync::Arc}; use tracing::trace; @@ -308,6 +309,10 @@ impl LayerManager { pub(crate) fn contains(&self, layer: &Layer) -> bool { self.layer_fmgr.contains(layer) } + + pub(crate) fn all_persistent_layers(&self) -> Vec { + self.layer_fmgr.0.keys().cloned().collect_vec() + } } pub(crate) struct LayerFileManager(HashMap); diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 08bf66058a68..d5441bd69464 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -923,3 +923,18 @@ def top_tenants( ) self.verbose_error(res) return res.json() # type: ignore + + def perf_info( + self, + tenant_id: Union[TenantId, TenantShardId], + timeline_id: TimelineId, + ): + self.is_testing_enabled_or_skip() + + 
log.info(f"Requesting perf info: tenant {tenant_id}, timeline {timeline_id}") + res = self.post( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/perf_info", + ) + log.info(f"Got perf info response code: {res.status_code}") + self.verbose_error(res) + return res.json() diff --git a/test_runner/performance/test_gc_feedback.py b/test_runner/performance/test_gc_feedback.py index be56203b26b8..9a03994b2925 100644 --- a/test_runner/performance/test_gc_feedback.py +++ b/test_runner/performance/test_gc_feedback.py @@ -75,12 +75,29 @@ def test_gc_feedback(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma physical_size = client.timeline_detail(tenant_id, timeline_id)["current_physical_size"] log.info(f"Physical storage size {physical_size}") + max_num_of_deltas_above_image = 0 + max_total_num_of_deltas = 0 + for key_range in client.perf_info(tenant_id, timeline_id): + max_total_num_of_deltas = max(max_total_num_of_deltas, key_range["total_num_of_deltas"]) + max_num_of_deltas_above_image = max( + max_num_of_deltas_above_image, key_range["num_of_deltas_above_image"] + ) + MB = 1024 * 1024 zenbenchmark.record("logical_size", logical_size // MB, "Mb", MetricReport.LOWER_IS_BETTER) zenbenchmark.record("physical_size", physical_size // MB, "Mb", MetricReport.LOWER_IS_BETTER) zenbenchmark.record( "physical/logical ratio", physical_size / logical_size, "", MetricReport.LOWER_IS_BETTER ) + zenbenchmark.record( + "max_total_num_of_deltas", max_total_num_of_deltas, "", MetricReport.LOWER_IS_BETTER + ) + zenbenchmark.record( + "max_num_of_deltas_above_image", + max_num_of_deltas_above_image, + "", + MetricReport.LOWER_IS_BETTER, + ) layer_map_path = env.repo_dir / "layer-map.json" log.info(f"Writing layer map to {layer_map_path}") From ae5badd375e284ed6098503c3e4ead09995b902f Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Mon, 10 Jun 2024 13:20:20 +0200 Subject: [PATCH 02/41] Revert "Include openssl and ICU statically linked" (#8003) Reverts neondatabase/neon#7956 Rationale: compute incompatibilties Slack thread: https://neondb.slack.com/archives/C033RQ5SPDH/p1718011276665839?thread_ts=1718008160.431869&cid=C033RQ5SPDH Relevant quotes from @hlinnaka > If we go through with the current release candidate, but the compute is pinned, people who create new projects will get that warning, which is silly. To them, it looks like the ICU version was downgraded, because initdb was run with newer version. > We should upgrade the ICU version eventually. And when we do that, users with old projects that use ICU will start to see that warning. I think that's acceptable, as long as we do homework, notify users, and communicate that properly. > When do that, we should to try to upgrade the storage and compute versions at roughly the same time. 
--- .github/workflows/build_and_test.yml | 6 +++--- Dockerfile | 2 ++ Dockerfile.build-tools | 32 ---------------------------- Makefile | 15 +------------ 4 files changed, 6 insertions(+), 49 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1fc0fbb0b60d..b9caf7606020 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -299,21 +299,21 @@ jobs: uses: actions/cache@v4 with: path: pg_install/v14 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Cache postgres v15 build id: cache_pg_15 uses: actions/cache@v4 with: path: pg_install/v15 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Cache postgres v16 build id: cache_pg_16 uses: actions/cache@v4 with: path: pg_install/v16 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Build postgres v14 if: steps.cache_pg_14.outputs.cache-hit != 'true' diff --git a/Dockerfile b/Dockerfile index b4900d4a94a1..5f82df3e1811 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,6 +69,8 @@ RUN set -e \ && apt install -y \ libreadline-dev \ libseccomp-dev \ + libicu67 \ + openssl \ ca-certificates \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ && useradd -d /data neon \ diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index 91194eda1a84..460b8c996d0a 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -112,35 +112,6 @@ RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JS && make install \ && rm -rf ../lcov.tar.gz -# Compile and install the static OpenSSL library -ENV OPENSSL_VERSION=3.2.2 -ENV OPENSSL_PREFIX=/usr/local/openssl -RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \ - echo "197149c18d9e9f292c43f0400acaba12e5f52cacfe050f3d199277ea738ec2e7 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ - cd /tmp && \ - tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ - rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ - cd /tmp/openssl-${OPENSSL_VERSION} && \ - ./config --prefix=${OPENSSL_PREFIX} -static --static no-shared -fPIC && \ - make ${MAKE_ARGS} && \ - make install && \ - cd /tmp && \ - rm -rf /tmp/openssl-${OPENSSL_VERSION} - -# Set the ICU version -ENV ICU_VERSION=72.1 -ENV ICU_PREFIX=/usr/local/icu - -# Download and build static ICU -RUN wget https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \ - tar -xzf icu4c-${ICU_VERSION//./_}-src.tgz && \ - cd icu/source && \ - ./configure --prefix=${ICU_PREFIX} --enable-static --enable-shared=no CXXFLAGS="-fPIC" CFLAGS="-fPIC" && \ - make && \ - make install && \ - cd ../.. 
&& \ - rm -rf icu icu4c-${ICU_VERSION//./_}-src.tgz - # Switch to nonroot user USER nonroot:nonroot WORKDIR /home/nonroot @@ -199,6 +170,3 @@ RUN whoami \ && rustup --version --verbose \ && rustc --version --verbose \ && clang --version - -# Set following flag to check in Makefile if its running in Docker -RUN touch /home/nonroot/.docker_build diff --git a/Makefile b/Makefile index b5f426344ef7..dcbfdbcbc15a 100644 --- a/Makefile +++ b/Makefile @@ -3,9 +3,6 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Where to install Postgres, default is ./pg_install, maybe useful for package managers POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ -OPENSSL_PREFIX_DIR := /usr/local/openssl -ICU_PREFIX_DIR := /usr/local/icu - # # We differentiate between release / debug build types using the BUILD_TYPE # environment variable. @@ -23,16 +20,6 @@ else $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) endif -ifeq ($(shell test -e /home/nonroot/.docker_build && echo -n yes),yes) - # Exclude static build openssl, icu for local build (MacOS, Linux) - # Only keep for build type release and debug - PG_CFLAGS += -I$(OPENSSL_PREFIX_DIR)/include - PG_CONFIGURE_OPTS += --with-icu - PG_CONFIGURE_OPTS += ICU_CFLAGS='-I/$(ICU_PREFIX_DIR)/include -DU_STATIC_IMPLEMENTATION' - PG_CONFIGURE_OPTS += ICU_LIBS='-L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -licui18n -licuuc -licudata -lstdc++ -Wl,-Bdynamic -lm' - PG_CONFIGURE_OPTS += LDFLAGS='-L$(OPENSSL_PREFIX_DIR)/lib -L$(OPENSSL_PREFIX_DIR)/lib64 -L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -Wl,-Bstatic -lssl -lcrypto -Wl,-Bdynamic -lrt -lm -ldl -lpthread' -endif - UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) # Seccomp BPF is only available for Linux @@ -41,7 +28,7 @@ else ifeq ($(UNAME_S),Darwin) ifndef DISABLE_HOMEBREW # macOS with brew-installed openssl requires explicit paths # It can be configured with OPENSSL_PREFIX variable - OPENSSL_PREFIX := $(shell brew --prefix openssl@3) + OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3) PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig # macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure From 5a7e285c2c98d0ae15c6e2d7059881bf52a23027 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 10 Jun 2024 15:52:49 +0300 Subject: [PATCH 03/41] Simplify scanning compute logs in tests (#7997) Implement LogUtils in the Endpoint fixture class, so that the "log_contains" function can be used on compute logs too. 
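A minimal sketch of what this enables in a test body (hypothetical test; it assumes only the fixture surface touched by this patch, namely `Endpoint.log_contains` and `Endpoint.assert_log_contains`):

```python
from fixtures.neon_fixtures import NeonEnv


def test_compute_log_scan_sketch(neon_simple_env: NeonEnv):
    env = neon_simple_env
    endpoint = env.endpoints.create_start("main")
    endpoint.safe_psql("SELECT 1")

    # No need to open endpoint.endpoint_path() / "compute.log" by hand anymore:
    # log_contains() is truthy when the (regex) pattern occurs in compute.log,
    # and assert_log_contains() raises if the pattern never shows up (see the
    # test_migrations.py hunk below for a real call).
    assert not endpoint.log_contains("unexpected pageaddr|invalid magic number")
```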
Per discussion at: https://github.com/neondatabase/neon/pull/7288#discussion_r1623633803 --- test_runner/fixtures/neon_fixtures.py | 3 ++- test_runner/regress/test_hot_standby.py | 21 +++++---------------- test_runner/regress/test_migrations.py | 10 ++-------- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index a25b8bfca120..6fdad2188ccb 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3386,7 +3386,7 @@ def static_proxy( yield proxy -class Endpoint(PgProtocol): +class Endpoint(PgProtocol, LogUtils): """An object representing a Postgres compute endpoint managed by the control plane.""" def __init__( @@ -3452,6 +3452,7 @@ def create( ) path = Path("endpoints") / self.endpoint_id / "pgdata" self.pgdata_dir = os.path.join(self.env.repo_dir, path) + self.logfile = self.endpoint_path() / "compute.log" config_lines = config_lines or [] diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index cf7a1c56ee12..1d1b2fb48564 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -1,6 +1,5 @@ import asyncio import os -import re import threading import time from functools import partial @@ -18,20 +17,6 @@ from fixtures.utils import wait_until -# Check for corrupted WAL messages which might otherwise go unnoticed if -# reconnection fixes this. -def scan_standby_log_for_errors(secondary): - log_path = secondary.endpoint_path() / "compute.log" - with log_path.open("r") as f: - markers = re.compile( - r"incorrect resource manager data|record with incorrect|invalid magic number|unexpected pageaddr" - ) - for line in f: - if markers.search(line): - log.info(f"bad error in standby log: {line}") - raise AssertionError() - - def test_hot_standby(neon_simple_env: NeonEnv): env = neon_simple_env @@ -91,7 +76,11 @@ def test_hot_standby(neon_simple_env: NeonEnv): assert response is not None assert response == responses[query] - scan_standby_log_for_errors(secondary) + # Check for corrupted WAL messages which might otherwise go unnoticed if + # reconnection fixes this. 
+ assert not secondary.log_contains( + "incorrect resource manager data|record with incorrect|invalid magic number|unexpected pageaddr" + ) # clean up if slow_down_send: diff --git a/test_runner/regress/test_migrations.py b/test_runner/regress/test_migrations.py index 526ae14b8715..5637f160cfcb 100644 --- a/test_runner/regress/test_migrations.py +++ b/test_runner/regress/test_migrations.py @@ -8,8 +8,6 @@ def test_migrations(neon_simple_env: NeonEnv): env.neon_cli.create_branch("test_migrations", "empty") endpoint = env.endpoints.create("test_migrations") - log_path = endpoint.endpoint_path() / "compute.log" - endpoint.respec(skip_pg_catalog_updates=False) endpoint.start() @@ -22,9 +20,7 @@ def test_migrations(neon_simple_env: NeonEnv): migration_id = cur.fetchall() assert migration_id[0][0] == num_migrations - with open(log_path, "r") as log_file: - logs = log_file.read() - assert f"INFO handle_migrations: Ran {num_migrations} migrations" in logs + endpoint.assert_log_contains(f"INFO handle_migrations: Ran {num_migrations} migrations") endpoint.stop() endpoint.start() @@ -36,6 +32,4 @@ def test_migrations(neon_simple_env: NeonEnv): migration_id = cur.fetchall() assert migration_id[0][0] == num_migrations - with open(log_path, "r") as log_file: - logs = log_file.read() - assert "INFO handle_migrations: Ran 0 migrations" in logs + endpoint.assert_log_contains("INFO handle_migrations: Ran 0 migrations") From b52e31c1a42e186d578975cce632bf244c5f2957 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 10 Jun 2024 16:50:17 +0300 Subject: [PATCH 04/41] fix: allow layer flushes more often (#7927) As seen with the pgvector 0.7.0 index builds, we can receive large batches of images, leading to very large L0 layers in the range of 1GB. These large layers are produced because we are only able to roll the layer after we have witnessed two different Lsns in a single `DataDirModification::commit`. As the single Lsn batches of images can span over multiple `DataDirModification` lifespans, we will rarely get to write two different Lsns in a single `put_batch` currently. The solution is to remember the TimelineWriterState instead of eagerly forgetting it until we really open the next layer or someone else flushes (while holding the write_guard). Additional changes are test fixes to avoid "initdb image layer optimization" or ignoring initdb layers for assertion. Cc: #7197 because small `checkpoint_distance` will now trigger the "initdb image layer optimization" --- .../tenant/storage_layer/inmemory_layer.rs | 2 +- pageserver/src/tenant/timeline.rs | 40 +++-- test_runner/fixtures/pageserver/utils.py | 2 +- test_runner/fixtures/utils.py | 17 ++ .../regress/test_disk_usage_eviction.py | 22 +-- .../regress/test_ingestion_layer_size.py | 151 ++++++++++++++++++ .../regress/test_pageserver_secondary.py | 1 + test_runner/regress/test_s3_restore.py | 18 ++- test_runner/regress/test_sharding.py | 29 +++- 9 files changed, 245 insertions(+), 37 deletions(-) create mode 100644 test_runner/regress/test_ingestion_layer_size.py diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index 9553f8302678..1ecc56ce993f 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -52,7 +52,7 @@ pub struct InMemoryLayer { /// Frozen layers have an exclusive end LSN. /// Writes are only allowed when this is `None`. 
- end_lsn: OnceLock, + pub(crate) end_lsn: OnceLock, /// Used for traversal path. Cached representation of the in-memory layer before frozen. local_path_str: Arc, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 388d5b9d5426..58bdd8490665 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -322,6 +322,8 @@ pub struct Timeline { /// Locked automatically by [`TimelineWriter`] and checkpointer. /// Must always be acquired before the layer map/individual layer lock /// to avoid deadlock. + /// + /// The state is cleared upon freezing. write_lock: tokio::sync::Mutex>, /// Used to avoid multiple `flush_loop` tasks running @@ -1578,7 +1580,7 @@ impl Timeline { // an ephemeral layer open forever when idle. It also freezes layers if the global limit on // ephemeral layer bytes has been breached. pub(super) async fn maybe_freeze_ephemeral_layer(&self) { - let Ok(_write_guard) = self.write_lock.try_lock() else { + let Ok(mut write_guard) = self.write_lock.try_lock() else { // If the write lock is held, there is an active wal receiver: rolling open layers // is their responsibility while they hold this lock. return; @@ -1672,6 +1674,7 @@ impl Timeline { .await; } } + write_guard.take(); self.flush_frozen_layers(); } } @@ -2036,11 +2039,11 @@ impl Timeline { true } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() { info!( - "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})", - projected_lsn, - layer_size, - opened_at.elapsed() - ); + "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})", + projected_lsn, + layer_size, + opened_at.elapsed() + ); true } else { @@ -3653,7 +3656,10 @@ impl Timeline { let _write_guard = if write_lock_held { None } else { - Some(self.write_lock.lock().await) + let mut g = self.write_lock.lock().await; + // remove the reference to an open layer + g.take(); + Some(g) }; let to_lsn = self.get_last_record_lsn(); @@ -5541,6 +5547,9 @@ impl Timeline { type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); +/// Tracking writes ingestion does to a particular in-memory layer. +/// +/// Cleared upon freezing a layer. struct TimelineWriterState { open_layer: Arc, current_size: u64, @@ -5581,12 +5590,6 @@ impl Deref for TimelineWriter<'_> { } } -impl Drop for TimelineWriter<'_> { - fn drop(&mut self) { - self.write_guard.take(); - } -} - #[derive(PartialEq)] enum OpenLayerAction { Roll, @@ -5692,6 +5695,17 @@ impl<'a> TimelineWriter<'a> { return OpenLayerAction::Open; }; + if state.cached_last_freeze_at < self.tl.last_freeze_at.load() { + // TODO(#7993): branch is needed before refactoring the many places of freezing for the + // possibility `state` having a "dangling" reference to an already frozen in-memory + // layer. + assert!( + state.open_layer.end_lsn.get().is_some(), + "our open_layer must be outdated" + ); + return OpenLayerAction::Open; + } + if state.prev_lsn == Some(lsn) { // Rolling mid LSN is not supported by downstream code. // Hence, only roll at LSN boundaries. 
diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 91435e8a1f67..72384c138b54 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -313,7 +313,7 @@ def assert_prefix_empty( # https://neon-github-public-dev.s3.amazonaws.com/reports/pr-5322/6207777020/index.html#suites/3556ed71f2d69272a7014df6dcb02317/53b5c368b5a68865 # this seems like a mock_s3 issue log.warning( - f"contrading ListObjectsV2 response with KeyCount={keys} and Contents={objects}, CommonPrefixes={common_prefixes}, assuming this means KeyCount=0" + f"contradicting ListObjectsV2 response with KeyCount={keys} and Contents={objects}, CommonPrefixes={common_prefixes}, assuming this means KeyCount=0" ) keys = 0 elif keys != 0 and len(objects) == 0: diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index b55329e054ce..0989dc1893a7 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -582,3 +582,20 @@ def join(self, timeout=None): if self.exc: raise self.exc return self.ret + + +def human_bytes(amt: float) -> str: + """ + Render a bytes amount into nice IEC bytes string. + """ + + suffixes = ["", "Ki", "Mi", "Gi"] + + last = suffixes[-1] + + for name in suffixes: + if amt < 1024 or name == last: + return f"{int(round(amt))} {name}B" + amt = amt / 1024 + + raise RuntimeError("unreachable") diff --git a/test_runner/regress/test_disk_usage_eviction.py b/test_runner/regress/test_disk_usage_eviction.py index 7ae2352c0623..7722828c79dd 100644 --- a/test_runner/regress/test_disk_usage_eviction.py +++ b/test_runner/regress/test_disk_usage_eviction.py @@ -17,7 +17,7 @@ from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import wait_for_upload_queue_empty from fixtures.remote_storage import RemoteStorageKind -from fixtures.utils import wait_until +from fixtures.utils import human_bytes, wait_until GLOBAL_LRU_LOG_LINE = "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy" @@ -218,19 +218,6 @@ def count_layers_per_tenant( return dict(ret) -def human_bytes(amt: float) -> str: - suffixes = ["", "Ki", "Mi", "Gi"] - - last = suffixes[-1] - - for name in suffixes: - if amt < 1024 or name == last: - return f"{int(round(amt))} {name}B" - amt = amt / 1024 - - raise RuntimeError("unreachable") - - def _eviction_env( request, neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, num_pageservers: int ) -> EvictionEnv: @@ -294,7 +281,7 @@ def pgbench_init_tenant( "gc_period": "0s", "compaction_period": "0s", "checkpoint_distance": f"{layer_size}", - "image_creation_threshold": "100", + "image_creation_threshold": "999999", "compaction_target_size": f"{layer_size}", } ) @@ -668,11 +655,10 @@ def test_fast_growing_tenant(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, or finish_tenant_creation(env, tenant_id, timeline_id, min_expected_layers) tenant_layers = count_layers_per_tenant(env.pageserver, map(lambda x: x[0], timelines)) - (total_on_disk, _, _) = poor_mans_du(env, map(lambda x: x[0], timelines), env.pageserver, False) + (total_on_disk, _, _) = poor_mans_du(env, map(lambda x: x[0], timelines), env.pageserver, True) - # cut 10 percent response = env.pageserver.http_client().disk_usage_eviction_run( - {"evict_bytes": total_on_disk // 10, "eviction_order": order.config()} + {"evict_bytes": total_on_disk // 5, "eviction_order": order.config()} ) log.info(f"{response}") diff --git 
a/test_runner/regress/test_ingestion_layer_size.py b/test_runner/regress/test_ingestion_layer_size.py new file mode 100644 index 000000000000..44c77b341060 --- /dev/null +++ b/test_runner/regress/test_ingestion_layer_size.py @@ -0,0 +1,151 @@ +from dataclasses import dataclass +from typing import Iterable, List, Union + +import pytest +from fixtures.log_helper import log +from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn +from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo +from fixtures.utils import human_bytes + + +def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder, build_type: str): + """ + Build a non-small GIN index which includes similarly batched up images in WAL stream as does pgvector + to show that we no longer create oversized layers. + """ + + if build_type == "debug": + pytest.skip("debug run is unnecessarily slow") + + minimum_initdb_size = 20 * 1024**2 + checkpoint_distance = 32 * 1024**2 + minimum_good_layer_size = checkpoint_distance * 0.9 + minimum_too_large_layer_size = 2 * checkpoint_distance + + # index size: 99MiB + rows = 2_500_000 + + # bucket lower limits + buckets = [0, minimum_initdb_size, minimum_good_layer_size, minimum_too_large_layer_size] + + assert ( + minimum_initdb_size < minimum_good_layer_size + ), "keep checkpoint_distance higher than the initdb size (find it by experimenting)" + + env = neon_env_builder.init_start( + initial_tenant_conf={ + "checkpoint_distance": f"{checkpoint_distance}", + "compaction_target_size": f"{checkpoint_distance}", + # this test is primarly interested in L0 sizes but we'll compact after ingestion to ensure sizes are good even then + "compaction_period": "0s", + "gc_period": "0s", + "compaction_threshold": "255", + "image_creation_threshold": "99999", + } + ) + + # build a larger than 3*checkpoint_distance sized gin index. 
+ # gin index building exhibits the same behaviour as the pgvector with the two phase build + with env.endpoints.create_start("main") as ep, ep.cursor() as cur: + cur.execute( + f"create table int_array_test as select array_agg(g) as int_array from generate_series(1, {rows}) g group by g / 10;" + ) + cur.execute( + "create index int_array_test_gin_index on int_array_test using gin (int_array);" + ) + cur.execute("select pg_table_size('int_array_test_gin_index')") + size = cur.fetchone() + assert size is not None + assert isinstance(size[0], int) + log.info(f"gin index size: {human_bytes(size[0])}") + assert ( + size[0] > checkpoint_distance * 3 + ), f"gin index is not large enough: {human_bytes(size[0])}" + wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline) + + ps_http = env.pageserver.http_client() + ps_http.timeline_checkpoint(env.initial_tenant, env.initial_timeline) + + infos = ps_http.layer_map_info(env.initial_tenant, env.initial_timeline) + assert len(infos.in_memory_layers) == 0, "should had flushed open layers" + post_ingest = histogram_historic_layers(infos, buckets) + + # describe first, assert later for easier debugging + log.info("non-cumulative layer size distribution after ingestion:") + print_layer_size_histogram(post_ingest) + + # since all we have are L0s, we should be getting nice L1s and images out of them now + ps_http.patch_tenant_config_client_side( + env.initial_tenant, + { + "compaction_threshold": 1, + "image_creation_threshold": 1, + }, + ) + + ps_http.timeline_compact(env.initial_tenant, env.initial_timeline, True, True) + + infos = ps_http.layer_map_info(env.initial_tenant, env.initial_timeline) + assert len(infos.in_memory_layers) == 0, "no new inmem layers expected" + post_compact = histogram_historic_layers(infos, buckets) + + log.info("non-cumulative layer size distribution after compaction:") + print_layer_size_histogram(post_compact) + + assert ( + post_ingest.counts[3] == 0 + ), f"there should be no layers larger than 2*checkpoint_distance ({human_bytes(2*checkpoint_distance)})" + assert post_ingest.counts[1] == 1, "expect one smaller layer for initdb" + assert ( + post_ingest.counts[0] <= 1 + ), "expect at most one tiny layer from shutting down the endpoint" + + # just make sure we don't have trouble splitting the layers apart + assert post_compact.counts[3] == 0 + + +@dataclass +class Histogram: + buckets: List[Union[int, float]] + counts: List[int] + sums: List[int] + + +def histogram_historic_layers( + infos: LayerMapInfo, minimum_sizes: List[Union[int, float]] +) -> Histogram: + def log_layer(layer: HistoricLayerInfo) -> HistoricLayerInfo: + log.info( + f"{layer.layer_file_name} {human_bytes(layer.layer_file_size)} ({layer.layer_file_size} bytes)" + ) + return layer + + layers = map(log_layer, infos.historic_layers) + sizes = (x.layer_file_size for x in layers) + return histogram(sizes, minimum_sizes) + + +def histogram(sizes: Iterable[int], minimum_sizes: List[Union[int, float]]) -> Histogram: + assert all(minimum_sizes[i] < minimum_sizes[i + 1] for i in range(len(minimum_sizes) - 1)) + buckets = list(enumerate(minimum_sizes)) + counts = [0 for _ in buckets] + sums = [0 for _ in buckets] + + for size in sizes: + found = False + for index, min_size in reversed(buckets): + if size >= min_size: + counts[index] += 1 + sums[index] += size + found = True + break + assert found + + return Histogram(minimum_sizes, counts, sums) + + +def print_layer_size_histogram(h: Histogram): + for index, min_size in enumerate(h.buckets): + 
log.info( + f">= {human_bytes(min_size)}: {h.counts[index]} layers total {human_bytes(h.sums[index])}" + ) diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 5bfa9cce8c67..757ea60882a9 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -563,6 +563,7 @@ def test_secondary_downloads(neon_env_builder: NeonEnvBuilder): ) ), ) + workload.stop() def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 7fdabaaec730..6383d24c57ae 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -2,6 +2,7 @@ from datetime import datetime, timezone from fixtures.common_types import Lsn +from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, @@ -32,7 +33,12 @@ def test_tenant_s3_restore( assert remote_storage, "remote storage not configured" enable_remote_storage_versioning(remote_storage) - env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) + # change it back after initdb, recovery doesn't work if the two + # index_part.json uploads happen at same second or too close to each other. + initial_tenant_conf = MANY_SMALL_LAYERS_TENANT_CONFIG + del initial_tenant_conf["checkpoint_distance"] + + env = neon_env_builder.init_start(initial_tenant_conf) env.pageserver.allowed_errors.extend( [ # The deletion queue will complain when it encounters simulated S3 errors @@ -43,14 +49,16 @@ def test_tenant_s3_restore( ) ps_http = env.pageserver.http_client() - tenant_id = env.initial_tenant + # now lets create the small layers + ps_http.set_tenant_config(tenant_id, MANY_SMALL_LAYERS_TENANT_CONFIG) + # Default tenant and the one we created assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1 # create two timelines one being the parent of another, both with non-trivial data - parent = None + parent = "main" last_flush_lsns = [] for timeline in ["first", "second"]: @@ -64,6 +72,7 @@ def test_tenant_s3_restore( last_flush_lsns.append(last_flush_lsn) ps_http.timeline_checkpoint(tenant_id, timeline_id) wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn) + log.info(f"{timeline} timeline {timeline_id} {last_flush_lsn=}") parent = timeline # These sleeps are important because they fend off differences in clocks between us and S3 @@ -108,6 +117,9 @@ def test_tenant_s3_restore( ps_http.tenant_attach(tenant_id, generation=generation) env.pageserver.quiesce_tenants() + for tline in ps_http.timeline_list(env.initial_tenant): + log.info(f"timeline detail: {tline}") + for i, timeline in enumerate(["first", "second"]): with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint: endpoint.safe_psql(f"SELECT * FROM created_{timeline};") diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 545ba05b172c..1996e995574c 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -697,6 +697,9 @@ def test_sharding_ingest_layer_sizes( # small checkpointing and compaction targets to ensure we generate many upload operations "checkpoint_distance": f"{expect_layer_size}", "compaction_target_size": f"{expect_layer_size}", + # aim to reduce flakyness, we are not doing explicit checkpointing + "compaction_period": "0s", + "gc_period": "0s", } 
shard_count = 4 neon_env_builder.num_pageservers = shard_count @@ -712,6 +715,23 @@ def test_sharding_ingest_layer_sizes( tenant_id = env.initial_tenant timeline_id = env.initial_timeline + # ignore the initdb layer(s) for the purposes of the size comparison as a initdb image layer optimization + # will produce a lot more smaller layers. + initial_layers_per_shard = {} + log.info("initdb distribution (not asserted on):") + for shard in env.storage_controller.locate(tenant_id): + pageserver = env.get_pageserver(shard["node_id"]) + shard_id = shard["shard_id"] + layers = ( + env.get_pageserver(shard["node_id"]).http_client().layer_map_info(shard_id, timeline_id) + ) + for layer in layers.historic_layers: + log.info( + f"layer[{pageserver.id}]: {layer.layer_file_name} (size {layer.layer_file_size})" + ) + + initial_layers_per_shard[shard_id] = set(layers.historic_layers) + workload = Workload(env, tenant_id, timeline_id) workload.init() workload.write_rows(4096, upload=False) @@ -733,7 +753,13 @@ def test_sharding_ingest_layer_sizes( historic_layers = sorted(layer_map.historic_layers, key=lambda layer: layer.lsn_start) + initial_layers = initial_layers_per_shard[shard_id] + for layer in historic_layers: + if layer in initial_layers: + # ignore the initdb image layers for the size histogram + continue + if layer.layer_file_size < expect_layer_size // 2: classification = "Small" small_layer_count += 1 @@ -763,7 +789,8 @@ def test_sharding_ingest_layer_sizes( pass else: # General case: - assert float(small_layer_count) / float(ok_layer_count) < 0.25 + # old limit was 0.25 but pg14 is right at the limit with 7/28 + assert float(small_layer_count) / float(ok_layer_count) < 0.3 # Each shard may emit up to one huge layer, because initdb ingest doesn't respect checkpoint_distance. assert huge_layer_count <= shard_count From a8ca7a1a1d88c6cff476eaf55c9f38c46dbfc645 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 10 Jun 2024 12:08:16 -0400 Subject: [PATCH 05/41] docs: highlight neon env comes with an initial timeline (#7995) Quite a few existing test cases create their own timelines instead of using the default one. This pull request highlights that and hopefully people can write simpler tests in the future. Signed-off-by: Alex Chi Z Co-authored-by: Yuchen Liang <70461588+yliang412@users.noreply.github.com> --- test_runner/README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test_runner/README.md b/test_runner/README.md index fd68cfff79bd..7d95634ea816 100644 --- a/test_runner/README.md +++ b/test_runner/README.md @@ -285,6 +285,21 @@ def test_foobar(neon_env_builder: NeonEnvBuilder): ... ``` +The env includes a default tenant and timeline. Therefore, you do not need to create your own +tenant/timeline for testing. 
+ +```python +def test_foobar2(neon_env_builder: NeonEnvBuilder): + env = neon_env_builder.init_start() # Start the environment + with env.endpoints.create_start("main") as endpoint: + # Start the compute endpoint + client = env.pageserver.http_client() # Get the pageserver client + + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id) +``` + For more information about pytest fixtures, see https://docs.pytest.org/en/stable/fixture.html At the end of a test, all the nodes in the environment are automatically stopped, so you From e46692788e9d3e2b010b156c034af0c95a13a2a8 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 10 Jun 2024 19:34:34 +0300 Subject: [PATCH 06/41] refactor: Timeline layer flushing (#7993) The new features have deteriorated layer flushing, most recently with #7927. Changes: - inline `Timeline::freeze_inmem_layer` to the only caller - carry the TimelineWriterState guard to the actual point of freezing the layer - this allows us to `#[cfg(feature = "testing")]` the assertion added in #7927 - remove duplicate `flush_frozen_layer` in favor of splitting the `flush_frozen_layers_and_wait` - this requires starting the flush loop earlier for `checkpoint_distance < initdb size` tests --- pageserver/src/tenant.rs | 12 +- pageserver/src/tenant/timeline.rs | 136 ++++++++++-------- .../src/tenant/timeline/layer_manager.rs | 30 +++- 3 files changed, 104 insertions(+), 74 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 60cd5c969535..2e3ce45c2bc7 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3395,6 +3395,12 @@ impl Tenant { let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id; let unfinished_timeline = raw_timeline.raw_timeline()?; + // Flush the new layer files to disk, before we make the timeline as available to + // the outside world. + // + // Flush loop needs to be spawned in order to be able to flush. + unfinished_timeline.maybe_spawn_flush_loop(); + import_datadir::import_timeline_from_postgres_datadir( unfinished_timeline, &pgdata_path, @@ -3406,12 +3412,6 @@ impl Tenant { format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}") })?; - // Flush the new layer files to disk, before we make the timeline as available to - // the outside world. - // - // Flush loop needs to be spawned in order to be able to flush. - unfinished_timeline.maybe_spawn_flush_loop(); - fail::fail_point!("before-checkpoint-new-timeline", |_| { anyhow::bail!("failpoint before-checkpoint-new-timeline"); }); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 58bdd8490665..6da0f9d91c25 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -1571,7 +1571,15 @@ impl Timeline { // This exists to provide a non-span creating version of `freeze_and_flush` we can call without // polluting the span hierarchy. pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> { - let to_lsn = self.freeze_inmem_layer(false).await; + let to_lsn = { + // Freeze the current open in-memory layer. It will be written to disk on next + // iteration. 
+ let mut g = self.write_lock.lock().await; + + let to_lsn = self.get_last_record_lsn(); + self.freeze_inmem_layer_at(to_lsn, &mut g).await; + to_lsn + }; self.flush_frozen_layers_and_wait(to_lsn).await } @@ -1657,25 +1665,35 @@ impl Timeline { self.last_freeze_at.load(), open_layer.get_opened_at(), ) { - match open_layer.info() { + let at_lsn = match open_layer.info() { InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => { // We may reach this point if the layer was already frozen by not yet flushed: flushing // happens asynchronously in the background. tracing::debug!( "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})" ); + None } InMemoryLayerInfo::Open { .. } => { // Upgrade to a write lock and freeze the layer drop(layers_guard); let mut layers_guard = self.layers.write().await; - layers_guard - .try_freeze_in_memory_layer(current_lsn, &self.last_freeze_at) + let froze = layers_guard + .try_freeze_in_memory_layer( + current_lsn, + &self.last_freeze_at, + &mut write_guard, + ) .await; + Some(current_lsn).filter(|_| froze) + } + }; + if let Some(lsn) = at_lsn { + let res: Result = self.flush_frozen_layers(lsn); + if let Err(e) = res { + tracing::info!("failed to flush frozen layer after background freeze: {e:#}"); } } - write_guard.take(); - self.flush_frozen_layers(); } } @@ -2384,7 +2402,7 @@ impl Timeline { let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error); self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await; let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap(); - assert!(matches!(*flush_loop_state, FlushLoopState::Running{ ..})); + assert!(matches!(*flush_loop_state, FlushLoopState::Running{..})); *flush_loop_state = FlushLoopState::Exited; Ok(()) } @@ -3647,31 +3665,21 @@ impl Timeline { self.last_record_lsn.advance(new_lsn); } - /// Whether there was a layer to freeze or not, return the value of get_last_record_lsn - /// before we attempted the freeze: this guarantees that ingested data is frozen up to this lsn (inclusive). - async fn freeze_inmem_layer(&self, write_lock_held: bool) -> Lsn { - // Freeze the current open in-memory layer. It will be written to disk on next - // iteration. - - let _write_guard = if write_lock_held { - None - } else { - let mut g = self.write_lock.lock().await; - // remove the reference to an open layer - g.take(); - Some(g) + async fn freeze_inmem_layer_at( + &self, + at: Lsn, + write_lock: &mut tokio::sync::MutexGuard<'_, Option>, + ) { + let frozen = { + let mut guard = self.layers.write().await; + guard + .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock) + .await }; - - let to_lsn = self.get_last_record_lsn(); - self.freeze_inmem_layer_at(to_lsn).await; - to_lsn - } - - async fn freeze_inmem_layer_at(&self, at: Lsn) { - let mut guard = self.layers.write().await; - guard - .try_freeze_in_memory_layer(at, &self.last_freeze_at) - .await; + if frozen { + let now = Instant::now(); + *(self.last_freeze_ts.write().unwrap()) = now; + } } /// Layer flusher task's main loop. @@ -3765,18 +3773,14 @@ impl Timeline { } } - /// Request the flush loop to write out all frozen layers up to `to_lsn` as Delta L0 files to disk. - /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer`]. + /// Request the flush loop to write out all frozen layers up to `at_lsn` as Delta L0 files to disk. + /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer_at`]. 
/// - /// `last_record_lsn` may be higher than the highest LSN of a frozen layer: if this is the case, - /// it means no data will be written between the top of the highest frozen layer and to_lsn, - /// e.g. because this tenant shard has ingested up to to_lsn and not written any data locally for that part of the WAL. - async fn flush_frozen_layers_and_wait( - &self, - last_record_lsn: Lsn, - ) -> Result<(), FlushLayerError> { - let mut rx = self.layer_flush_done_tx.subscribe(); - + /// `at_lsn` may be higher than the highest LSN of a frozen layer: if this is the + /// case, it means no data will be written between the top of the highest frozen layer and + /// to_lsn, e.g. because this tenant shard has ingested up to to_lsn and not written any data + /// locally for that part of the WAL. + fn flush_frozen_layers(&self, at_lsn: Lsn) -> Result { // Increment the flush cycle counter and wake up the flush task. // Remember the new value, so that when we listen for the flush // to finish, we know when the flush that we initiated has @@ -3791,13 +3795,18 @@ impl Timeline { self.layer_flush_start_tx.send_modify(|(counter, lsn)| { my_flush_request = *counter + 1; *counter = my_flush_request; - *lsn = std::cmp::max(last_record_lsn, *lsn); + *lsn = std::cmp::max(at_lsn, *lsn); }); + Ok(my_flush_request) + } + + async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> { + let mut rx = self.layer_flush_done_tx.subscribe(); loop { { let (last_result_counter, last_result) = &*rx.borrow(); - if *last_result_counter >= my_flush_request { + if *last_result_counter >= request { if let Err(err) = last_result { // We already logged the original error in // flush_loop. We cannot propagate it to the caller @@ -3824,12 +3833,9 @@ impl Timeline { } } - fn flush_frozen_layers(&self) { - self.layer_flush_start_tx.send_modify(|(counter, lsn)| { - *counter += 1; - - *lsn = std::cmp::max(*lsn, Lsn(self.last_freeze_at.load().0 - 1)); - }); + async fn flush_frozen_layers_and_wait(&self, at_lsn: Lsn) -> Result<(), FlushLayerError> { + let token = self.flush_frozen_layers(at_lsn)?; + self.wait_flush_completion(token).await } /// Flush one frozen in-memory layer to disk, as a new delta layer. @@ -5672,16 +5678,15 @@ impl<'a> TimelineWriter<'a> { } async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> { - assert!(self.write_guard.is_some()); - - self.tl.freeze_inmem_layer_at(freeze_at).await; + let current_size = self.write_guard.as_ref().unwrap().current_size; - let now = Instant::now(); - *(self.last_freeze_ts.write().unwrap()) = now; + // self.write_guard will be taken by the freezing + self.tl + .freeze_inmem_layer_at(freeze_at, &mut self.write_guard) + .await; - self.tl.flush_frozen_layers(); + self.tl.flush_frozen_layers(freeze_at)?; - let current_size = self.write_guard.as_ref().unwrap().current_size; if current_size >= self.get_checkpoint_distance() * 2 { warn!("Flushed oversized open layer with size {}", current_size) } @@ -5695,20 +5700,27 @@ impl<'a> TimelineWriter<'a> { return OpenLayerAction::Open; }; + #[cfg(feature = "testing")] if state.cached_last_freeze_at < self.tl.last_freeze_at.load() { - // TODO(#7993): branch is needed before refactoring the many places of freezing for the - // possibility `state` having a "dangling" reference to an already frozen in-memory - // layer. + // this check and assertion are not really needed because + // LayerManager::try_freeze_in_memory_layer will always clear out the + // TimelineWriterState if something is frozen. 
however, we can advance last_freeze_at when there + // is no TimelineWriterState. assert!( state.open_layer.end_lsn.get().is_some(), "our open_layer must be outdated" ); - return OpenLayerAction::Open; + + // this would be a memory leak waiting to happen because the in-memory layer always has + // an index + panic!("BUG: TimelineWriterState held on to frozen in-memory layer."); } if state.prev_lsn == Some(lsn) { - // Rolling mid LSN is not supported by downstream code. + // Rolling mid LSN is not supported by [downstream code]. // Hence, only roll at LSN boundaries. + // + // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422 return OpenLayerAction::None; } diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 0e82dedecb18..21e64d562af7 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -21,6 +21,8 @@ use crate::{ }, }; +use super::TimelineWriterState; + /// Provides semantic APIs to manipulate the layer map. #[derive(Default)] pub(crate) struct LayerManager { @@ -120,18 +122,20 @@ impl LayerManager { Ok(layer) } - /// Called from `freeze_inmem_layer`, returns true if successfully frozen. - pub(crate) async fn try_freeze_in_memory_layer( + /// Tries to freeze an open layer and also manages clearing the TimelineWriterState. + /// + /// Returns true if anything was frozen. + pub(super) async fn try_freeze_in_memory_layer( &mut self, lsn: Lsn, last_freeze_at: &AtomicLsn, - ) { + write_lock: &mut tokio::sync::MutexGuard<'_, Option>, + ) -> bool { let Lsn(last_record_lsn) = lsn; let end_lsn = Lsn(last_record_lsn + 1); - if let Some(open_layer) = &self.layer_map.open_layer { + let froze = if let Some(open_layer) = &self.layer_map.open_layer { let open_layer_rc = Arc::clone(open_layer); - // Does this layer need freezing? open_layer.freeze(end_lsn).await; // The layer is no longer open, update the layer map to reflect this. @@ -139,11 +143,25 @@ impl LayerManager { self.layer_map.frozen_layers.push_back(open_layer_rc); self.layer_map.open_layer = None; self.layer_map.next_open_layer_at = Some(end_lsn); - } + + true + } else { + false + }; // Even if there was no layer to freeze, advance last_freeze_at to last_record_lsn+1: this // accounts for regions in the LSN range where we might have ingested no data due to sharding. last_freeze_at.store(end_lsn); + + // the writer state must no longer have a reference to the frozen layer + let taken = write_lock.take(); + assert_eq!( + froze, + taken.is_some(), + "should only had frozen a layer when TimelineWriterState existed" + ); + + froze } /// Add image layers to the layer map, called from `create_image_layers`. From e27ce3861914e89ad43aafb36e5fb96f13cd2bc2 Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Tue, 11 Jun 2024 13:07:51 +0200 Subject: [PATCH 07/41] Add testing for extensions (#7818) ## Problem We need automated tests of extensions shipped with Neon to detect possible problems. ## Summary of changes A new image neon-test-extensions is added. Workflow changes to test the shipped extensions are added as well. Currently, the regression tests, shipped with extensions are in use. Some extensions, i.e. rum, timescaledb, rdkit, postgis, pgx_ulid, pgtap, pg_tiktoken, pg_jsonschema, pg_graphql, kq_imcx, wal2json_2_5 are excluded due to problems or absence of internal tests. 
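For anyone reproducing the new CI check locally, the workflow boils down to a single invocation of the updated compose test script (a sketch only: `BUILD_TAG` stands in for a published image tag, and the compose file is assumed to resolve the compute and neon-test-extensions images from `TAG` the same way the workflow does):

```shell
# Mirrors the "Verify docker-compose example and test extensions" CI step below.
env TAG=$BUILD_TAG ./docker-compose/docker_compose_test.sh
```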
--------- Co-authored-by: Alexander Bayandin Co-authored-by: Heikki Linnakangas --- .dockerignore | 1 + .github/workflows/build_and_test.yml | 29 ++- Dockerfile.compute-node | 63 +++++ docker-compose/compute_wrapper/Dockerfile | 7 +- .../var/db/postgres/specs/spec.json | 12 +- docker-compose/docker-compose.yml | 12 +- docker-compose/docker_compose_test.sh | 83 +++++-- docker-compose/run-tests.sh | 15 ++ patches/pg_anon.patch | 223 ++++++++++++++++++ patches/pg_cron.patch | 19 ++ patches/pg_hintplan.patch | 39 +++ 11 files changed, 478 insertions(+), 25 deletions(-) create mode 100644 docker-compose/run-tests.sh create mode 100644 patches/pg_anon.patch create mode 100644 patches/pg_cron.patch create mode 100644 patches/pg_hintplan.patch diff --git a/.dockerignore b/.dockerignore index 1258532db894..eead727994a3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,6 +8,7 @@ !scripts/combine_control_files.py !scripts/ninstall.sh !vm-cgconfig.conf +!docker-compose/run-tests.sh # Directories !.cargo/ diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index b9caf7606020..79a0a77638fa 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -858,6 +858,26 @@ jobs: cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max tags: | neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} + + - name: Build neon extensions test image + if: matrix.version == 'v16' + uses: docker/build-push-action@v5 + with: + context: . + build-args: | + GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} + PG_VERSION=${{ matrix.version }} + BUILD_TAG=${{ needs.tag.outputs.build-tag }} + TAG=${{ needs.build-build-tools-image.outputs.image-tag }} + provenance: false + push: true + pull: true + file: Dockerfile.compute-node + target: neon-pg-ext-test + cache-from: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }} + cache-to: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max + tags: | + neondatabase/neon-test-extensions-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}-${{ matrix.arch }} - name: Build compute-tools image # compute-tools are Postgres independent, so build it only once @@ -902,6 +922,13 @@ jobs: neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + - name: Create multi-arch neon-test-extensions image + if: matrix.version == 'v16' + run: | + docker buildx imagetools create -t neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ + neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ + neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + - name: Create multi-arch compute-tools image if: matrix.version == 'v16' run: | @@ -1020,7 +1047,7 @@ jobs: exit 1 fi - - name: Verify docker-compose example + - name: Verify docker-compose example and test extensions timeout-minutes: 20 run: env TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 90b8868b43fa..a86fdd0bc3d1 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -928,6 +928,69 @@ RUN rm -r 
/usr/local/pgsql/include # if they were to be used by other libraries. RUN rm /usr/local/pgsql/lib/lib*.a + +######################################################################################### +# +# Layer neon-pg-ext-test +# +######################################################################################### + +FROM neon-pg-ext-build AS neon-pg-ext-test +ARG PG_VERSION +RUN mkdir /ext-src + +#COPY --from=postgis-build /postgis.tar.gz /ext-src/ +#COPY --from=postgis-build /sfcgal/* /usr +COPY --from=plv8-build /plv8.tar.gz /ext-src/ +COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/ +COPY --from=unit-pg-build /postgresql-unit.tar.gz /ext-src/ +COPY --from=vector-pg-build /pgvector.tar.gz /ext-src/ +COPY --from=vector-pg-build /pgvector.patch /ext-src/ +COPY --from=pgjwt-pg-build /pgjwt.tar.gz /ext-src +#COPY --from=pg-jsonschema-pg-build /home/nonroot/pg_jsonschema.tar.gz /ext-src +#COPY --from=pg-graphql-pg-build /home/nonroot/pg_graphql.tar.gz /ext-src +#COPY --from=pg-tiktoken-pg-build /home/nonroot/pg_tiktoken.tar.gz /ext-src +COPY --from=hypopg-pg-build /hypopg.tar.gz /ext-src +COPY --from=pg-hashids-pg-build /pg_hashids.tar.gz /ext-src +#COPY --from=rum-pg-build /rum.tar.gz /ext-src +#COPY --from=pgtap-pg-build /pgtap.tar.gz /ext-src +COPY --from=ip4r-pg-build /ip4r.tar.gz /ext-src +COPY --from=prefix-pg-build /prefix.tar.gz /ext-src +COPY --from=hll-pg-build /hll.tar.gz /ext-src +COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src +#COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src +COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src +COPY patches/pg_hintplan.patch /ext-src +#COPY --from=kq-imcx-pg-build /kq_imcx.tar.gz /ext-src +COPY --from=pg-cron-pg-build /pg_cron.tar.gz /ext-src +COPY patches/pg_cron.patch /ext-src +#COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src +COPY --from=rdkit-pg-build /rdkit.tar.gz /ext-src +COPY --from=pg-uuidv7-pg-build /pg_uuidv7.tar.gz /ext-src +COPY --from=pg-roaringbitmap-pg-build /pg_roaringbitmap.tar.gz /ext-src +COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src +#COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src +#COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src +COPY --from=pg-anon-pg-build /pg_anon.tar.gz /ext-src +COPY patches/pg_anon.patch /ext-src +COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src +COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src +RUN cd /ext-src/ && for f in *.tar.gz; \ + do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \ + rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \ + || exit 1; rm -f $f; done +RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch +# cmake is required for the h3 test +RUN apt-get update && apt-get install -y cmake +RUN patch -p1 < /ext-src/pg_hintplan.patch +COPY --chmod=755 docker-compose/run-tests.sh /run-tests.sh +RUN patch -p1 /dev/null && pwd )" -COMPOSE_FILE=$SCRIPT_DIR/docker-compose.yml - +COMPOSE_FILE='docker-compose.yml' +cd $(dirname $0) +docker compose -f $COMPOSE_FILE COMPUTE_CONTAINER_NAME=docker-compose-compute-1 -SQL="CREATE TABLE t(key int primary key, value text); insert into t values(1,1); select * from t;" -PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -c '$SQL' postgres" +TEST_CONTAINER_NAME=docker-compose-neon-test-extensions-1 +PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres" +: ${http_proxy:=} +: ${https_proxy:=} +export http_proxy https_proxy cleanup() { echo "show container information" @@ -25,34 +31,71 @@ 
cleanup() { docker compose -f $COMPOSE_FILE down } -echo "clean up containers if exists" -cleanup - for pg_version in 14 15 16; do - echo "start containers (pg_version=$pg_version)." - PG_VERSION=$pg_version docker compose -f $COMPOSE_FILE up --build -d + echo "clean up containers if exists" + cleanup + PG_TEST_VERSION=$(($pg_version < 16 ? 16 : $pg_version)) + PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose -f $COMPOSE_FILE up --build -d echo "wait until the compute is ready. timeout after 60s. " cnt=0 - while sleep 1; do + while sleep 3; do # check timeout - cnt=`expr $cnt + 1` + cnt=`expr $cnt + 3` if [ $cnt -gt 60 ]; then echo "timeout before the compute is ready." cleanup exit 1 fi - - # check if the compute is ready - set +o pipefail - result=`docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep "accepting connections" | wc -l` - set -o pipefail - if [ $result -eq 1 ]; then + if docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then echo "OK. The compute is ready to connect." echo "execute simple queries." docker exec $COMPUTE_CONTAINER_NAME /bin/bash -c "psql $PSQL_OPTION" - cleanup break fi done + + if [ $pg_version -ge 16 ] + then + echo Enabling trust connection + docker exec $COMPUTE_CONTAINER_NAME bash -c "sed -i '\$d' /var/db/postgres/compute/pg_hba.conf && echo -e 'host\t all\t all\t all\t trust' >> /var/db/postgres/compute/pg_hba.conf && psql $PSQL_OPTION -c 'select pg_reload_conf()' " + echo Adding postgres role + docker exec $COMPUTE_CONTAINER_NAME psql $PSQL_OPTION -c "CREATE ROLE postgres SUPERUSER LOGIN" + # This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail + # It cannot be moved to Dockerfile now because the database directory is created after the start of the container + echo Adding dummy config + docker exec $COMPUTE_CONTAINER_NAME touch /var/db/postgres/compute/compute_ctl_temp_override.conf + # This block is required for the pg_anon extension test. + # The test assumes that it is running on the same host with the postgres engine. 
+ # In our case it's not true, that's why we are copying files to the compute node + TMPDIR=$(mktemp -d) + docker cp $TEST_CONTAINER_NAME:/ext-src/pg_anon-src/data $TMPDIR/data + echo -e '1\t too \t many \t tabs' > $TMPDIR/data/bad.csv + docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/tmp/tmp_anon_alternate_data + rm -rf $TMPDIR + TMPDIR=$(mktemp -d) + # The following block does the same for the pg_hintplan test + docker cp $TEST_CONTAINER_NAME:/ext-src/pg_hint_plan-src/data $TMPDIR/data + docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/ext-src/pg_hint_plan-src/ + rm -rf $TMPDIR + # We are running tests now + if docker exec -e SKIP=rum-src,timescaledb-src,rdkit-src,postgis-src,pgx_ulid-src,pgtap-src,pg_tiktoken-src,pg_jsonschema-src,pg_graphql-src,kq_imcx-src,wal2json_2_5-src \ + $TEST_CONTAINER_NAME /run-tests.sh | tee testout.txt + then + cleanup + else + FAILED=$(tail -1 testout.txt) + for d in $FAILED + do + mkdir $d + docker cp $TEST_CONTAINER_NAME:/ext-src/$d/regression.diffs $d || true + docker cp $TEST_CONTAINER_NAME:/ext-src/$d/regression.out $d || true + cat $d/regression.out $d/regression.diffs || true + done + rm -rf $FAILED + cleanup + exit 1 + fi + fi + cleanup done diff --git a/docker-compose/run-tests.sh b/docker-compose/run-tests.sh new file mode 100644 index 000000000000..c05fc159aa51 --- /dev/null +++ b/docker-compose/run-tests.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -x + +cd /ext-src +FAILED= +LIST=$((echo ${SKIP} | sed 's/,/\n/g'; ls -d *-src) | sort | uniq -u) +for d in ${LIST} +do + [ -d ${d} ] || continue + psql -c "select 1" >/dev/null || break + make -C ${d} installcheck || FAILED="${d} ${FAILED}" +done +[ -z "${FAILED}" ] && exit 0 +echo ${FAILED} +exit 1 \ No newline at end of file diff --git a/patches/pg_anon.patch b/patches/pg_anon.patch new file mode 100644 index 000000000000..15dfd3c5a0ba --- /dev/null +++ b/patches/pg_anon.patch @@ -0,0 +1,223 @@ +commit 7dd414ee75f2875cffb1d6ba474df1f135a6fc6f +Author: Alexey Masterov +Date: Fri May 31 06:34:26 2024 +0000 + + These alternative expected files were added to consider the neon features + +diff --git a/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out b/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out +new file mode 100644 +index 0000000..2539cfd +--- /dev/null ++++ b/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out +@@ -0,0 +1,101 @@ ++BEGIN; ++CREATE EXTENSION anon CASCADE; ++NOTICE: installing required extension "pgcrypto" ++SELECT anon.init(); ++ init ++------ ++ t ++(1 row) ++ ++CREATE ROLE mallory_the_masked_user; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS 'MASKED'; ++CREATE TABLE t1(i INT); ++ALTER TABLE t1 ADD COLUMN t TEXT; ++SECURITY LABEL FOR anon ON COLUMN t1.t ++IS 'MASKED WITH VALUE NULL'; ++INSERT INTO t1 VALUES (1,'test'); ++-- ++-- We're checking the owner's permissions ++-- ++-- see ++-- https://postgresql-anonymizer.readthedocs.io/en/latest/SECURITY/#permissions ++-- ++SET ROLE mallory_the_masked_user; ++SELECT anon.pseudo_first_name(0) IS NOT NULL; ++ ?column? 
++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.init(); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.anonymize_table('t1'); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++SAVEPOINT fail_start_engine; ++SELECT anon.start_dynamic_masking(); ++ERROR: Only supersusers can start the dynamic masking engine. ++CONTEXT: PL/pgSQL function anon.start_dynamic_masking(boolean) line 18 at RAISE ++ROLLBACK TO fail_start_engine; ++RESET ROLE; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++SET ROLE mallory_the_masked_user; ++SELECT * FROM mask.t1; ++ i | t ++---+--- ++ 1 | ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ SELECT * FROM public.t1; ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++SAVEPOINT fail_stop_engine; ++SELECT anon.stop_dynamic_masking(); ++ERROR: Only supersusers can stop the dynamic masking engine. ++CONTEXT: PL/pgSQL function anon.stop_dynamic_masking() line 18 at RAISE ++ROLLBACK TO fail_stop_engine; ++RESET ROLE; ++SELECT anon.stop_dynamic_masking(); ++NOTICE: The previous priviledges of 'mallory_the_masked_user' are not restored. You need to grant them manually. ++ stop_dynamic_masking ++---------------------- ++ t ++(1 row) ++ ++SET ROLE mallory_the_masked_user; ++SELECT COUNT(*)=1 FROM anon.pg_masking_rules; ++ ?column? ++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++SAVEPOINT fail_seclabel_on_role; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS NULL; ++ERROR: permission denied ++DETAIL: The current user must have the CREATEROLE attribute. ++ROLLBACK TO fail_seclabel_on_role; ++ROLLBACK; +diff --git a/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out b/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out +new file mode 100644 +index 0000000..8b090fe +--- /dev/null ++++ b/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out +@@ -0,0 +1,104 @@ ++BEGIN; ++CREATE EXTENSION anon CASCADE; ++NOTICE: installing required extension "pgcrypto" ++SELECT anon.init(); ++ init ++------ ++ t ++(1 row) ++ ++CREATE ROLE oscar_the_owner; ++ALTER DATABASE :DBNAME OWNER TO oscar_the_owner; ++CREATE ROLE mallory_the_masked_user; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS 'MASKED'; ++-- ++-- We're checking the owner's permissions ++-- ++-- see ++-- https://postgresql-anonymizer.readthedocs.io/en/latest/SECURITY/#permissions ++-- ++SET ROLE oscar_the_owner; ++SELECT anon.pseudo_first_name(0) IS NOT NULL; ++ ?column? 
++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.init(); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++CREATE TABLE t1(i INT); ++ALTER TABLE t1 ADD COLUMN t TEXT; ++SECURITY LABEL FOR anon ON COLUMN t1.t ++IS 'MASKED WITH VALUE NULL'; ++INSERT INTO t1 VALUES (1,'test'); ++SELECT anon.anonymize_table('t1'); ++ anonymize_table ++----------------- ++ t ++(1 row) ++ ++SELECT * FROM t1; ++ i | t ++---+--- ++ 1 | ++(1 row) ++ ++UPDATE t1 SET t='test' WHERE i=1; ++-- SHOULD FAIL ++SAVEPOINT fail_start_engine; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++ROLLBACK TO fail_start_engine; ++RESET ROLE; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++SET ROLE oscar_the_owner; ++SELECT * FROM t1; ++ i | t ++---+------ ++ 1 | test ++(1 row) ++ ++--SELECT * FROM mask.t1; ++-- SHOULD FAIL ++SAVEPOINT fail_stop_engine; ++SELECT anon.stop_dynamic_masking(); ++ERROR: permission denied for schema mask ++CONTEXT: SQL statement "DROP VIEW mask.t1;" ++PL/pgSQL function anon.mask_drop_view(oid) line 3 at EXECUTE ++SQL statement "SELECT anon.mask_drop_view(oid) ++ FROM pg_catalog.pg_class ++ WHERE relnamespace=quote_ident(pg_catalog.current_setting('anon.sourceschema'))::REGNAMESPACE ++ AND relkind IN ('r','p','f')" ++PL/pgSQL function anon.stop_dynamic_masking() line 22 at PERFORM ++ROLLBACK TO fail_stop_engine; ++RESET ROLE; ++SELECT anon.stop_dynamic_masking(); ++NOTICE: The previous priviledges of 'mallory_the_masked_user' are not restored. You need to grant them manually. ++ stop_dynamic_masking ++---------------------- ++ t ++(1 row) ++ ++SET ROLE oscar_the_owner; ++-- SHOULD FAIL ++SAVEPOINT fail_seclabel_on_role; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS NULL; ++ERROR: permission denied ++DETAIL: The current user must have the CREATEROLE attribute. 
++ROLLBACK TO fail_seclabel_on_role; ++ROLLBACK; diff --git a/patches/pg_cron.patch b/patches/pg_cron.patch new file mode 100644 index 000000000000..c2b648c20cbf --- /dev/null +++ b/patches/pg_cron.patch @@ -0,0 +1,19 @@ +commit b3ea51ee158f113f2f82d0b97c12c54343c9a695 (HEAD -> master) +Author: Alexey Masterov +Date: Fri Jun 7 19:23:42 2024 +0000 + + Disable REGRESS_OPTIONS causing initdb + +diff --git a/ext-src/pg_cron-src/Makefile b/ext-src/pg_cron-src/Makefile +index 053314c..fbd5fb5 100644 +--- a/ext-src/pg_cron-src/Makefile ++++ b/ext-src/pg_cron-src/Makefile +@@ -5,7 +5,7 @@ EXTENSION = pg_cron + DATA_built = $(EXTENSION)--1.0.sql + DATA = $(wildcard $(EXTENSION)--*--*.sql) + +-REGRESS_OPTS =--temp-config=./pg_cron.conf --temp-instance=./tmp_check ++#REGRESS_OPTS =--temp-config=./pg_cron.conf --temp-instance=./tmp_check + REGRESS = pg_cron-test + + # compilation configuration diff --git a/patches/pg_hintplan.patch b/patches/pg_hintplan.patch new file mode 100644 index 000000000000..61a5ecbb9088 --- /dev/null +++ b/patches/pg_hintplan.patch @@ -0,0 +1,39 @@ +commit f7925d4d1406c0f0229e3c691c94b69e381899b1 (HEAD -> master) +Author: Alexey Masterov +Date: Thu Jun 6 08:02:42 2024 +0000 + + Patch expected files to consider Neon's log messages + +diff --git a/ext-src/pg_hint_plan-src/expected/ut-A.out b/ext-src/pg_hint_plan-src/expected/ut-A.out +index da723b8..f8d0102 100644 +--- a/ext-src/pg_hint_plan-src/expected/ut-A.out ++++ b/ext-src/pg_hint_plan-src/expected/ut-A.out +@@ -9,13 +9,16 @@ SET search_path TO public; + ---- + -- No.A-1-1-3 + CREATE EXTENSION pg_hint_plan; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + -- No.A-1-2-3 + DROP EXTENSION pg_hint_plan; + -- No.A-1-1-4 + CREATE SCHEMA other_schema; + CREATE EXTENSION pg_hint_plan SCHEMA other_schema; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + ERROR: extension "pg_hint_plan" must be installed in schema "hint_plan" + CREATE EXTENSION pg_hint_plan; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + DROP SCHEMA other_schema; + ---- + ---- No. A-5-1 comment pattern +diff --git a/ext-src/pg_hint_plan-src/expected/ut-fdw.out b/ext-src/pg_hint_plan-src/expected/ut-fdw.out +index d372459..6282afe 100644 +--- a/ext-src/pg_hint_plan-src/expected/ut-fdw.out ++++ b/ext-src/pg_hint_plan-src/expected/ut-fdw.out +@@ -7,6 +7,7 @@ SET pg_hint_plan.debug_print TO on; + SET client_min_messages TO LOG; + SET pg_hint_plan.enable_hint TO on; + CREATE EXTENSION file_fdw; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/file_fdw + CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw; + CREATE USER MAPPING FOR PUBLIC SERVER file_server; + CREATE FOREIGN TABLE ft1 (id int, val int) SERVER file_server OPTIONS (format 'csv', filename :'filename'); From 7515d0f368e14dfb82520c8a493a49e5671e479e Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 11 Jun 2024 15:38:54 +0300 Subject: [PATCH 08/41] fix: stop storing TimelineMetadata in index_part.json as bytes (#7699) We've stored metadata as bytes within the `index_part.json` for long fixed reasons. #7693 added support for reading out normal json serialization of the `TimelineMetadata`. Change the serialization to only write `TimelineMetadata` as json for going forward, keeping the backward compatibility to reading the metadata as bytes. 
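Concretely, the read side now has to accept two shapes for the same field: the legacy bincoded metadata serialized as a JSON byte array, and the new structured object. In this patch that is done by a hand-written visitor in `modern_serde`; the snippet below sketches the same idea with a serde untagged enum and simplified, made-up field names, purely as an illustration.

```rust
// Illustration only: the real compatibility shim is the hand-written visitor in
// `modern_serde`; field names here are simplified and hypothetical.
use serde::Deserialize;

#[derive(Deserialize)]
#[serde(untagged)]
enum MetadataCompat {
    // New writers emit a structured JSON object...
    Modern {
        disk_consistent_lsn: String,
        pg_version: u32,
    },
    // ...while old index_part.json files carry the bincoded metadata as a JSON byte array.
    LegacyBytes(Vec<u8>),
}

fn main() {
    let modern = r#"{ "disk_consistent_lsn": "0/16960E8", "pg_version": 14 }"#;
    let legacy = "[216, 111, 252, 208, 0, 54]";
    assert!(matches!(
        serde_json::from_str::<MetadataCompat>(modern),
        Ok(MetadataCompat::Modern { .. })
    ));
    assert!(matches!(
        serde_json::from_str::<MetadataCompat>(legacy),
        Ok(MetadataCompat::LegacyBytes(_))
    ));
}
```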
Because of failure to include `alias = "metadata"` in #7693, one more follow-up is required to make the switch from the old name to `"metadata": `, but that affects only the field name in serialized format. In documentation and naming, an effort is made to add enough warning signs around TimelineMetadata so that it will receive no changes in the future. We can add those fields to `IndexPart` directly instead. Additionally, the path to cleaning up `metadata.rs` is documented in the `metadata.rs` module comment. If we must extend `TimelineMetadata` before that, the duplication suggested in [review comment] is the way to go. [review comment]: https://github.com/neondatabase/neon/pull/7699#pullrequestreview-2107081558 --- pageserver/ctl/src/index_part.rs | 22 +- pageserver/src/tenant/metadata.rs | 256 +++++++----------- .../tenant/remote_timeline_client/index.rs | 82 +++++- s3_scrubber/src/checks.rs | 13 +- s3_scrubber/src/scan_pageserver_metadata.rs | 2 +- .../regress/test_layers_from_future.py | 3 +- 6 files changed, 178 insertions(+), 200 deletions(-) diff --git a/pageserver/ctl/src/index_part.rs b/pageserver/ctl/src/index_part.rs index a33cae676919..20018846f8fa 100644 --- a/pageserver/ctl/src/index_part.rs +++ b/pageserver/ctl/src/index_part.rs @@ -1,11 +1,6 @@ -use std::collections::HashMap; - use anyhow::Context; use camino::Utf8PathBuf; -use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata; -use pageserver::tenant::storage_layer::LayerName; -use pageserver::tenant::{metadata::TimelineMetadata, IndexPart}; -use utils::lsn::Lsn; +use pageserver::tenant::IndexPart; #[derive(clap::Subcommand)] pub(crate) enum IndexPartCmd { @@ -17,20 +12,7 @@ pub(crate) async fn main(cmd: &IndexPartCmd) -> anyhow::Result<()> { IndexPartCmd::Dump { path } => { let bytes = tokio::fs::read(path).await.context("read file")?; let des: IndexPart = IndexPart::from_s3_bytes(&bytes).context("deserialize")?; - #[derive(serde::Serialize)] - struct Output<'a> { - layer_metadata: &'a HashMap, - disk_consistent_lsn: Lsn, - timeline_metadata: &'a TimelineMetadata, - } - - let output = Output { - layer_metadata: &des.layer_metadata, - disk_consistent_lsn: des.metadata.disk_consistent_lsn(), - timeline_metadata: &des.metadata, - }; - - let output = serde_json::to_string_pretty(&output).context("serialize output")?; + let output = serde_json::to_string_pretty(&des).context("serialize output")?; println!("{output}"); Ok(()) } diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index c00672895a65..6ba1bdef9b89 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -1,15 +1,23 @@ -//! Every image of a certain timeline from [`crate::tenant::Tenant`] -//! has a metadata that needs to be stored persistently. +//! Describes the legacy now hopefully no longer modified per-timeline metadata stored in +//! `index_part.json` managed by [`remote_timeline_client`]. For many tenants and their timelines, +//! this struct and it's original serialization format is still needed because they were written a +//! long time ago. //! -//! Later, the file gets used in [`remote_timeline_client`] as a part of -//! external storage import and export operations. +//! Instead of changing and adding versioning to this, just change [`IndexPart`] with soft json +//! versioning. //! -//! The module contains all structs and related helper methods related to timeline metadata. +//! To clean up this module we need to migrate all index_part.json files to a later version. +//! 
While doing this, we need to be mindful about s3 based recovery as well, so it might take +//! however long we keep the old versions to be able to delete the old code. After that, we can +//! remove everything else than [`TimelineMetadataBodyV2`], rename it as `TimelineMetadata` and +//! move it to `index.rs`. Before doing all of this, we need to keep the structures for backwards +//! compatibility. //! //! [`remote_timeline_client`]: super::remote_timeline_client +//! [`IndexPart`]: super::remote_timeline_client::index::IndexPart use anyhow::ensure; -use serde::{de::Error, Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; use utils::bin_ser::SerializeError; use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn}; @@ -17,17 +25,37 @@ use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn}; const METADATA_FORMAT_VERSION: u16 = 4; /// Previous supported format versions. +/// +/// In practice, none of these should remain, all are [`METADATA_FORMAT_VERSION`], but confirming +/// that requires a scrubber run which is yet to be done. const METADATA_OLD_FORMAT_VERSION: u16 = 3; -/// We assume that a write of up to METADATA_MAX_SIZE bytes is atomic. +/// When the file existed on disk we assumed that a write of up to METADATA_MAX_SIZE bytes is atomic. /// /// This is the same assumption that PostgreSQL makes with the control file, +/// /// see PG_CONTROL_MAX_SAFE_SIZE const METADATA_MAX_SIZE: usize = 512; -/// Metadata stored on disk for each timeline +/// Legacy metadata stored as a component of `index_part.json` per timeline. +/// +/// Do not make new changes to this type or the module. In production, we have two different kinds +/// of serializations of this type: bincode and json. Bincode version reflects what used to be +/// stored on disk in earlier versions and does internal crc32 checksumming. /// -/// The fields correspond to the values we hold in memory, in Timeline. +/// This type should not implement `serde::Serialize` or `serde::Deserialize` because there would +/// be a confusion whether you want the old version ([`TimelineMetadata::from_bytes`]) or the modern +/// as-exists in `index_part.json` ([`self::modern_serde`]). 
+/// +/// ```compile_fail +/// #[derive(serde::Serialize)] +/// struct DoNotDoThis(pageserver::tenant::metadata::TimelineMetadata); +/// ``` +/// +/// ```compile_fail +/// #[derive(serde::Deserialize)] +/// struct NeitherDoThis(pageserver::tenant::metadata::TimelineMetadata); +/// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct TimelineMetadata { hdr: TimelineMetadataHeader, @@ -40,6 +68,49 @@ struct TimelineMetadataHeader { size: u16, // size of serialized metadata format_version: u16, // metadata format version (used for compatibility checks) } + +impl TryFrom<&TimelineMetadataBodyV2> for TimelineMetadataHeader { + type Error = Crc32CalculationFailed; + + fn try_from(value: &TimelineMetadataBodyV2) -> Result { + #[derive(Default)] + struct Crc32Sink { + crc: u32, + count: usize, + } + + impl std::io::Write for Crc32Sink { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.crc = crc32c::crc32c_append(self.crc, buf); + self.count += buf.len(); + Ok(buf.len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } + } + + // jump through hoops to calculate the crc32 so that TimelineMetadata::ne works + // across serialization versions + let mut sink = Crc32Sink::default(); + ::ser_into(value, &mut sink) + .map_err(Crc32CalculationFailed)?; + + let size = METADATA_HDR_SIZE + sink.count; + + Ok(TimelineMetadataHeader { + checksum: sink.crc, + size: size as u16, + format_version: METADATA_FORMAT_VERSION, + }) + } +} + +#[derive(thiserror::Error, Debug)] +#[error("re-serializing for crc32 failed")] +struct Crc32CalculationFailed(#[source] utils::bin_ser::SerializeError); + const METADATA_HDR_SIZE: usize = std::mem::size_of::(); #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -111,6 +182,12 @@ impl TimelineMetadata { } } + #[cfg(test)] + pub(crate) fn with_recalculated_checksum(mut self) -> anyhow::Result { + self.hdr = TimelineMetadataHeader::try_from(&self.body)?; + Ok(self) + } + fn upgrade_timeline_metadata(metadata_bytes: &[u8]) -> anyhow::Result { let mut hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?; @@ -261,32 +338,8 @@ impl TimelineMetadata { } } -impl<'de> Deserialize<'de> for TimelineMetadata { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let bytes = Vec::::deserialize(deserializer)?; - Self::from_bytes(bytes.as_slice()).map_err(D::Error::custom) - } -} - -impl Serialize for TimelineMetadata { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let bytes = self.to_bytes().map_err(serde::ser::Error::custom)?; - bytes.serialize(serializer) - } -} - pub(crate) mod modern_serde { - use crate::tenant::metadata::METADATA_FORMAT_VERSION; - - use super::{ - TimelineMetadata, TimelineMetadataBodyV2, TimelineMetadataHeader, METADATA_HDR_SIZE, - }; + use super::{TimelineMetadata, TimelineMetadataBodyV2, TimelineMetadataHeader}; use serde::{Deserialize, Serialize}; pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result @@ -322,71 +375,15 @@ pub(crate) mod modern_serde { let de = serde::de::value::MapAccessDeserializer::new(map); let body = TimelineMetadataBodyV2::deserialize(de)?; + let hdr = TimelineMetadataHeader::try_from(&body).map_err(A::Error::custom)?; - // jump through hoops to calculate the crc32 so that TimelineMetadata::ne works - // across serialization versions - let mut sink = Crc32Sink::default(); - ::ser_into(&body, &mut sink) - .map_err(|e| A::Error::custom(Crc32CalculationFailed(e)))?; - - let size = METADATA_HDR_SIZE + sink.count; 
- - Ok(TimelineMetadata { - hdr: TimelineMetadataHeader { - checksum: sink.crc, - size: size as u16, - format_version: METADATA_FORMAT_VERSION, - }, - body, - }) + Ok(TimelineMetadata { hdr, body }) } } deserializer.deserialize_any(Visitor) } - #[derive(Default)] - struct Crc32Sink { - crc: u32, - count: usize, - } - - impl std::io::Write for Crc32Sink { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.crc = crc32c::crc32c_append(self.crc, buf); - self.count += buf.len(); - Ok(buf.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } - } - - #[derive(thiserror::Error)] - #[error("re-serializing for crc32 failed")] - struct Crc32CalculationFailed(#[source] E); - - // this should be true for one release, after that we can change it to false - // remember to check the IndexPart::metadata field TODO comment as well - const LEGACY_BINCODED_BYTES: bool = true; - - #[derive(serde::Serialize)] - #[serde(transparent)] - struct LegacyPaddedBytes<'a>(&'a TimelineMetadata); - - struct JustTheBodyV2<'a>(&'a TimelineMetadata); - - impl serde::Serialize for JustTheBodyV2<'_> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // header is not needed, upon reading we've upgraded all v1 to v2 - self.0.body.serialize(serializer) - } - } - pub(crate) fn serialize( metadata: &TimelineMetadata, serializer: S, @@ -394,25 +391,23 @@ pub(crate) mod modern_serde { where S: serde::Serializer, { - // we cannot use TimelineMetadata::serialize for now because it'll do - // TimelineMetadata::to_bytes - if LEGACY_BINCODED_BYTES { - LegacyPaddedBytes(metadata).serialize(serializer) - } else { - JustTheBodyV2(metadata).serialize(serializer) - } + // header is not needed, upon reading we've upgraded all v1 to v2 + metadata.body.serialize(serializer) } #[test] fn deserializes_bytes_as_well_as_equivalent_body_v2() { #[derive(serde::Deserialize, serde::Serialize)] - struct Wrapper(#[serde(deserialize_with = "deserialize")] TimelineMetadata); + struct Wrapper( + #[serde(deserialize_with = "deserialize", serialize_with = "serialize")] + TimelineMetadata, + ); let too_many_bytes = "[216,111,252,208,0,54,0,4,0,0,0,0,1,73,253,144,1,0,0,0,0,1,73,253,24,0,0,0,0,0,0,0,0,0,0,0,0,0,1,73,253,24,0,0,0,0,1,73,253,24,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"; let wrapper_from_bytes = serde_json::from_str::(too_many_bytes).unwrap(); - let serialized = serde_json::to_value(JustTheBodyV2(&wrapper_from_bytes.0)).unwrap(); + let serialized = serde_json::to_value(&wrapper_from_bytes).unwrap(); assert_eq!( serialized, @@ -553,59 +548,6 @@ mod tests { ); } - #[test] - fn test_metadata_bincode_serde() { - let 
original_metadata = TimelineMetadata::new( - Lsn(0x200), - Some(Lsn(0x100)), - Some(TIMELINE_ID), - Lsn(0), - Lsn(0), - Lsn(0), - // Any version will do here, so use the default - crate::DEFAULT_PG_VERSION, - ); - let metadata_bytes = original_metadata - .to_bytes() - .expect("Cannot create bytes array from metadata"); - - let metadata_bincode_be_bytes = original_metadata - .ser() - .expect("Cannot serialize the metadata"); - - // 8 bytes for the length of the vector - assert_eq!(metadata_bincode_be_bytes.len(), 8 + metadata_bytes.len()); - - let expected_bincode_bytes = { - let mut temp = vec![]; - let len_bytes = metadata_bytes.len().to_be_bytes(); - temp.extend_from_slice(&len_bytes); - temp.extend_from_slice(&metadata_bytes); - temp - }; - assert_eq!(metadata_bincode_be_bytes, expected_bincode_bytes); - - let deserialized_metadata = TimelineMetadata::des(&metadata_bincode_be_bytes).unwrap(); - // Deserialized metadata has the metadata header, which is different from the serialized one. - // Reference: TimelineMetaData::to_bytes() - let expected_metadata = { - let mut temp_metadata = original_metadata; - let body_bytes = temp_metadata - .body - .ser() - .expect("Cannot serialize the metadata body"); - let metadata_size = METADATA_HDR_SIZE + body_bytes.len(); - let hdr = TimelineMetadataHeader { - size: metadata_size as u16, - format_version: METADATA_FORMAT_VERSION, - checksum: crc32c::crc32c(&body_bytes), - }; - temp_metadata.hdr = hdr; - temp_metadata - }; - assert_eq!(deserialized_metadata, expected_metadata); - } - #[test] fn test_metadata_bincode_serde_ensure_roundtrip() { let original_metadata = TimelineMetadata::new( @@ -619,8 +561,6 @@ mod tests { crate::DEFAULT_PG_VERSION, ); let expected_bytes = vec![ - /* bincode length encoding bytes */ - 0, 0, 0, 0, 0, 0, 2, 0, // 8 bytes for the length of the serialized vector /* TimelineMetadataHeader */ 4, 37, 101, 34, 0, 70, 0, 4, // checksum, size, format_version (4 + 2 + 2) /* TimelineMetadataBodyV2 */ @@ -650,7 +590,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - let metadata_ser_bytes = original_metadata.ser().unwrap(); + let metadata_ser_bytes = original_metadata.to_bytes().unwrap(); assert_eq!(metadata_ser_bytes, expected_bytes); let expected_metadata = { @@ -668,7 +608,7 @@ mod tests { temp_metadata.hdr = hdr; temp_metadata }; - let des_metadata = TimelineMetadata::des(&metadata_ser_bytes).unwrap(); + let des_metadata = TimelineMetadata::from_bytes(&metadata_ser_bytes).unwrap(); assert_eq!(des_metadata, expected_metadata); } } diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index 7d2e9b9a91c4..6233a3477e4d 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -38,14 +38,17 @@ pub struct IndexPart { /// that latest version stores. pub layer_metadata: HashMap, - // 'disk_consistent_lsn' is a copy of the 'disk_consistent_lsn' in the metadata. - // It's duplicated for convenience when reading the serialized structure, but is - // private because internally we would read from metadata instead. + /// Because of the trouble of eyeballing the legacy "metadata" field, we copied the + /// "disk_consistent_lsn" out. After version 7 this is no longer needed, but the name cannot be + /// reused. 
pub(super) disk_consistent_lsn: Lsn, - // TODO: later make this "rename" to "alias", rename field as "legacy_metadata" + // TODO: rename as "metadata" next week, keep the alias = "metadata_bytes", bump version Adding + // the "alias = metadata" was forgotten in #7693, so we have to use "rewrite = metadata_bytes" + // for backwards compatibility. #[serde( rename = "metadata_bytes", + alias = "metadata", with = "crate::tenant::metadata::modern_serde" )] pub metadata: TimelineMetadata, @@ -76,10 +79,11 @@ impl IndexPart { /// - 4: timeline_layers is fully removed. /// - 5: lineage was added /// - 6: last_aux_file_policy is added. - const LATEST_VERSION: usize = 6; + /// - 7: metadata_bytes is no longer written, but still read + const LATEST_VERSION: usize = 7; // Versions we may see when reading from a bucket. - pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6]; + pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6, 7]; pub const FILE_NAME: &'static str = "index_part.json"; @@ -95,7 +99,7 @@ impl IndexPart { } } - pub fn get_version(&self) -> usize { + pub fn version(&self) -> usize { self.version } @@ -217,9 +221,9 @@ impl Lineage { #[cfg(test)] mod tests { - use std::str::FromStr; - use super::*; + use std::str::FromStr; + use utils::id::TimelineId; #[test] fn v1_indexpart_is_parsed() { @@ -338,8 +342,7 @@ mod tests { ]), disk_consistent_lsn: "0/16960E8".parse::().unwrap(), metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), - deleted_at: Some(chrono::NaiveDateTime::parse_from_str( - "2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap()), + deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), lineage: Lineage::default(), last_aux_file_policy: None, }; @@ -515,8 +518,7 @@ mod tests { ]), disk_consistent_lsn: "0/16960E8".parse::().unwrap(), metadata: 
TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), - deleted_at: Some(chrono::NaiveDateTime::parse_from_str( - "2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap()), + deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), lineage: Lineage { reparenting_history_truncated: false, reparenting_history: vec![TimelineId::from_str("e1bfd8c633d713d279e6fcd2bcc15b6d").unwrap()], @@ -529,6 +531,60 @@ mod tests { assert_eq!(part, expected); } + #[test] + fn v7_indexpart_is_parsed() { + let example = r#"{ + "version": 7, + "layer_metadata":{ + "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 }, + "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 } + }, + "disk_consistent_lsn":"0/16960E8", + "metadata": { + "disk_consistent_lsn": "0/16960E8", + "prev_record_lsn": "0/1696070", + "ancestor_timeline": "e45a7f37d3ee2ff17dc14bf4f4e3f52e", + "ancestor_lsn": "0/0", + "latest_gc_cutoff_lsn": "0/1696070", + "initdb_lsn": "0/1696070", + "pg_version": 14 + }, + "deleted_at": "2023-07-31T09:00:00.123" + }"#; + + let expected = IndexPart { + version: 7, + layer_metadata: HashMap::from([ + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { + file_size: 25600000, + generation: Generation::none(), + shard: ShardIndex::unsharded() + }), + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { + file_size: 9007199254741001, + generation: Generation::none(), + shard: ShardIndex::unsharded() + }) + ]), + disk_consistent_lsn: "0/16960E8".parse::().unwrap(), + metadata: TimelineMetadata::new( + Lsn::from_str("0/16960E8").unwrap(), + Some(Lsn::from_str("0/1696070").unwrap()), + Some(TimelineId::from_str("e45a7f37d3ee2ff17dc14bf4f4e3f52e").unwrap()), + Lsn::INVALID, + Lsn::from_str("0/1696070").unwrap(), + Lsn::from_str("0/1696070").unwrap(), + 14, + ).with_recalculated_checksum().unwrap(), + deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), + lineage: Default::default(), + last_aux_file_policy: Default::default(), + }; + + let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); + assert_eq!(part, expected); + } + fn parse_naive_datetime(s: &str) -> NaiveDateTime { 
chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%f").unwrap() } diff --git a/s3_scrubber/src/checks.rs b/s3_scrubber/src/checks.rs index 44fb53696c00..4eb8580e32cf 100644 --- a/s3_scrubber/src/checks.rs +++ b/s3_scrubber/src/checks.rs @@ -78,17 +78,16 @@ pub(crate) fn branch_cleanup_and_check_errors( index_part_generation: _index_part_generation, s3_layers: _s3_layers, } => { - if !IndexPart::KNOWN_VERSIONS.contains(&index_part.get_version()) { - result.errors.push(format!( - "index_part.json version: {}", - index_part.get_version() - )) + if !IndexPart::KNOWN_VERSIONS.contains(&index_part.version()) { + result + .errors + .push(format!("index_part.json version: {}", index_part.version())) } - if &index_part.get_version() != IndexPart::KNOWN_VERSIONS.last().unwrap() { + if &index_part.version() != IndexPart::KNOWN_VERSIONS.last().unwrap() { result.warnings.push(format!( "index_part.json version is not latest: {}", - index_part.get_version() + index_part.version() )) } diff --git a/s3_scrubber/src/scan_pageserver_metadata.rs b/s3_scrubber/src/scan_pageserver_metadata.rs index 6ff9783875a5..af74ffa4cdbd 100644 --- a/s3_scrubber/src/scan_pageserver_metadata.rs +++ b/s3_scrubber/src/scan_pageserver_metadata.rs @@ -125,7 +125,7 @@ impl MetadataSummary { { *self .indices_by_version - .entry(index_part.get_version()) + .entry(index_part.version()) .or_insert(0) += 1; if let Err(e) = self.update_histograms(index_part) { diff --git a/test_runner/regress/test_layers_from_future.py b/test_runner/regress/test_layers_from_future.py index 18e51117865c..54d3b2d515c5 100644 --- a/test_runner/regress/test_layers_from_future.py +++ b/test_runner/regress/test_layers_from_future.py @@ -37,7 +37,8 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder): """ neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) - env = neon_env_builder.init_start() + env = neon_env_builder.init_configs() + env.start() env.pageserver.allowed_errors.extend( [".*Dropped remote consistent LSN updates.*", ".*Dropping stale deletions.*"] ) From d3b892e9ad39c50e869ada51b7f892666d4bf476 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 11 Jun 2024 17:10:05 +0300 Subject: [PATCH 09/41] test: fix duplicated harness name (#8010) We need unique tenant harness names in case you want to inspect the results of the last failing run. We are not using any proc macros to get the test name as there is no stable way of doing that, and there will not be one in the future, so we need to fix these duplicates. Also, clean up the duplicated tests to not mix `?` and `unwrap/assert`. 
--- pageserver/src/tenant.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 2e3ce45c2bc7..10842c15043f 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -6584,8 +6584,8 @@ mod tests { } #[tokio::test] - async fn test_metadata_tombstone_image_creation() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_metadata_tombstone_image_creation")?; + async fn test_metadata_tombstone_image_creation() { + let harness = TenantHarness::create("test_metadata_tombstone_image_creation").unwrap(); let (tenant, ctx) = harness.load().await; let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap(); @@ -6613,7 +6613,8 @@ mod tests { vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])], Lsn(0x30), ) - .await?; + .await + .unwrap(); let cancel = CancellationToken::new(); @@ -6628,23 +6629,24 @@ mod tests { }, &ctx, ) - .await?; + .await + .unwrap(); // Image layers are created at last_record_lsn let images = tline .inspect_image_layers(Lsn(0x30), &ctx) - .await? + .await + .unwrap() .into_iter() .filter(|(k, _)| k.is_metadata_key()) .collect::>(); assert_eq!(images.len(), 2); // the image layer should only contain two existing keys, tombstones should be removed. - - Ok(()) } #[tokio::test] - async fn test_metadata_tombstone_empty_image_creation() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_metadata_tombstone_image_creation")?; + async fn test_metadata_tombstone_empty_image_creation() { + let harness = + TenantHarness::create("test_metadata_tombstone_empty_image_creation").unwrap(); let (tenant, ctx) = harness.load().await; let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap(); @@ -6666,7 +6668,8 @@ mod tests { vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])], Lsn(0x30), ) - .await?; + .await + .unwrap(); let cancel = CancellationToken::new(); @@ -6681,17 +6684,17 @@ mod tests { }, &ctx, ) - .await?; + .await + .unwrap(); // Image layers are created at last_record_lsn let images = tline .inspect_image_layers(Lsn(0x30), &ctx) - .await? + .await + .unwrap() .into_iter() .filter(|(k, _)| k.is_metadata_key()) .collect::>(); assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created - - Ok(()) } } From 4c2100794b97c1f635bf9dcc9013d2b2c8733de6 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 11 Jun 2024 10:14:51 -0400 Subject: [PATCH 10/41] feat(pageserver): initial code sketch & test case for combined gc+compaction at gc_horizon (#7948) A demo for a building block for compaction. The GC-compaction operation iterates all layers below/intersect with the GC horizon, and do a full layer rewrite of all of them. The end result will be image layer covering the full keyspace at GC-horizon, and a bunch of delta layers above the GC-horizon. This helps us collect the garbages of the test_gc_feedback test case to reduce space amplification. This operation can be manually triggered using an HTTP API or be triggered based on some metrics. Actual method TBD. The test is very basic and it's very likely that most part of the algorithm will be rewritten. I would like to get this merged so that I can have a basic skeleton for the algorithm and then make incremental changes. 
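Stripped of the layer plumbing, the building block is: select every layer whose LSN range starts at or below the GC cutoff, merge their key/value history, and split it at the cutoff into one image per key (dropping the older, now-garbage history) plus the deltas above the cutoff. The toy sketch below shows only that split on in-memory data; it assumes every value is a full image (the real code also replays WAL records via `reconstruct_value`), and none of the names are the pageserver's.

```rust
// Toy model of the gc+compaction split, on plain in-memory data. It assumes every value is
// a full page image (the real pass also replays WAL deltas via `reconstruct_value`), and the
// types/names are illustrative, not the pageserver's.
use std::collections::BTreeMap;

type Key = u32;
type Lsn = u64;

/// Split the merged key history at `horizon`: the newest value at or below the horizon becomes
/// the per-key image (older history is garbage), everything above survives as deltas.
fn gc_compact(
    history: &[(Key, Lsn, String)],
    horizon: Lsn,
) -> (BTreeMap<Key, (Lsn, String)>, Vec<(Key, Lsn, String)>) {
    let mut image_at_horizon: BTreeMap<Key, (Lsn, String)> = BTreeMap::new();
    let mut deltas_above = Vec::new();
    for (key, lsn, value) in history {
        if *lsn <= horizon {
            let entry = image_at_horizon.entry(*key).or_insert((*lsn, value.clone()));
            if *lsn >= entry.0 {
                *entry = (*lsn, value.clone());
            }
        } else {
            deltas_above.push((*key, *lsn, value.clone()));
        }
    }
    (image_at_horizon, deltas_above)
}

fn main() {
    let history = vec![
        (1, 0x10, "value 1@0x10".to_string()),
        (1, 0x20, "value 1@0x20".to_string()), // latest version at or below the horizon
        (1, 0x40, "value 1@0x40".to_string()), // above the horizon, kept as a delta
        (2, 0x10, "value 2@0x10".to_string()),
    ];
    let (image, deltas) = gc_compact(&history, 0x30);
    assert_eq!(image[&1], (0x20, "value 1@0x20".to_string()));
    assert_eq!(image[&2], (0x10, "value 2@0x10".to_string()));
    assert_eq!(deltas, vec![(1, 0x40, "value 1@0x40".to_string())]);
}
```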
image --------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 160 ++++++++++++++++ .../src/tenant/storage_layer/delta_layer.rs | 39 ++++ .../src/tenant/storage_layer/image_layer.rs | 28 +++ pageserver/src/tenant/storage_layer/layer.rs | 31 ++++ pageserver/src/tenant/timeline.rs | 13 ++ pageserver/src/tenant/timeline/compaction.rs | 172 ++++++++++++++++++ .../src/tenant/timeline/layer_manager.rs | 12 ++ 7 files changed, 455 insertions(+) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 10842c15043f..f9ed6d3071eb 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4044,10 +4044,12 @@ mod tests { use crate::DEFAULT_PG_VERSION; use bytes::{Bytes, BytesMut}; use hex_literal::hex; + use itertools::Itertools; use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE}; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings}; use rand::{thread_rng, Rng}; + use storage_layer::PersistentLayerKey; use tests::storage_layer::ValuesReconstructState; use tests::timeline::{GetVectoredError, ShutdownMode}; use utils::bin_ser::BeSer; @@ -6697,4 +6699,162 @@ mod tests { .collect::>(); assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created } + + #[tokio::test] + async fn test_simple_bottom_most_compaction() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_simple_bottom_most_compaction")?; + let (tenant, ctx) = harness.load().await; + + fn get_key(id: u32) -> Key { + // using aux key here b/c they are guaranteed to be inside `collect_keyspace`. + let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + + // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon. + // + // | D1 | | D3 | + // -| |-- gc horizon ----------------- + // | | | D2 | + // --------- img layer ------------------ + // + // What we should expact from this compaction is: + // | Part of D1 | | D3 | + // --------- img layer with D1+D2 at GC horizon------------------ + + // img layer at 0x10 + let img_layer = (0..10) + .map(|id| (get_key(id), test_img(&format!("value {id}@0x10")))) + .collect_vec(); + + let delta1 = vec![ + // TODO: we should test a real delta record here, which requires us to add a variant of NeonWalRecord for testing purpose. 
+ ( + get_key(1), + Lsn(0x20), + Value::Image(test_img("value 1@0x20")), + ), + ( + get_key(2), + Lsn(0x30), + Value::Image(test_img("value 2@0x30")), + ), + ( + get_key(3), + Lsn(0x40), + Value::Image(test_img("value 3@0x40")), + ), + ]; + let delta2 = vec![ + ( + get_key(5), + Lsn(0x20), + Value::Image(test_img("value 5@0x20")), + ), + ( + get_key(6), + Lsn(0x20), + Value::Image(test_img("value 6@0x20")), + ), + ]; + let delta3 = vec![ + ( + get_key(8), + Lsn(0x40), + Value::Image(test_img("value 8@0x40")), + ), + ( + get_key(9), + Lsn(0x40), + Value::Image(test_img("value 9@0x40")), + ), + ]; + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + vec![delta1, delta2, delta3], // delta layers + vec![(Lsn(0x10), img_layer)], // image layers + Lsn(0x50), + ) + .await?; + { + // Update GC info + let mut guard = tline.gc_info.write().unwrap(); + guard.cutoffs.pitr = Lsn(0x30); + guard.cutoffs.horizon = Lsn(0x30); + } + + let cancel = CancellationToken::new(); + tline.compact_with_gc(&cancel, &ctx).await.unwrap(); + + // Check if the image layer at the GC horizon contains exactly what we want + let image_at_gc_horizon = tline + .inspect_image_layers(Lsn(0x30), &ctx) + .await + .unwrap() + .into_iter() + .filter(|(k, _)| k.is_metadata_key()) + .collect::>(); + + assert_eq!(image_at_gc_horizon.len(), 10); + let expected_lsn = [0x10, 0x20, 0x30, 0x10, 0x10, 0x20, 0x20, 0x10, 0x10, 0x10]; + for idx in 0..10 { + assert_eq!( + image_at_gc_horizon[idx], + ( + get_key(idx as u32), + test_img(&format!("value {idx}@{:#x}", expected_lsn[idx])) + ) + ); + } + + // Check if old layers are removed / new layers have the expected LSN + let mut all_layers = tline.inspect_historic_layers().await.unwrap(); + all_layers.sort_by(|k1, k2| { + ( + k1.is_delta, + k1.key_range.start, + k1.key_range.end, + k1.lsn_range.start, + k1.lsn_range.end, + ) + .cmp(&( + k2.is_delta, + k2.key_range.start, + k2.key_range.end, + k2.lsn_range.start, + k2.lsn_range.end, + )) + }); + assert_eq!( + all_layers, + vec![ + // Image layer at GC horizon + PersistentLayerKey { + key_range: Key::MIN..get_key(10), + lsn_range: Lsn(0x30)..Lsn(0x31), + is_delta: false + }, + // The delta layer that is cut in the middle + PersistentLayerKey { + key_range: Key::MIN..get_key(9), + lsn_range: Lsn(0x30)..Lsn(0x41), + is_delta: true + }, + // The delta layer we created and should not be picked for the compaction + PersistentLayerKey { + key_range: get_key(8)..get_key(10), + lsn_range: Lsn(0x40)..Lsn(0x41), + is_delta: true + } + ] + ); + + Ok(()) + } } diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 999e2e86796e..eb7cf81643f8 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -929,6 +929,45 @@ impl DeltaLayerInner { Ok(()) } + /// Load all key-values in the delta layer, should be replaced by an iterator-based interface in the future. 
+ #[cfg(test)] + pub(super) async fn load_key_values( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let index_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new( + self.index_start_blk, + self.index_root_blk, + block_reader, + ); + let mut result = Vec::new(); + let mut stream = + Box::pin(self.stream_index_forwards(&index_reader, &[0; DELTA_KEY_SIZE], ctx)); + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let cursor = block_reader.block_cursor(); + let mut buf = Vec::new(); + while let Some(item) = stream.next().await { + let (key, lsn, pos) = item?; + // TODO: dedup code with get_reconstruct_value + // TODO: ctx handling and sharding + cursor + .read_blob_into_buf(pos.pos(), &mut buf, ctx) + .await + .with_context(|| { + format!("Failed to read blob from virtual file {}", self.file.path) + })?; + let val = Value::des(&buf).with_context(|| { + format!( + "Failed to deserialize file blob from virtual file {}", + self.file.path + ) + })?; + result.push((key, lsn, val)); + } + Ok(result) + } + async fn plan_reads( keyspace: &KeySpace, lsn_range: Range, diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 285618b14690..06e2f0938437 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -485,6 +485,34 @@ impl ImageLayerInner { Ok(()) } + /// Load all key-values in the delta layer, should be replaced by an iterator-based interface in the future. + #[cfg(test)] + pub(super) async fn load_key_values( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let tree_reader = + DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, &block_reader); + let mut result = Vec::new(); + let mut stream = Box::pin(tree_reader.get_stream_from(&[0; KEY_SIZE], ctx)); + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let cursor = block_reader.block_cursor(); + while let Some(item) = stream.next().await { + // TODO: dedup code with get_reconstruct_value + let (raw_key, offset) = item?; + let key = Key::from_slice(&raw_key[..KEY_SIZE]); + // TODO: ctx handling and sharding + let blob = cursor + .read_blob(offset, ctx) + .await + .with_context(|| format!("failed to read value from offset {}", offset))?; + let value = Bytes::from(blob); + result.push((key, self.lsn, Value::Image(value))); + } + Ok(result) + } + /// Traverse the layer's index to build read operations on the overlap of the input keyspace /// and the keys in this layer. /// diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 18f9ba4ef899..32acb3f0cd0f 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -388,6 +388,23 @@ impl Layer { }) } + /// Get all key/values in the layer. Should be replaced with an iterator-based API in the future. + #[cfg(test)] + pub(crate) async fn load_key_values( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let layer = self + .0 + .get_or_maybe_download(true, Some(ctx)) + .await + .map_err(|err| match err { + DownloadError::DownloadCancelled => GetVectoredError::Cancelled, + other => GetVectoredError::Other(anyhow::anyhow!(other)), + })?; + layer.load_key_values(&self.0, ctx).await + } + /// Download the layer if evicted. 
/// /// Will not error when the layer is already downloaded. @@ -1757,6 +1774,20 @@ impl DownloadedLayer { } } + #[cfg(test)] + async fn load_key_values( + &self, + owner: &Arc, + ctx: &RequestContext, + ) -> anyhow::Result> { + use LayerKind::*; + + match self.get(owner, ctx).await? { + Delta(d) => d.load_key_values(ctx).await, + Image(i) => i.load_key_values(ctx).await, + } + } + async fn dump(&self, owner: &Arc, ctx: &RequestContext) -> anyhow::Result<()> { use LayerKind::*; match self.get(owner, ctx).await? { diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 6da0f9d91c25..54a4ceeaf3a3 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -5549,6 +5549,19 @@ impl Timeline { all_data.sort(); Ok(all_data) } + + /// Get all historic layer descriptors in the layer map + #[cfg(test)] + pub(crate) async fn inspect_historic_layers( + self: &Arc, + ) -> anyhow::Result> { + let mut layers = Vec::new(); + let guard = self.layers.read().await; + for layer in guard.layer_map().iter_historic_layers() { + layers.push(layer.key()); + } + Ok(layers) + } } type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index d8de6aee7cb9..8a95029f33a6 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -952,6 +952,178 @@ impl Timeline { adaptor.flush_updates().await?; Ok(()) } + + /// An experimental compaction building block that combines compaction with garbage collection. + /// + /// The current implementation picks all delta + image layers that are below or intersecting with + /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta + /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon, + /// and create delta layers with all deltas >= gc horizon. + #[cfg(test)] + pub(crate) async fn compact_with_gc( + self: &Arc, + _cancel: &CancellationToken, + ctx: &RequestContext, + ) -> Result<(), CompactionError> { + use crate::tenant::storage_layer::ValueReconstructState; + // Step 0: pick all delta layers + image layers below/intersect with the GC horizon. + // The layer selection has the following properties: + // 1. If a layer is in the selection, all layers below it are in the selection. + // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection. + let (layer_selection, gc_cutoff) = { + let guard = self.layers.read().await; + let layers = guard.layer_map(); + let gc_info = self.gc_info.read().unwrap(); + let gc_cutoff = Lsn::min(gc_info.cutoffs.horizon, gc_info.cutoffs.pitr); + let mut selected_layers = Vec::new(); + // TODO: consider retain_lsns + drop(gc_info); + for desc in layers.iter_historic_layers() { + if desc.get_lsn_range().start <= gc_cutoff { + selected_layers.push(guard.get_from_desc(&desc)); + } + } + (selected_layers, gc_cutoff) + }; + // Step 1: (In the future) construct a k-merge iterator over all layers. For now, simply collect all keys + LSNs. + let mut all_key_values = Vec::new(); + for layer in &layer_selection { + all_key_values.extend(layer.load_key_values(ctx).await?); + } + // Key small to large, LSN low to high, if the same LSN has both image and delta due to the merge of delta layers and + // image layers, make image appear later than delta. 
+ struct ValueWrapper<'a>(&'a crate::repository::Value); + impl Ord for ValueWrapper<'_> { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + use crate::repository::Value; + use std::cmp::Ordering; + match (self.0, other.0) { + (Value::Image(_), Value::WalRecord(_)) => Ordering::Greater, + (Value::WalRecord(_), Value::Image(_)) => Ordering::Less, + _ => Ordering::Equal, + } + } + } + impl PartialOrd for ValueWrapper<'_> { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + impl PartialEq for ValueWrapper<'_> { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == std::cmp::Ordering::Equal + } + } + impl Eq for ValueWrapper<'_> {} + all_key_values.sort_by(|(k1, l1, v1), (k2, l2, v2)| { + (k1, l1, ValueWrapper(v1)).cmp(&(k2, l2, ValueWrapper(v2))) + }); + let max_lsn = all_key_values + .iter() + .map(|(_, lsn, _)| lsn) + .max() + .copied() + .unwrap() + + 1; + // Step 2: Produce images+deltas. TODO: ensure newly-produced delta does not overlap with other deltas. + // Data of the same key. + let mut accumulated_values = Vec::new(); + let mut last_key = all_key_values.first().unwrap().0; // TODO: assert all_key_values not empty + + /// Take a list of images and deltas, produce an image at the GC horizon, and a list of deltas above the GC horizon. + async fn flush_accumulated_states( + tline: &Arc, + key: Key, + accumulated_values: &[&(Key, Lsn, crate::repository::Value)], + horizon: Lsn, + ) -> anyhow::Result<(Vec<(Key, Lsn, crate::repository::Value)>, bytes::Bytes)> { + let mut base_image = None; + let mut keys_above_horizon = Vec::new(); + let mut delta_above_base_image = Vec::new(); + // We have a list of deltas/images. We want to create image layers while collect garbages. + for (key, lsn, val) in accumulated_values.iter().rev() { + if *lsn > horizon { + keys_above_horizon.push((*key, *lsn, val.clone())); // TODO: ensure one LSN corresponds to either delta or image instead of both + } else if *lsn <= horizon { + match val { + crate::repository::Value::Image(image) => { + if lsn <= &horizon { + base_image = Some((*lsn, image.clone())); + break; + } + } + crate::repository::Value::WalRecord(wal) => { + delta_above_base_image.push((*lsn, wal.clone())); + } + } + } + } + delta_above_base_image.reverse(); + keys_above_horizon.reverse(); + let state = ValueReconstructState { + img: base_image, + records: delta_above_base_image, + }; + let img = tline.reconstruct_value(key, horizon, state).await?; + Ok((keys_above_horizon, img)) + } + + let mut delta_layer_writer = DeltaLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + all_key_values.first().unwrap().0, + gc_cutoff..max_lsn, // TODO: off by one? 
+ ctx, + ) + .await?; + let mut image_layer_writer = ImageLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + &(all_key_values.first().unwrap().0..all_key_values.last().unwrap().0.next()), + gc_cutoff, + ctx, + ) + .await?; + + for item @ (key, _, _) in &all_key_values { + if &last_key == key { + accumulated_values.push(item); + } else { + let (deltas, image) = + flush_accumulated_states(self, last_key, &accumulated_values, gc_cutoff) + .await?; + image_layer_writer.put_image(last_key, image, ctx).await?; + for (key, lsn, val) in deltas { + delta_layer_writer.put_value(key, lsn, val, ctx).await?; + } + accumulated_values.clear(); + accumulated_values.push(item); + last_key = *key; + } + } + let (deltas, image) = + flush_accumulated_states(self, last_key, &accumulated_values, gc_cutoff).await?; + image_layer_writer.put_image(last_key, image, ctx).await?; + for (key, lsn, val) in deltas { + delta_layer_writer.put_value(key, lsn, val, ctx).await?; + } + accumulated_values.clear(); + // TODO: split layers + let delta_layer = delta_layer_writer.finish(last_key, self, ctx).await?; + let image_layer = image_layer_writer.finish(self, ctx).await?; + // Step 3: Place back to the layer map. + { + let mut guard = self.layers.write().await; + guard.finish_gc_compaction( + &layer_selection, + &[delta_layer.clone(), image_layer.clone()], + &self.metrics, + ) + }; + Ok(()) + } } struct TimelineAdaptor { diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 21e64d562af7..550a9a567a43 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -226,6 +226,18 @@ impl LayerManager { updates.flush(); } + /// Called when a GC-compaction is completed. + #[cfg(test)] + pub(crate) fn finish_gc_compaction( + &mut self, + compact_from: &[Layer], + compact_to: &[ResidentLayer], + metrics: &TimelineMetrics, + ) { + // We can simply reuse compact l0 logic. Use a different function name to indicate a different type of layer map modification. + self.finish_compact_l0(compact_from, compact_to, metrics) + } + /// Called when compaction is completed. pub(crate) fn rewrite_layers( &mut self, From 126bcc3794a41e3b776108f826c68c6871044876 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Tue, 11 Jun 2024 16:03:25 +0100 Subject: [PATCH 11/41] storcon: track number of attached shards for each node (#8011) ## Problem The storage controller does not track the number of shards attached to a given pageserver. This is a requirement for various scheduling operations (e.g. draining and filling will use this to figure out if the cluster is balanced) ## Summary of Changes Track the number of shards attached to each node. Related https://github.com/neondatabase/neon/issues/7387 --- storage_controller/src/scheduler.rs | 101 ++++++++++++++++++------- storage_controller/src/service.rs | 2 +- storage_controller/src/tenant_shard.rs | 56 +++++++++----- 3 files changed, 114 insertions(+), 45 deletions(-) diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index 3ff0d87988d7..4ab85509dc4b 100644 --- a/storage_controller/src/scheduler.rs +++ b/storage_controller/src/scheduler.rs @@ -29,6 +29,8 @@ pub enum MaySchedule { struct SchedulerNode { /// How many shards are currently scheduled on this node, via their [`crate::tenant_shard::IntentState`]. shard_count: usize, + /// How many shards are currently attached on this node, via their [`crate::tenant_shard::IntentState`]. 
+    attached_shard_count: usize,
 
     /// Whether this node is currently elegible to have new shards scheduled (this is derived
     /// from a node's availability state and scheduling policy).
@@ -42,7 +44,9 @@ impl PartialEq for SchedulerNode {
             (MaySchedule::Yes(_), MaySchedule::Yes(_)) | (MaySchedule::No, MaySchedule::No)
         );
 
-        may_schedule_matches && self.shard_count == other.shard_count
+        may_schedule_matches
+            && self.shard_count == other.shard_count
+            && self.attached_shard_count == other.attached_shard_count
     }
 }
 
@@ -138,6 +142,15 @@ impl ScheduleContext {
     }
 }
 
+pub(crate) enum RefCountUpdate {
+    PromoteSecondary,
+    Attach,
+    Detach,
+    DemoteAttached,
+    AddSecondary,
+    RemoveSecondary,
+}
+
 impl Scheduler {
     pub(crate) fn new<'a>(nodes: impl Iterator<Item = &'a Node>) -> Self {
         let mut scheduler_nodes = HashMap::new();
@@ -146,6 +159,7 @@ impl Scheduler {
                 node.get_id(),
                 SchedulerNode {
                     shard_count: 0,
+                    attached_shard_count: 0,
                     may_schedule: node.may_schedule(),
                 },
             );
@@ -171,6 +185,7 @@ impl Scheduler {
                 node.get_id(),
                 SchedulerNode {
                     shard_count: 0,
+                    attached_shard_count: 0,
                     may_schedule: node.may_schedule(),
                 },
             );
@@ -179,7 +194,10 @@
         for shard in shards {
             if let Some(node_id) = shard.intent.get_attached() {
                 match expect_nodes.get_mut(node_id) {
-                    Some(node) => node.shard_count += 1,
+                    Some(node) => {
+                        node.shard_count += 1;
+                        node.attached_shard_count += 1;
+                    }
                     None => anyhow::bail!(
                         "Tenant {} references nonexistent node {}",
                         shard.tenant_shard_id,
@@ -227,31 +245,42 @@ impl Scheduler {
         Ok(())
     }
 
-    /// Increment the reference count of a node. This reference count is used to guide scheduling
-    /// decisions, not for memory management: it represents one tenant shard whose IntentState targets
-    /// this node.
+    /// Update the reference counts of a node. These reference counts are used to guide scheduling
+    /// decisions, not for memory management: they represent the number of tenant shards whose IntentState
+    /// targets this node and the number of tenant shards whose IntentState is attached to this
+    /// node.
     ///
     /// It is an error to call this for a node that is not known to the scheduler (i.e. passed into
     /// [`Self::new`] or [`Self::node_upsert`])
-    pub(crate) fn node_inc_ref(&mut self, node_id: NodeId) {
-        let Some(node) = self.nodes.get_mut(&node_id) else {
-            tracing::error!("Scheduler missing node {node_id}");
-            debug_assert!(false);
-            return;
-        };
-
-        node.shard_count += 1;
-    }
-
-    /// Decrement a node's reference count. Inverse of [`Self::node_inc_ref`].
- pub(crate) fn node_dec_ref(&mut self, node_id: NodeId) { + pub(crate) fn update_node_ref_counts(&mut self, node_id: NodeId, update: RefCountUpdate) { let Some(node) = self.nodes.get_mut(&node_id) else { debug_assert!(false); tracing::error!("Scheduler missing node {node_id}"); return; }; - node.shard_count -= 1; + match update { + RefCountUpdate::PromoteSecondary => { + node.attached_shard_count += 1; + } + RefCountUpdate::Attach => { + node.shard_count += 1; + node.attached_shard_count += 1; + } + RefCountUpdate::Detach => { + node.shard_count -= 1; + node.attached_shard_count -= 1; + } + RefCountUpdate::DemoteAttached => { + node.attached_shard_count -= 1; + } + RefCountUpdate::AddSecondary => { + node.shard_count += 1; + } + RefCountUpdate::RemoveSecondary => { + node.shard_count -= 1; + } + } } pub(crate) fn node_upsert(&mut self, node: &Node) { @@ -263,6 +292,7 @@ impl Scheduler { Vacant(entry) => { entry.insert(SchedulerNode { shard_count: 0, + attached_shard_count: 0, may_schedule: node.may_schedule(), }); } @@ -385,6 +415,11 @@ impl Scheduler { pub(crate) fn get_node_shard_count(&self, node_id: NodeId) -> usize { self.nodes.get(&node_id).unwrap().shard_count } + + #[cfg(test)] + pub(crate) fn get_node_attached_shard_count(&self, node_id: NodeId) -> usize { + self.nodes.get(&node_id).unwrap().attached_shard_count + } } #[cfg(test)] @@ -437,18 +472,28 @@ mod tests { let scheduled = scheduler.schedule_shard(&[], &context)?; t2_intent.set_attached(&mut scheduler, Some(scheduled)); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 1); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1); + + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 1); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1); let scheduled = scheduler.schedule_shard(&t1_intent.all_pageservers(), &context)?; t1_intent.push_secondary(&mut scheduler, scheduled); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 2); + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 1); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1); + + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 2); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1); t1_intent.clear(&mut scheduler); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 0); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 0); + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 1); + + let total_attached = scheduler.get_node_attached_shard_count(NodeId(1)) + + scheduler.get_node_attached_shard_count(NodeId(2)); + assert_eq!(total_attached, 1); if cfg!(debug_assertions) { // Dropping an IntentState without clearing it causes a panic in debug mode, @@ -459,8 +504,12 @@ mod tests { assert!(result.is_err()); } else { t2_intent.clear(&mut scheduler); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 0); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 0); + + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 0); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 0); + + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 0); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 0); } Ok(()) diff --git 
a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 756dc10a2ab6..1e81b5c5a2ca 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -4312,7 +4312,7 @@ impl Service { continue; } - if tenant_shard.intent.demote_attached(node_id) { + if tenant_shard.intent.demote_attached(scheduler, node_id) { tenant_shard.sequence = tenant_shard.sequence.next(); // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index dda17f9887dd..77bbf4c60491 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -8,7 +8,7 @@ use crate::{ metrics::{self, ReconcileCompleteLabelGroup, ReconcileOutcome}, persistence::TenantShardPersistence, reconciler::ReconcileUnits, - scheduler::{AffinityScore, MaySchedule, ScheduleContext}, + scheduler::{AffinityScore, MaySchedule, RefCountUpdate, ScheduleContext}, }; use pageserver_api::controller_api::{PlacementPolicy, ShardSchedulingPolicy}; use pageserver_api::{ @@ -153,7 +153,7 @@ impl IntentState { } pub(crate) fn single(scheduler: &mut Scheduler, node_id: Option) -> Self { if let Some(node_id) = node_id { - scheduler.node_inc_ref(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::Attach); } Self { attached: node_id, @@ -164,10 +164,10 @@ impl IntentState { pub(crate) fn set_attached(&mut self, scheduler: &mut Scheduler, new_attached: Option) { if self.attached != new_attached { if let Some(old_attached) = self.attached.take() { - scheduler.node_dec_ref(old_attached); + scheduler.update_node_ref_counts(old_attached, RefCountUpdate::Detach); } if let Some(new_attached) = &new_attached { - scheduler.node_inc_ref(*new_attached); + scheduler.update_node_ref_counts(*new_attached, RefCountUpdate::Attach); } self.attached = new_attached; } @@ -177,22 +177,27 @@ impl IntentState { /// secondary to attached while maintaining the scheduler's reference counts. pub(crate) fn promote_attached( &mut self, - _scheduler: &mut Scheduler, + scheduler: &mut Scheduler, promote_secondary: NodeId, ) { // If we call this with a node that isn't in secondary, it would cause incorrect // scheduler reference counting, since we assume the node is already referenced as a secondary. debug_assert!(self.secondary.contains(&promote_secondary)); - // TODO: when scheduler starts tracking attached + secondary counts separately, we will - // need to call into it here. 
self.secondary.retain(|n| n != &promote_secondary); + + let demoted = self.attached; self.attached = Some(promote_secondary); + + scheduler.update_node_ref_counts(promote_secondary, RefCountUpdate::PromoteSecondary); + if let Some(demoted) = demoted { + scheduler.update_node_ref_counts(demoted, RefCountUpdate::DemoteAttached); + } } pub(crate) fn push_secondary(&mut self, scheduler: &mut Scheduler, new_secondary: NodeId) { debug_assert!(!self.secondary.contains(&new_secondary)); - scheduler.node_inc_ref(new_secondary); + scheduler.update_node_ref_counts(new_secondary, RefCountUpdate::AddSecondary); self.secondary.push(new_secondary); } @@ -200,27 +205,27 @@ impl IntentState { pub(crate) fn remove_secondary(&mut self, scheduler: &mut Scheduler, node_id: NodeId) { let index = self.secondary.iter().position(|n| *n == node_id); if let Some(index) = index { - scheduler.node_dec_ref(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::RemoveSecondary); self.secondary.remove(index); } } pub(crate) fn clear_secondary(&mut self, scheduler: &mut Scheduler) { for secondary in self.secondary.drain(..) { - scheduler.node_dec_ref(secondary); + scheduler.update_node_ref_counts(secondary, RefCountUpdate::RemoveSecondary); } } /// Remove the last secondary node from the list of secondaries pub(crate) fn pop_secondary(&mut self, scheduler: &mut Scheduler) { if let Some(node_id) = self.secondary.pop() { - scheduler.node_dec_ref(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::RemoveSecondary); } } pub(crate) fn clear(&mut self, scheduler: &mut Scheduler) { if let Some(old_attached) = self.attached.take() { - scheduler.node_dec_ref(old_attached); + scheduler.update_node_ref_counts(old_attached, RefCountUpdate::Detach); } self.clear_secondary(scheduler); @@ -251,12 +256,11 @@ impl IntentState { /// forget the location on the offline node. /// /// Returns true if a change was made - pub(crate) fn demote_attached(&mut self, node_id: NodeId) -> bool { + pub(crate) fn demote_attached(&mut self, scheduler: &mut Scheduler, node_id: NodeId) -> bool { if self.attached == Some(node_id) { - // TODO: when scheduler starts tracking attached + secondary counts separately, we will - // need to call into it here. 
             self.attached = None;
             self.secondary.push(node_id);
+            scheduler.update_node_ref_counts(node_id, RefCountUpdate::DemoteAttached);
             true
         } else {
             false
@@ -593,7 +597,7 @@ impl TenantShard {
             Secondary => {
                 if let Some(node_id) = self.intent.get_attached() {
                     // Populate secondary by demoting the attached node
-                    self.intent.demote_attached(*node_id);
+                    self.intent.demote_attached(scheduler, *node_id);
                     modified = true;
                 } else if self.intent.secondary.is_empty() {
                     // Populate secondary by scheduling a fresh node
@@ -783,7 +787,7 @@ impl TenantShard {
                 old_attached_node_id,
                 new_attached_node_id,
             }) => {
-                self.intent.demote_attached(old_attached_node_id);
+                self.intent.demote_attached(scheduler, old_attached_node_id);
                 self.intent
                     .promote_attached(scheduler, new_attached_node_id);
             }
@@ -1321,7 +1325,9 @@ pub(crate) mod tests {
         assert_ne!(attached_node_id, secondary_node_id);
 
         // Notifying the attached node is offline should demote it to a secondary
-        let changed = tenant_shard.intent.demote_attached(attached_node_id);
+        let changed = tenant_shard
+            .intent
+            .demote_attached(&mut scheduler, attached_node_id);
         assert!(changed);
         assert!(tenant_shard.intent.attached.is_none());
         assert_eq!(tenant_shard.intent.secondary.len(), 2);
@@ -1604,7 +1610,14 @@ pub(crate) mod tests {
         // We should see equal number of locations on the two nodes.
         assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 4);
+        // Scheduling does not consider the number of attachments when picking the initial
+        // pageserver to attach to (hence the assertion that all primaries are on the
+        // same node).
+        // TODO: Tweak the scheduling to evenly distribute attachments for new shards.
+        assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 4);
+
         assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 4);
+        assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 0);
 
         // Add another two nodes: we should see the shards spread out when their optimize
         // methods are called
@@ -1613,9 +1626,16 @@
         optimize_til_idle(&nodes, &mut scheduler, &mut shards);
 
         assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 2);
+        assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1);
+
         assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 2);
+        assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1);
+
         assert_eq!(scheduler.get_node_shard_count(NodeId(3)), 2);
+        assert_eq!(scheduler.get_node_attached_shard_count(NodeId(3)), 1);
+
         assert_eq!(scheduler.get_node_shard_count(NodeId(4)), 2);
+        assert_eq!(scheduler.get_node_attached_shard_count(NodeId(4)), 1);
 
         for shard in shards.iter_mut() {
             shard.intent.clear(&mut scheduler);

From 7121db3669349ad8be323f55d84906fe1f62af4f Mon Sep 17 00:00:00 2001
From: Vlad Lazar
Date: Tue, 11 Jun 2024 17:39:38 +0100
Subject: [PATCH 12/41] storcon_cli: add 'drain' command (#8007)

## Problem

We need the ability to prepare a subset of storage controller managed
pageservers for decommissioning. The storage controller cannot currently
express this in terms of scheduling constraints (it's a pretty special case,
so I'm not sure it even should).

## Summary of Changes

A new `drain` command is added to `storcon_cli`. It takes a set of nodes to
drain and migrates primary attachments outside of said set. Simple
round-robin assignment is used under the assumption that nodes outside of
the draining set are evenly balanced.

Note that secondary locations are not migrated.
This is fine for staging, but the migration API will have to be extended for prod in order to allow migration of secondaries as well. I've tested this out against a neon local cluster. The immediate use for this command will be to migrate staging to ARM(Arch64) pageservers. Related https://github.com/neondatabase/cloud/issues/14029 --- Cargo.lock | 1 + control_plane/storcon_cli/Cargo.toml | 1 + control_plane/storcon_cli/src/main.rs | 208 ++++++++++++++++++++++++++ 3 files changed, 210 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index dbbf330cf9eb..66879fd74374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5820,6 +5820,7 @@ dependencies = [ "anyhow", "clap", "comfy-table", + "futures", "humantime", "hyper 0.14.26", "pageserver_api", diff --git a/control_plane/storcon_cli/Cargo.toml b/control_plane/storcon_cli/Cargo.toml index ed3462961f96..f96f0084b2e3 100644 --- a/control_plane/storcon_cli/Cargo.toml +++ b/control_plane/storcon_cli/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true anyhow.workspace = true clap.workspace = true comfy-table.workspace = true +futures.workspace = true humantime.workspace = true hyper.workspace = true pageserver_api.workspace = true diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 05c4acdf90c8..8c84911d33c9 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -1,3 +1,4 @@ +use futures::StreamExt; use std::{collections::HashMap, str::FromStr, time::Duration}; use clap::{Parser, Subcommand}; @@ -148,6 +149,22 @@ enum Command { #[arg(long)] threshold: humantime::Duration, }, + // Drain a set of specified pageservers by moving the primary attachments to pageservers + // outside of the specified set. + Drain { + // Set of pageserver node ids to drain. + #[arg(long)] + nodes: Vec, + // Optional: migration concurrency (default is 8) + #[arg(long)] + concurrency: Option, + // Optional: maximum number of shards to migrate + #[arg(long)] + max_shards: Option, + // Optional: when set to true, nothing is migrated, but the plan is printed to stdout + #[arg(long)] + dry_run: Option, + }, } #[derive(Parser)] @@ -737,6 +754,197 @@ async fn main() -> anyhow::Result<()> { }) .await?; } + Command::Drain { + nodes, + concurrency, + max_shards, + dry_run, + } => { + // Load the list of nodes, split them up into the drained and filled sets, + // and validate that draining is possible. + let node_descs = storcon_client + .dispatch::<(), Vec>( + Method::GET, + "control/v1/node".to_string(), + None, + ) + .await?; + + let mut node_to_drain_descs = Vec::new(); + let mut node_to_fill_descs = Vec::new(); + + for desc in node_descs { + let to_drain = nodes.iter().any(|id| *id == desc.id); + if to_drain { + node_to_drain_descs.push(desc); + } else { + node_to_fill_descs.push(desc); + } + } + + if nodes.len() != node_to_drain_descs.len() { + anyhow::bail!("Drain requested for node which doesn't exist.") + } + + let can_fill = node_to_fill_descs + .iter() + .filter(|desc| { + matches!(desc.availability, NodeAvailabilityWrapper::Active) + && matches!( + desc.scheduling, + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling + ) + }) + .any(|_| true); + + if !can_fill { + anyhow::bail!("There are no nodes to drain to") + } + + // Set the node scheduling policy to draining for the nodes which + // we plan to drain. 
+ for node_desc in node_to_drain_descs.iter() { + let req = NodeConfigureRequest { + node_id: node_desc.id, + availability: None, + scheduling: Some(NodeSchedulingPolicy::Draining), + }; + + storcon_client + .dispatch::<_, ()>( + Method::PUT, + format!("control/v1/node/{}/config", node_desc.id), + Some(req), + ) + .await?; + } + + // Perform the drain: move each tenant shard scheduled on a node to + // be drained to a node which is being filled. A simple round robin + // strategy is used to pick the new node. + let tenants = storcon_client + .dispatch::<(), Vec>( + Method::GET, + "control/v1/tenant".to_string(), + None, + ) + .await?; + + let mut selected_node_idx = 0; + + struct DrainMove { + tenant_shard_id: TenantShardId, + from: NodeId, + to: NodeId, + } + + let mut moves: Vec = Vec::new(); + + let shards = tenants + .into_iter() + .flat_map(|tenant| tenant.shards.into_iter()); + for shard in shards { + if let Some(max_shards) = max_shards { + if moves.len() >= max_shards { + println!( + "Stop planning shard moves since the requested maximum was reached" + ); + break; + } + } + + let should_migrate = { + if let Some(attached_to) = shard.node_attached { + node_to_drain_descs + .iter() + .map(|desc| desc.id) + .any(|id| id == attached_to) + } else { + false + } + }; + + if !should_migrate { + continue; + } + + moves.push(DrainMove { + tenant_shard_id: shard.tenant_shard_id, + from: shard + .node_attached + .expect("We only migrate attached tenant shards"), + to: node_to_fill_descs[selected_node_idx].id, + }); + selected_node_idx = (selected_node_idx + 1) % node_to_fill_descs.len(); + } + + let total_moves = moves.len(); + + if dry_run == Some(true) { + println!("Dryrun requested. Planned {total_moves} moves:"); + for mv in &moves { + println!("{}: {} -> {}", mv.tenant_shard_id, mv.from, mv.to) + } + + return Ok(()); + } + + const DEFAULT_MIGRATE_CONCURRENCY: usize = 8; + let mut stream = futures::stream::iter(moves) + .map(|mv| { + let client = Client::new(cli.api.clone(), cli.jwt.clone()); + async move { + client + .dispatch::( + Method::PUT, + format!("control/v1/tenant/{}/migrate", mv.tenant_shard_id), + Some(TenantShardMigrateRequest { + tenant_shard_id: mv.tenant_shard_id, + node_id: mv.to, + }), + ) + .await + .map_err(|e| (mv.tenant_shard_id, mv.from, mv.to, e)) + } + }) + .buffered(concurrency.unwrap_or(DEFAULT_MIGRATE_CONCURRENCY)); + + let mut success = 0; + let mut failure = 0; + + while let Some(res) = stream.next().await { + match res { + Ok(_) => { + success += 1; + } + Err((tenant_shard_id, from, to, error)) => { + failure += 1; + println!( + "Failed to migrate {} from node {} to node {}: {}", + tenant_shard_id, from, to, error + ); + } + } + + if (success + failure) % 20 == 0 { + println!( + "Processed {}/{} shards: {} succeeded, {} failed", + success + failure, + total_moves, + success, + failure + ); + } + } + + println!( + "Processed {}/{} shards: {} succeeded, {} failed", + success + failure, + total_moves, + success, + failure + ); + } } Ok(()) From 78a59b94f59a9679a6c8f3759d43b05de238ecbd Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 11 Jun 2024 23:19:18 +0300 Subject: [PATCH 13/41] Copy editor config for the neon extension from PostgreSQL (#8009) This makes IDEs and github diff format the code the same way as PostgreSQL sources, which is the style we try to maintain. 
--- pgxn/.dir-locals.el | 19 +++++++++++++++++++ pgxn/.editorconfig | 14 ++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 pgxn/.dir-locals.el create mode 100644 pgxn/.editorconfig diff --git a/pgxn/.dir-locals.el b/pgxn/.dir-locals.el new file mode 100644 index 000000000000..ab6208b6983a --- /dev/null +++ b/pgxn/.dir-locals.el @@ -0,0 +1,19 @@ +;; see also src/tools/editors/emacs.samples for more complete settings + +((c-mode . ((c-basic-offset . 4) + (c-file-style . "bsd") + (fill-column . 78) + (indent-tabs-mode . t) + (tab-width . 4))) + (nxml-mode . ((fill-column . 78) + (indent-tabs-mode . nil))) + (perl-mode . ((perl-indent-level . 4) + (perl-continued-statement-offset . 2) + (perl-continued-brace-offset . -2) + (perl-brace-offset . 0) + (perl-brace-imaginary-offset . 0) + (perl-label-offset . -2) + (indent-tabs-mode . t) + (tab-width . 4))) + (sgml-mode . ((fill-column . 78) + (indent-tabs-mode . nil)))) diff --git a/pgxn/.editorconfig b/pgxn/.editorconfig new file mode 100644 index 000000000000..d69a3d1dc4e0 --- /dev/null +++ b/pgxn/.editorconfig @@ -0,0 +1,14 @@ +root = true + +[*.{c,h,l,y,pl,pm}] +indent_style = tab +indent_size = tab +tab_width = 4 + +[*.{sgml,xml}] +indent_style = space +indent_size = 1 + +[*.xsl] +indent_style = space +indent_size = 2 From 27518676d7aebb26ad81bf0a926749c5ed9e75d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Wed, 12 Jun 2024 00:45:22 +0200 Subject: [PATCH 14/41] Rename S3 scrubber to storage scrubber (#8013) The S3 scrubber contains "S3" in its name, but we want to make it generic in terms of which storage is used (#7547). Therefore, rename it to "storage scrubber", following the naming scheme of already existing components "storage broker" and "storage controller". 
Part of #7547 --- .dockerignore | 2 +- Cargo.lock | 96 +++++++++---------- Cargo.toml | 2 +- {s3_scrubber => storage_scrubber}/Cargo.toml | 2 +- {s3_scrubber => storage_scrubber}/README.md | 2 +- .../src/checks.rs | 0 .../src/cloud_admin_api.rs | 0 .../src/garbage.rs | 0 {s3_scrubber => storage_scrubber}/src/lib.rs | 0 {s3_scrubber => storage_scrubber}/src/main.rs | 10 +- .../src/metadata_stream.rs | 0 .../src/pageserver_physical_gc.rs | 0 .../src/scan_pageserver_metadata.rs | 0 .../src/scan_safekeeper_metadata.rs | 0 .../src/tenant_snapshot.rs | 0 test_runner/fixtures/neon_fixtures.py | 8 +- .../regress/test_pageserver_generations.py | 4 +- .../regress/test_pageserver_secondary.py | 6 +- test_runner/regress/test_sharding.py | 4 +- ...3_scrubber.py => test_storage_scrubber.py} | 10 +- test_runner/regress/test_tenant_delete.py | 4 +- 21 files changed, 75 insertions(+), 75 deletions(-) rename {s3_scrubber => storage_scrubber}/Cargo.toml (98%) rename {s3_scrubber => storage_scrubber}/README.md (99%) rename {s3_scrubber => storage_scrubber}/src/checks.rs (100%) rename {s3_scrubber => storage_scrubber}/src/cloud_admin_api.rs (100%) rename {s3_scrubber => storage_scrubber}/src/garbage.rs (100%) rename {s3_scrubber => storage_scrubber}/src/lib.rs (100%) rename {s3_scrubber => storage_scrubber}/src/main.rs (96%) rename {s3_scrubber => storage_scrubber}/src/metadata_stream.rs (100%) rename {s3_scrubber => storage_scrubber}/src/pageserver_physical_gc.rs (100%) rename {s3_scrubber => storage_scrubber}/src/scan_pageserver_metadata.rs (100%) rename {s3_scrubber => storage_scrubber}/src/scan_safekeeper_metadata.rs (100%) rename {s3_scrubber => storage_scrubber}/src/tenant_snapshot.rs (100%) rename test_runner/regress/{test_s3_scrubber.py => test_storage_scrubber.py} (94%) diff --git a/.dockerignore b/.dockerignore index eead727994a3..c7a2f78e32b9 100644 --- a/.dockerignore +++ b/.dockerignore @@ -21,7 +21,7 @@ !patches/ !pgxn/ !proxy/ -!s3_scrubber/ +!storage_scrubber/ !safekeeper/ !storage_broker/ !storage_controller/ diff --git a/Cargo.lock b/Cargo.lock index 66879fd74374..1c8a8b0c0fff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5109,54 +5109,6 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" -[[package]] -name = "s3_scrubber" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-stream", - "aws-config", - "aws-sdk-s3", - "aws-smithy-async", - "bincode", - "bytes", - "camino", - "chrono", - "clap", - "crc32c", - "either", - "futures", - "futures-util", - "hex", - "histogram", - "humantime", - "itertools", - "once_cell", - "pageserver", - "pageserver_api", - "postgres_ffi", - "rand 0.8.5", - "remote_storage", - "reqwest 0.12.4", - "rustls 0.22.4", - "rustls-native-certs 0.7.0", - "serde", - "serde_json", - "serde_with", - "thiserror", - "tokio", - "tokio-postgres", - "tokio-postgres-rustls", - "tokio-rustls 0.25.0", - "tokio-stream", - "tokio-util", - "tracing", - "tracing-appender", - "tracing-subscriber", - "utils", - "workspace_hack", -] - [[package]] name = "safekeeper" version = "0.1.0" @@ -5813,6 +5765,54 @@ dependencies = [ "workspace_hack", ] +[[package]] +name = "storage_scrubber" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-stream", + "aws-config", + "aws-sdk-s3", + "aws-smithy-async", + "bincode", + "bytes", + "camino", + "chrono", + "clap", + "crc32c", + "either", + "futures", + "futures-util", + "hex", + "histogram", + "humantime", + "itertools", + 
"once_cell", + "pageserver", + "pageserver_api", + "postgres_ffi", + "rand 0.8.5", + "remote_storage", + "reqwest 0.12.4", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "serde", + "serde_json", + "serde_with", + "thiserror", + "tokio", + "tokio-postgres", + "tokio-postgres-rustls", + "tokio-rustls 0.25.0", + "tokio-stream", + "tokio-util", + "tracing", + "tracing-appender", + "tracing-subscriber", + "utils", + "workspace_hack", +] + [[package]] name = "storcon_cli" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 58715db32b2b..dc89c2341bf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ members = [ "safekeeper", "storage_broker", "storage_controller", - "s3_scrubber", + "storage_scrubber", "workspace_hack", "trace", "libs/compute_api", diff --git a/s3_scrubber/Cargo.toml b/storage_scrubber/Cargo.toml similarity index 98% rename from s3_scrubber/Cargo.toml rename to storage_scrubber/Cargo.toml index 48b50ca21c5a..050be66483b9 100644 --- a/s3_scrubber/Cargo.toml +++ b/storage_scrubber/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "s3_scrubber" +name = "storage_scrubber" version = "0.1.0" edition.workspace = true license.workspace = true diff --git a/s3_scrubber/README.md b/storage_scrubber/README.md similarity index 99% rename from s3_scrubber/README.md rename to storage_scrubber/README.md index 8a96542adaff..0930f343eceb 100644 --- a/s3_scrubber/README.md +++ b/storage_scrubber/README.md @@ -1,4 +1,4 @@ -# Neon S3 scrubber +# Neon Storage Scrubber This tool directly accesses the S3 buckets used by the Neon `pageserver` and `safekeeper`, and does housekeeping such as cleaning up objects for tenants & timelines that no longer exist. diff --git a/s3_scrubber/src/checks.rs b/storage_scrubber/src/checks.rs similarity index 100% rename from s3_scrubber/src/checks.rs rename to storage_scrubber/src/checks.rs diff --git a/s3_scrubber/src/cloud_admin_api.rs b/storage_scrubber/src/cloud_admin_api.rs similarity index 100% rename from s3_scrubber/src/cloud_admin_api.rs rename to storage_scrubber/src/cloud_admin_api.rs diff --git a/s3_scrubber/src/garbage.rs b/storage_scrubber/src/garbage.rs similarity index 100% rename from s3_scrubber/src/garbage.rs rename to storage_scrubber/src/garbage.rs diff --git a/s3_scrubber/src/lib.rs b/storage_scrubber/src/lib.rs similarity index 100% rename from s3_scrubber/src/lib.rs rename to storage_scrubber/src/lib.rs diff --git a/s3_scrubber/src/main.rs b/storage_scrubber/src/main.rs similarity index 96% rename from s3_scrubber/src/main.rs rename to storage_scrubber/src/main.rs index ade8ef7d7a87..222bd10ed248 100644 --- a/s3_scrubber/src/main.rs +++ b/storage_scrubber/src/main.rs @@ -1,11 +1,11 @@ use anyhow::bail; use camino::Utf8PathBuf; use pageserver_api::shard::TenantShardId; -use s3_scrubber::garbage::{find_garbage, purge_garbage, PurgeMode}; -use s3_scrubber::pageserver_physical_gc::GcMode; -use s3_scrubber::scan_pageserver_metadata::scan_metadata; -use s3_scrubber::tenant_snapshot::SnapshotDownloader; -use s3_scrubber::{ +use storage_scrubber::garbage::{find_garbage, purge_garbage, PurgeMode}; +use storage_scrubber::pageserver_physical_gc::GcMode; +use storage_scrubber::scan_pageserver_metadata::scan_metadata; +use storage_scrubber::tenant_snapshot::SnapshotDownloader; +use storage_scrubber::{ init_logging, pageserver_physical_gc::pageserver_physical_gc, scan_safekeeper_metadata::scan_safekeeper_metadata, BucketConfig, ConsoleConfig, NodeKind, TraversingDepth, diff --git a/s3_scrubber/src/metadata_stream.rs 
b/storage_scrubber/src/metadata_stream.rs similarity index 100% rename from s3_scrubber/src/metadata_stream.rs rename to storage_scrubber/src/metadata_stream.rs diff --git a/s3_scrubber/src/pageserver_physical_gc.rs b/storage_scrubber/src/pageserver_physical_gc.rs similarity index 100% rename from s3_scrubber/src/pageserver_physical_gc.rs rename to storage_scrubber/src/pageserver_physical_gc.rs diff --git a/s3_scrubber/src/scan_pageserver_metadata.rs b/storage_scrubber/src/scan_pageserver_metadata.rs similarity index 100% rename from s3_scrubber/src/scan_pageserver_metadata.rs rename to storage_scrubber/src/scan_pageserver_metadata.rs diff --git a/s3_scrubber/src/scan_safekeeper_metadata.rs b/storage_scrubber/src/scan_safekeeper_metadata.rs similarity index 100% rename from s3_scrubber/src/scan_safekeeper_metadata.rs rename to storage_scrubber/src/scan_safekeeper_metadata.rs diff --git a/s3_scrubber/src/tenant_snapshot.rs b/storage_scrubber/src/tenant_snapshot.rs similarity index 100% rename from s3_scrubber/src/tenant_snapshot.rs rename to storage_scrubber/src/tenant_snapshot.rs diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 6fdad2188ccb..394f5283f33f 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -833,7 +833,7 @@ def overlay_cleanup_teardown(self): def enable_scrub_on_exit(self): """ Call this if you would like the fixture to automatically run - s3_scrubber at the end of the test, as a bidirectional test + storage_scrubber at the end of the test, as a bidirectional test that the scrubber is working properly, and that the code within the test didn't produce any invalid remote state. """ @@ -948,7 +948,7 @@ def __exit__( if self.scrub_on_exit: try: - S3Scrubber(self).scan_metadata() + StorageScrubber(self).scan_metadata() except Exception as e: log.error(f"Error during remote storage scrub: {e}") cleanup_error = e @@ -3937,7 +3937,7 @@ def paused(): wait_until(20, 0.5, paused) -class S3Scrubber: +class StorageScrubber: def __init__(self, env: NeonEnvBuilder, log_dir: Optional[Path] = None): self.env = env self.log_dir = log_dir or env.test_output_dir @@ -3957,7 +3957,7 @@ def scrubber_cli(self, args: list[str], timeout) -> str: if s3_storage.endpoint is not None: env.update({"AWS_ENDPOINT_URL": s3_storage.endpoint}) - base_args = [str(self.env.neon_binpath / "s3_scrubber")] + base_args = [str(self.env.neon_binpath / "storage_scrubber")] args = base_args + args (output_path, stdout, status_code) = subprocess_capture( diff --git a/test_runner/regress/test_pageserver_generations.py b/test_runner/regress/test_pageserver_generations.py index 0235cf6d2021..696af24e5c0a 100644 --- a/test_runner/regress/test_pageserver_generations.py +++ b/test_runner/regress/test_pageserver_generations.py @@ -22,7 +22,7 @@ NeonEnv, NeonEnvBuilder, PgBin, - S3Scrubber, + StorageScrubber, generate_uploads_and_deletions, ) from fixtures.pageserver.common_types import parse_layer_file_name @@ -215,7 +215,7 @@ def parse_generation_suffix(key): # Having written a mixture of generation-aware and legacy index_part.json, # ensure the scrubber handles the situation as expected. 
- metadata_summary = S3Scrubber(neon_env_builder).scan_metadata() + metadata_summary = StorageScrubber(neon_env_builder).scan_metadata() assert metadata_summary["tenant_count"] == 1 # Scrubber should have seen our timeline assert metadata_summary["timeline_count"] == 1 assert metadata_summary["timeline_shard_count"] == 1 diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 757ea60882a9..2782d33e1591 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -7,7 +7,7 @@ import pytest from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserver, S3Scrubber +from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserver, StorageScrubber from fixtures.pageserver.common_types import parse_layer_file_name from fixtures.pageserver.utils import ( assert_prefix_empty, @@ -214,7 +214,7 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, seed: int): # Having done a bunch of attach/detach cycles, we will have generated some index garbage: check # that the scrubber sees it and cleans it up. We do this before the final attach+validate pass, # to also validate that the scrubber isn't breaking anything. - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] > 0 @@ -536,7 +536,7 @@ def test_secondary_downloads(neon_env_builder: NeonEnvBuilder): # Scrub the remote storage # ======================== # This confirms that the scrubber isn't upset by the presence of the heatmap - S3Scrubber(neon_env_builder).scan_metadata() + StorageScrubber(neon_env_builder).scan_metadata() # Detach secondary and delete tenant # =================================== diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 1996e995574c..56075c597564 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -11,8 +11,8 @@ from fixtures.neon_fixtures import ( NeonEnv, NeonEnvBuilder, - S3Scrubber, StorageControllerApiException, + StorageScrubber, last_flush_lsn_upload, tenant_get_shards, wait_for_last_flush_lsn, @@ -128,7 +128,7 @@ def get_sizes(): # Check the scrubber isn't confused by sharded content, then disable # it during teardown because we'll have deleted by then - S3Scrubber(neon_env_builder).scan_metadata() + StorageScrubber(neon_env_builder).scan_metadata() neon_env_builder.scrub_on_exit = False env.storage_controller.pageserver_api().tenant_delete(tenant_id) diff --git a/test_runner/regress/test_s3_scrubber.py b/test_runner/regress/test_storage_scrubber.py similarity index 94% rename from test_runner/regress/test_s3_scrubber.py rename to test_runner/regress/test_storage_scrubber.py index 6baba190f3ba..35ae61c380df 100644 --- a/test_runner/regress/test_s3_scrubber.py +++ b/test_runner/regress/test_storage_scrubber.py @@ -6,7 +6,7 @@ from fixtures.common_types import TenantId, TenantShardId, TimelineId from fixtures.neon_fixtures import ( NeonEnvBuilder, - S3Scrubber, + StorageScrubber, ) from fixtures.remote_storage import S3Storage, s3_storage from fixtures.workload import Workload @@ -60,7 +60,7 @@ def test_scrubber_tenant_snapshot(neon_env_builder: NeonEnvBuilder, shard_count: output_path = 
neon_env_builder.test_output_dir / "snapshot" os.makedirs(output_path) - scrubber = S3Scrubber(neon_env_builder) + scrubber = StorageScrubber(neon_env_builder) scrubber.tenant_snapshot(tenant_id, output_path) assert len(os.listdir(output_path)) > 0 @@ -143,18 +143,18 @@ def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: Opt workload.write_rows(1) # With a high min_age, the scrubber should decline to delete anything - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=3600) + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=3600) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] == 0 # If targeting a different tenant, the scrubber shouldn't do anything - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc( + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc( min_age_secs=1, tenant_ids=[TenantId.generate()] ) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] == 0 # With a low min_age, the scrubber should go ahead and clean up all but the latest 2 generations - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] == (expect_indices_per_shard - 2) * shard_count diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index fa7cead1bd29..fd3cc45c3f48 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -10,7 +10,7 @@ from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, - S3Scrubber, + StorageScrubber, last_flush_lsn_upload, wait_for_last_flush_lsn, ) @@ -707,7 +707,7 @@ def test_tenant_delete_scrubber(pg_bin: PgBin, neon_env_builder: NeonEnvBuilder) remote_storage_kind = RemoteStorageKind.MOCK_S3 neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind) - scrubber = S3Scrubber(neon_env_builder) + scrubber = StorageScrubber(neon_env_builder) env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) ps_http = env.pageserver.http_client() From b7a0c2b61430eb5f88200b679acdcbee3503f15b Mon Sep 17 00:00:00 2001 From: Sasha Krassovsky Date: Tue, 11 Jun 2024 17:59:32 -0700 Subject: [PATCH 15/41] Add On-demand WAL Download to logicalfuncs (#7960) We implemented on-demand WAL download for walsender, but other things that may want to read the WAL from safekeepers don't do that yet. This PR makes it do that by adding the same set of hooks to logicalfuncs. 
Addresses https://github.com/neondatabase/neon/issues/7959 Also relies on: https://github.com/neondatabase/postgres/pull/438 https://github.com/neondatabase/postgres/pull/437 https://github.com/neondatabase/postgres/pull/436 --- Makefile | 2 ++ pgxn/neon/neon.c | 2 ++ pgxn/neon/walsender_hooks.c | 27 ++++++++++++++++- .../regress/test_logical_replication.py | 30 +++++++++++++++++++ vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 ++-- 8 files changed, 66 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index dcbfdbcbc15a..37bd19ba4440 100644 --- a/Makefile +++ b/Makefile @@ -124,6 +124,8 @@ postgres-%: postgres-configure-% \ $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install +@echo "Compiling amcheck $*" $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/amcheck install + +@echo "Compiling test_decoding $*" + $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/test_decoding install .PHONY: postgres-clean-% postgres-clean-%: diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index b69a3819c9b5..276d1542fe3b 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -19,6 +19,7 @@ #include "catalog/pg_type.h" #include "postmaster/bgworker.h" #include "postmaster/interrupt.h" +#include "replication/logical.h" #include "replication/slot.h" #include "replication/walsender.h" #include "storage/procsignal.h" @@ -280,6 +281,7 @@ _PG_init(void) pg_init_libpagestore(); pg_init_walproposer(); WalSender_Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines; + LogicalFuncs_Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines; InitLogicalReplicationMonitor(); diff --git a/pgxn/neon/walsender_hooks.c b/pgxn/neon/walsender_hooks.c index 93dce9de84b0..8f8d1dfc01a4 100644 --- a/pgxn/neon/walsender_hooks.c +++ b/pgxn/neon/walsender_hooks.c @@ -24,8 +24,12 @@ #include "walproposer.h" static NeonWALReader *wal_reader = NULL; + +struct WalSnd; +extern struct WalSnd *MyWalSnd; extern XLogRecPtr WalSndWaitForWal(XLogRecPtr loc); extern bool GetDonorShmem(XLogRecPtr *donor_lsn); +extern XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI); static XLogRecPtr NeonWALReadWaitForWAL(XLogRecPtr loc) @@ -36,7 +40,28 @@ NeonWALReadWaitForWAL(XLogRecPtr loc) CHECK_FOR_INTERRUPTS(); } - return WalSndWaitForWal(loc); + // Walsender sends keepalives and stuff, so better use its normal wait + if (MyWalSnd != NULL) + return WalSndWaitForWal(loc); + + for (;;) + { + XLogRecPtr flush_ptr; + if (!RecoveryInProgress()) +#if PG_VERSION_NUM >= 150000 + flush_ptr = GetFlushRecPtr(NULL); +#else + flush_ptr = GetFlushRecPtr(); +#endif + else + flush_ptr = GetXLogReplayRecPtr(NULL); + + if (loc <= flush_ptr) + return flush_ptr; + + CHECK_FOR_INTERRUPTS(); + pg_usleep(1000); + } } static int diff --git a/test_runner/regress/test_logical_replication.py b/test_runner/regress/test_logical_replication.py index a657d5a03511..ca3c81d6e51d 100644 --- a/test_runner/regress/test_logical_replication.py +++ b/test_runner/regress/test_logical_replication.py @@ -221,6 +221,35 @@ def slot_removed(ep): wait_until(number_of_iterations=10, interval=2, func=partial(slot_removed, endpoint)) +def test_ondemand_wal_download_in_replication_slot_funcs(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_safekeepers = 3 + env = neon_env_builder.init_start() + + env.neon_cli.create_branch("init") + endpoint = env.endpoints.create_start("init") + + with endpoint.connect().cursor() as cur: + cur.execute("create table wal_generator (id serial primary key, data 
text)") + cur.execute( + "SELECT * FROM pg_create_logical_replication_slot('slotty_mcslotface', 'test_decoding')" + ) + cur.execute( + """ +INSERT INTO wal_generator (data) +SELECT repeat('A', 1024) -- Generates a kilobyte of data per row +FROM generate_series(1, 16384) AS seq; -- Inserts enough rows to exceed 16MB of data +""" + ) + + endpoint.stop_and_destroy() + endpoint = env.endpoints.create_start("init") + + with endpoint.connect().cursor() as cur: + cur.execute( + "SELECT * FROM pg_logical_slot_peek_binary_changes('slotty_mcslotface', NULL, NULL, 'include-xids', '0')" + ) + + # Tests that walsender correctly blocks until WAL is downloaded from safekeepers def test_lr_with_slow_safekeeper(neon_env_builder: NeonEnvBuilder, vanilla_pg): neon_env_builder.num_safekeepers = 3 @@ -247,6 +276,7 @@ def test_lr_with_slow_safekeeper(neon_env_builder: NeonEnvBuilder, vanilla_pg): connstr = endpoint.connstr().replace("'", "''") vanilla_pg.safe_psql(f"create subscription sub1 connection '{connstr}' publication pub") logical_replication_sync(vanilla_pg, endpoint) + vanilla_pg.stop() # Pause the safekeepers so that they can't send WAL (except to pageserver) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 17e0f5ff4e19..4c51945a6167 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 17e0f5ff4e1905691aa40e1e08f9b79b14c99652 +Subproject commit 4c51945a6167ca06c0169e7a4ca5a8e7ffa3faba diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index c2c3d40534db..e22098d86d6c 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit c2c3d40534db97d83dd7e185d1971e707fa2f445 +Subproject commit e22098d86d6c40276b6bd75c29133a33fb283ab6 diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index b228f20372eb..9837db157837 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit b228f20372ebcabfd7946647cb7adbd38bacb14a +Subproject commit 9837db157837fcf43ef7348be0017d3a2238cd27 diff --git a/vendor/revisions.json b/vendor/revisions.json index 5bf4e289ef92..f945ea6d730e 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "b228f20372ebcabfd7946647cb7adbd38bacb14a"], - "v15": ["15.7", "c2c3d40534db97d83dd7e185d1971e707fa2f445"], - "v14": ["14.12", "17e0f5ff4e1905691aa40e1e08f9b79b14c99652"] + "v16": ["16.3", "9837db157837fcf43ef7348be0017d3a2238cd27"], + "v15": ["15.7", "e22098d86d6c40276b6bd75c29133a33fb283ab6"], + "v14": ["14.12", "4c51945a6167ca06c0169e7a4ca5a8e7ffa3faba"] } From 9983ae291bf97fcd4a80fc3be6b00da39aca2663 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 12 Jun 2024 09:18:52 +0300 Subject: [PATCH 16/41] Another attempt at making test_vm_bits less flaky (#7989) - Split the first and second parts of the test to two separate tests - In the first test, disable the aggressive GC, compaction, and autovacuum. They are only needed by the second test. I'd like to get the first test to a point that the VM page is never all-zeros. Disabling autovacuum in the first test is hopefully enough to accomplish that. - Compare the full page images, don't skip page header. After fixing the previous point, there should be no discrepancy. LSN still won't match, though, because of commit 387a36874c. 
Fixes issue https://github.com/neondatabase/neon/issues/7984 --- test_runner/regress/test_vm_bits.py | 116 +++++++++++++++++++++------- 1 file changed, 86 insertions(+), 30 deletions(-) diff --git a/test_runner/regress/test_vm_bits.py b/test_runner/regress/test_vm_bits.py index b549db1af637..225b952e7300 100644 --- a/test_runner/regress/test_vm_bits.py +++ b/test_runner/regress/test_vm_bits.py @@ -1,7 +1,9 @@ import time +from contextlib import closing from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, fork_at_current_lsn +from fixtures.utils import query_scalar # @@ -113,11 +115,88 @@ def test_vm_bit_clear(neon_simple_env: NeonEnv): assert cur_new.fetchall() == [] -# -# Test that the ALL_FROZEN VM bit is cleared correctly at a HEAP_LOCK -# record. -# -def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): +def test_vm_bit_clear_on_heap_lock_whitebox(neon_env_builder: NeonEnvBuilder): + """ + Test that the ALL_FROZEN VM bit is cleared correctly at a HEAP_LOCK record. + + This is a repro for the bug fixed in commit 66fa176cc8. + """ + env = neon_env_builder.init_start() + endpoint = env.endpoints.create_start( + "main", + config_lines=[ + # If auto-analyze runs at the same time that we run VACUUM FREEZE, it + # can hold a snasphot that prevent the tuples from being frozen. + "autovacuum=off", + "log_checkpoints=on", + ], + ) + + # Run the tests in a dedicated database, because the activity monitor + # periodically runs some queries on to the 'postgres' database. If that + # happens at the same time that we're trying to freeze, the activity + # monitor's queries can hold back the xmin horizon and prevent freezing. + with closing(endpoint.connect()) as pg_conn: + pg_conn.cursor().execute("CREATE DATABASE vmbitsdb") + pg_conn = endpoint.connect(dbname="vmbitsdb") + cur = pg_conn.cursor() + + # Install extension containing function needed for test + cur.execute("CREATE EXTENSION neon_test_utils") + cur.execute("CREATE EXTENSION pageinspect") + + # Create a test table and freeze it to set the all-frozen VM bit on all pages. + cur.execute("CREATE TABLE vmtest_lock (id integer PRIMARY KEY)") + cur.execute("BEGIN") + cur.execute("INSERT INTO vmtest_lock SELECT g FROM generate_series(1, 50000) g") + xid = int(query_scalar(cur, "SELECT txid_current()")) + cur.execute("COMMIT") + cur.execute("VACUUM (FREEZE, DISABLE_PAGE_SKIPPING true, VERBOSE) vmtest_lock") + for notice in pg_conn.notices: + log.info(f"{notice}") + + # This test has been flaky in the past, because background activity like + # auto-analyze and compute_ctl's activity monitor queries have prevented the + # tuples from being frozen. Check that they were frozen. + relfrozenxid = int( + query_scalar(cur, "SELECT relfrozenxid FROM pg_class WHERE relname='vmtest_lock'") + ) + assert ( + relfrozenxid > xid + ), f"Inserted rows were not frozen. This can be caused by concurrent activity in the database. (XID {xid}, relfrozenxid {relfrozenxid}" + + # Lock a row. This clears the all-frozen VM bit for that page. + cur.execute("BEGIN") + cur.execute("SELECT * FROM vmtest_lock WHERE id = 40000 FOR UPDATE") + cur.execute("COMMIT") + + # The VM page in shared buffer cache, and the same page as reconstructed by + # the pageserver, should be equal. 
Except for the LSN: Clearing a bit in the + # VM doesn't bump the LSN in PostgreSQL, but the pageserver updates the LSN + # when it replays the VM-bit clearing record (since commit 387a36874c) + # + # This is a bit fragile, we've had lot of flakiness in this test before. For + # example, because all the VM bits were not set because concurrent + # autoanalyze prevented the VACUUM FREEZE from freezing the tuples. Or + # because autoavacuum kicked in and re-froze the page between the + # get_raw_page() and get_raw_page_at_lsn() calls. We disable autovacuum now, + # which should make this deterministic. + cur.execute("select get_raw_page( 'vmtest_lock', 'vm', 0 )") + vm_page_in_cache = (cur.fetchall()[0][0])[8:100].hex() + cur.execute( + "select get_raw_page_at_lsn( 'vmtest_lock', 'vm', 0, pg_current_wal_insert_lsn(), NULL )" + ) + vm_page_at_pageserver = (cur.fetchall()[0][0])[8:100].hex() + + assert vm_page_at_pageserver == vm_page_in_cache + + +def test_vm_bit_clear_on_heap_lock_blackbox(neon_env_builder: NeonEnvBuilder): + """ + The previous test is enough to verify the bug that was fixed in + commit 66fa176cc8. But for good measure, we also reproduce the + original problem that the missing VM page update caused. + """ tenant_conf = { "checkpoint_distance": f"{128 * 1024}", "compaction_target_size": f"{128 * 1024}", @@ -130,9 +209,9 @@ def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) tenant_id = env.initial_tenant - timeline_id = env.neon_cli.create_branch("test_vm_bit_clear_on_heap_lock") + timeline_id = env.initial_timeline endpoint = env.endpoints.create_start( - "test_vm_bit_clear_on_heap_lock", + "main", config_lines=[ "log_autovacuum_min_duration = 0", # Perform anti-wraparound vacuuming aggressively @@ -146,12 +225,10 @@ def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): # Install extension containing function needed for test cur.execute("CREATE EXTENSION neon_test_utils") - cur.execute("CREATE EXTENSION pageinspect") # Create a test table and freeze it to set the all-frozen VM bit on all pages. cur.execute("CREATE TABLE vmtest_lock (id integer PRIMARY KEY)") cur.execute("INSERT INTO vmtest_lock SELECT g FROM generate_series(1, 50000) g") - cur.execute("VACUUM (FREEZE, DISABLE_PAGE_SKIPPING true) vmtest_lock") # Lock a row. This clears the all-frozen VM bit for that page. @@ -165,27 +242,6 @@ def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): cur.execute("COMMIT") - # The VM page in shared buffer cache, and the same page as reconstructed - # by the pageserver, should be equal. - # - # Ignore page header (24 bytes) of visibility map. - # If the dirty VM page is flushed from the cache for some reason, - # it gets WAL-logged, which changes the LSN on the page. - # Also in neon SMGR we can replace empty heap page with zero (uninitialized) heap page. - cur.execute("select get_raw_page( 'vmtest_lock', 'vm', 0 )") - vm_page_in_cache = (cur.fetchall()[0][0])[24:100].hex() - cur.execute( - "select get_raw_page_at_lsn( 'vmtest_lock', 'vm', 0, pg_current_wal_insert_lsn(), NULL )" - ) - vm_page_at_pageserver = (cur.fetchall()[0][0])[24:100].hex() - - assert vm_page_at_pageserver == vm_page_in_cache - - # The above assert is enough to verify the bug that was fixed in - # commit 66fa176cc8. But for good measure, we also reproduce the - # original problem that the missing VM page update caused. The - # rest of the test does that. 
- # Kill and restart postgres, to clear the buffer cache. # # NOTE: clear_buffer_cache() will not do, because it evicts the dirty pages From 69aa1aca356b1893a48b06627b31de4933a172f9 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 12 Jun 2024 09:19:24 +0300 Subject: [PATCH 17/41] Update default Postgres version in docker-compose.yml (#8019) Let's be modern. --- docker-compose/docker-compose.yml | 4 ++-- docs/docker.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index a395f0331b28..3f097f27003f 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -159,12 +159,12 @@ services: context: ./compute_wrapper/ args: - REPOSITORY=${REPOSITORY:-neondatabase} - - COMPUTE_IMAGE=compute-node-v${PG_VERSION:-14} + - COMPUTE_IMAGE=compute-node-v${PG_VERSION:-16} - TAG=${TAG:-latest} - http_proxy=$http_proxy - https_proxy=$https_proxy environment: - - PG_VERSION=${PG_VERSION:-14} + - PG_VERSION=${PG_VERSION:-16} #- RUST_BACKTRACE=1 # Mount the test files directly, for faster editing cycle. volumes: diff --git a/docs/docker.md b/docs/docker.md index cbf68be3a7a4..ccd2afc27a22 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -34,12 +34,12 @@ You can see a [docker compose](https://docs.docker.com/compose/) example to crea 1. create containers You can specify version of neon cluster using following environment values. -- PG_VERSION: postgres version for compute (default is 14) +- PG_VERSION: postgres version for compute (default is 16) - TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml) ``` $ cd docker-compose/ $ docker-compose down # remove the containers if exists -$ PG_VERSION=15 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version +$ PG_VERSION=16 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version Creating network "dockercompose_default" with the default driver Creating docker-compose_storage_broker_1 ... done (...omit...) From 0a256148b0345d4a04e63347947bc93bcacdfb24 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 12 Jun 2024 10:06:00 +0300 Subject: [PATCH 18/41] Update documentation on running locally with Docker (#8020) - Fix the dockerhub URLs - `neondatabase/compute-node` image has been replaced with Postgres version specific images like `neondatabase/compute-node-v16` - Use TAG=latest in the example, rather than some old tag. That's a sensible default for people to copy-past - For convenience, use a Postgres connection URL in the `psql` example that also includes the password. That way, there's no need to set up .pgpass - Update the image names in `docker ps` example to match what you get when you follow the example --- docs/docker.md | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/docs/docker.md b/docs/docker.md index ccd2afc27a22..ce806c4e6cfd 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -4,18 +4,18 @@ Currently we build two main images: -- [neondatabase/neon](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). 
-- [neondatabase/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [neondatabase/postgres](https://github.com/neondatabase/postgres). +- [neondatabase/neon](https://hub.docker.com/repository/docker/neondatabase/neon) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). +- [neondatabase/compute-node-v16](https://hub.docker.com/repository/docker/neondatabase/compute-node-v16) — compute node image with pre-built Postgres binaries from [neondatabase/postgres](https://github.com/neondatabase/postgres). Similar images exist for v15 and v14. And additional intermediate image: - [neondatabase/compute-tools](https://hub.docker.com/repository/docker/neondatabase/compute-tools) — compute node configuration management tools. -## Building pipeline +## Build pipeline We build all images after a successful `release` tests run and push automatically to Docker Hub with two parallel CI jobs -1. `neondatabase/compute-tools` and `neondatabase/compute-node` +1. `neondatabase/compute-tools` and `neondatabase/compute-node-v16` (and -v15 and -v14) 2. `neondatabase/neon` @@ -34,12 +34,12 @@ You can see a [docker compose](https://docs.docker.com/compose/) example to crea 1. create containers You can specify version of neon cluster using following environment values. -- PG_VERSION: postgres version for compute (default is 16) -- TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml) +- PG_VERSION: postgres version for compute (default is 16 as of this writing) +- TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags), which is tagged in [CI test](/.github/workflows/build_and_test.yml). Default is 'latest' ``` $ cd docker-compose/ $ docker-compose down # remove the containers if exists -$ PG_VERSION=16 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version +$ PG_VERSION=16 TAG=latest docker-compose up --build -d # You can specify the postgres and image version Creating network "dockercompose_default" with the default driver Creating docker-compose_storage_broker_1 ... done (...omit...) @@ -47,29 +47,31 @@ Creating docker-compose_storage_broker_1 ... done 2. connect compute node ``` -$ echo "localhost:55433:postgres:cloud_admin:cloud_admin" >> ~/.pgpass -$ chmod 600 ~/.pgpass -$ psql -h localhost -p 55433 -U cloud_admin +$ psql postgresql://cloud_admin:cloud_admin@localhost:55433/postgres +psql (16.3) +Type "help" for help. + postgres=# CREATE TABLE t(key int primary key, value text); CREATE TABLE -postgres=# insert into t values(1,1); +postgres=# insert into t values(1, 1); INSERT 0 1 postgres=# select * from t; - key | value + key | value -----+------- 1 | 1 (1 row) + ``` 3. If you want to see the log, you can use `docker-compose logs` command. 
``` # check the container name you want to see $ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -d6968a5ae912 dockercompose_compute "/shell/compute.sh" 5 minutes ago Up 5 minutes 0.0.0.0:3080->3080/tcp, 0.0.0.0:55433->55433/tcp dockercompose_compute_1 +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3582f6d76227 docker-compose_compute "/shell/compute.sh" 2 minutes ago Up 2 minutes 0.0.0.0:3080->3080/tcp, :::3080->3080/tcp, 0.0.0.0:55433->55433/tcp, :::55433->55433/tcp docker-compose_compute_1 (...omit...) -$ docker logs -f dockercompose_compute_1 +$ docker logs -f docker-compose_compute_1 2022-10-21 06:15:48.757 GMT [56] LOG: connection authorized: user=cloud_admin database=postgres application_name=psql 2022-10-21 06:17:00.307 GMT [56] LOG: [NEON_SMGR] libpagestore: connected to 'host=pageserver port=6400' (...omit...) From f749437cec056bd4387dc8c17a5591003ef1fff6 Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Wed, 12 Jun 2024 12:25:13 +0200 Subject: [PATCH 19/41] Resolve the problem the docker compose caused by the extensions tests (#8024) ## Problem The merging of #7818 caused the problem with the docker-compose file. Running docker compose is now impossible due to the unavailability of the neon-test-extensions:latest image ## Summary of changes Fix the problem: Add the latest tag to the neon-test-extensions image and use the profiles feature of the docker-compose file to avoid loading the neon-test-extensions container if it is not needed. --- .github/workflows/build_and_test.yml | 2 ++ docker-compose/docker-compose.yml | 1 + docker-compose/docker_compose_test.sh | 9 ++++----- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 79a0a77638fa..57635f4920fb 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1101,6 +1101,8 @@ jobs: $repo/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} done done + docker buildx imagetools create -t neondatabase/neon-test-extensions:latest \ + neondatabase/neon-test-extensions:${{ needs.tag.outputs.build-tag }} trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index 3f097f27003f..5503b6611aa2 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -194,6 +194,7 @@ services: - compute neon-test-extensions: + profiles: ["test-extensions"] image: ${REPOSITORY:-neondatabase}/neon-test-extensions-v${PG_TEST_VERSION:-16}:${TAG:-latest} entrypoint: - "/bin/bash" diff --git a/docker-compose/docker_compose_test.sh b/docker-compose/docker_compose_test.sh index 71e73c2d0ac8..a00591afd081 100755 --- a/docker-compose/docker_compose_test.sh +++ b/docker-compose/docker_compose_test.sh @@ -15,7 +15,6 @@ set -eux -o pipefail COMPOSE_FILE='docker-compose.yml' cd $(dirname $0) -docker compose -f $COMPOSE_FILE COMPUTE_CONTAINER_NAME=docker-compose-compute-1 TEST_CONTAINER_NAME=docker-compose-neon-test-extensions-1 PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres" @@ -26,16 +25,16 @@ export http_proxy https_proxy cleanup() { echo "show container information" docker ps - docker compose -f $COMPOSE_FILE logs + docker compose --profile test-extensions -f $COMPOSE_FILE logs echo "stop containers..." 
- docker compose -f $COMPOSE_FILE down + docker compose --profile test-extensions -f $COMPOSE_FILE down } for pg_version in 14 15 16; do echo "clean up containers if exists" cleanup PG_TEST_VERSION=$(($pg_version < 16 ? 16 : $pg_version)) - PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose -f $COMPOSE_FILE up --build -d + PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d echo "wait until the compute is ready. timeout after 60s. " cnt=0 @@ -47,7 +46,7 @@ for pg_version in 14 15 16; do cleanup exit 1 fi - if docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then + if docker compose --profile test-extensions -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then echo "OK. The compute is ready to connect." echo "execute simple queries." docker exec $COMPUTE_CONTAINER_NAME /bin/bash -c "psql $PSQL_OPTION" From 3099e1a787693c7a7b2c694648462fad7eb2bcf5 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Wed, 12 Jun 2024 12:33:54 +0100 Subject: [PATCH 20/41] storcon_cli: do not drain to undesirable nodes (#8027) ## Problem The previous code would attempt to drain to unavailable or unschedulable nodes. ## Summary of Changes Remove such nodes from the list of nodes to fill. --- control_plane/storcon_cli/src/main.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 8c84911d33c9..7b48b75c21ea 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -786,18 +786,15 @@ async fn main() -> anyhow::Result<()> { anyhow::bail!("Drain requested for node which doesn't exist.") } - let can_fill = node_to_fill_descs - .iter() - .filter(|desc| { - matches!(desc.availability, NodeAvailabilityWrapper::Active) - && matches!( - desc.scheduling, - NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling - ) - }) - .any(|_| true); + node_to_fill_descs.retain(|desc| { + matches!(desc.availability, NodeAvailabilityWrapper::Active) + && matches!( + desc.scheduling, + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling + ) + }); - if !can_fill { + if node_to_fill_descs.is_empty() { anyhow::bail!("There are no nodes to drain to") } From 9ba9f32dfe35ad99335497f7d22c14ba02ebea9f Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Wed, 12 Jun 2024 16:10:57 +0200 Subject: [PATCH 21/41] Reactivate page bench test in CI after ignoring CopyFail error in pageserver (#8023) ## Problem Testcase page bench test_pageserver_max_throughput_getpage_at_latest_lsn had been deactivated because it was flaky. We now ignore copy fail error messages like in https://github.com/neondatabase/neon/blob/270d3be507643f068120b52838c497f6c1b45b61/test_runner/regress/test_pageserver_getpage_throttle.py#L17-L20 and want to reactivate it to see it it is still flaky ## Summary of changes - reactivate the test in CI - ignore CopyFail error message during page bench test cases ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. 
## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --- ...geserver_max_throughput_getpage_at_latest_lsn.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py index 1a0012397c70..772a39fe357c 100644 --- a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py +++ b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py @@ -1,5 +1,4 @@ import json -import os from pathlib import Path from typing import Any, Dict, Tuple @@ -35,10 +34,6 @@ @pytest.mark.timeout( 10000 ) # TODO: this value is just "a really high number"; have this per instance type -@pytest.mark.skipif( - os.getenv("CI", "false") == "true", - reason="The test if flaky on CI: https://github.com/neondatabase/neon/issues/6724", -) def test_pageserver_max_throughput_getpage_at_latest_lsn( neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchmarker, @@ -91,6 +86,14 @@ def setup_wrapper(env: NeonEnv): n_tenants, setup_wrapper, ) + + env.pageserver.allowed_errors.append( + # https://github.com/neondatabase/neon/issues/6925 + # https://github.com/neondatabase/neon/issues/6390 + # https://github.com/neondatabase/neon/issues/6724 + r".*query handler for.*pagestream.*failed: unexpected message: CopyFail during COPY.*" + ) + run_benchmark_max_throughput_latest_lsn(env, pg_bin, record, duration) From 9dda13eccec8e2043566be1069709709dfec425a Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Wed, 12 Jun 2024 18:15:20 +0200 Subject: [PATCH 22/41] Add the image version to the neon-test-extensions image (#8032) ## Problem The version was missing in the image name causing the error during the workflow ## Summary of changes Added the version to the image name --- .github/workflows/build_and_test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 57635f4920fb..71ca7329ee0d 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1101,8 +1101,8 @@ jobs: $repo/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} done done - docker buildx imagetools create -t neondatabase/neon-test-extensions:latest \ - neondatabase/neon-test-extensions:${{ needs.tag.outputs.build-tag }} + docker buildx imagetools create -t neondatabase/neon-test-extensions-v16:latest \ + neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }} trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] From 836d1f4af79cb74b69178213c76ef66993903307 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 12 Jun 2024 13:42:43 -0400 Subject: [PATCH 23/41] test(pageserver): add test keyspace into collect_keyspace (#8016) Some test cases add random keys into the timeline, but it is not part of the `collect_keyspace`, this will cause compaction remove the keys. The pull request adds a field to supply extra keyspaces during unit tests. 
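As a rough sketch, a test that writes synthetic keys is expected to register them through the new hook, along these lines (the key range here is illustrative; the helper is the one added in this patch):

```rust
// Sketch only: the key range is made up for illustration.
// Register the synthetic keys with the timeline so that collect_keyspace()
// reports them and compaction/GC keep the test data alive.
let test_key = Key::from_hex("010000000033333333444444445500000000").unwrap();
let mut test_key_end = test_key;
test_key_end.field6 = 1000; // illustrative upper bound
tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end));
```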
--------- Signed-off-by: Alex Chi Z --- pageserver/src/pgdatadir_mapping.rs | 8 ++++++++ pageserver/src/tenant.rs | 8 ++++++-- pageserver/src/tenant/timeline.rs | 18 ++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 336d1c3fb8a5..25d00d6dfd0d 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -919,6 +919,14 @@ impl Timeline { result.add_key(AUX_FILES_KEY); } + #[cfg(test)] + { + let guard = self.extra_test_dense_keyspace.load(); + for kr in &guard.ranges { + result.add_range(kr.clone()); + } + } + Ok(( result.to_keyspace(), /* AUX sparse key space */ diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index f9ed6d3071eb..d556f7233545 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -5264,6 +5264,9 @@ mod tests { let cancel = CancellationToken::new(); let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap(); + let mut test_key_end = test_key; + test_key_end.field6 = NUM_KEYS as u32; + tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end)); let mut keyspace = KeySpaceAccum::new(); @@ -6223,8 +6226,8 @@ mod tests { let cancel = CancellationToken::new(); - let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); - base_key.field1 = AUX_KEY_PREFIX; + let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix... let mut test_key = base_key; let mut lsn = Lsn(0x10); @@ -6329,6 +6332,7 @@ mod tests { Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN ) .await?; + tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next()))); let child = tenant .branch_timeline_test_with_layers( diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 54a4ceeaf3a3..28627e79117c 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -426,6 +426,14 @@ pub struct Timeline { /// Indicate whether aux file v2 storage is enabled. pub(crate) last_aux_file_policy: AtomicAuxFilePolicy, + + /// Some test cases directly place keys into the timeline without actually modifying the directory + /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that + /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense + /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and + /// in the future, add `extra_test_sparse_keyspace` if necessary. 
+ #[cfg(test)] + pub(crate) extra_test_dense_keyspace: ArcSwap, } pub struct WalReceiverInfo { @@ -2344,6 +2352,9 @@ impl Timeline { aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics), last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy), + + #[cfg(test)] + extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())), }; result.repartition_threshold = result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE; @@ -5562,6 +5573,13 @@ impl Timeline { } Ok(layers) } + + #[cfg(test)] + pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) { + let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone(); + keyspace.merge(&ks); + self.extra_test_dense_keyspace.store(Arc::new(keyspace)); + } } type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); From ad0ab3b81bacc35e73a6902b6ce53db18f5e6293 Mon Sep 17 00:00:00 2001 From: MMeent Date: Wed, 12 Jun 2024 20:25:04 +0200 Subject: [PATCH 24/41] Fix query error in vm-image-spec.yaml (#8028) This query causes metrics exporter to complain about missing data because it can't find the correct column. Issue was introduced with https://github.com/neondatabase/neon/pull/7761 --- vm-image-spec.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 73a24c42d67e..15f820bebdb5 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -304,7 +304,9 @@ files: - slot_name values: [restart_lsn] query: | - select slot_name, (restart_lsn - '0/0')::FLOAT8 from pg_replication_slots where slot_type = 'logical'; + select slot_name, (restart_lsn - '0/0')::FLOAT8 as restart_lsn + from pg_replication_slots + where slot_type = 'logical'; - metric_name: retained_wal type: gauge From dc2ab4407f8b9636a6e570818154f21fde14b9ce Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 13 Jun 2024 00:31:31 +0300 Subject: [PATCH 25/41] Fix on-demand SLRU download on standby starting at WAL segment boundary (#8031) If a standby is started right after switching to a new WAL segment, the request in the SLRU download request would point to the beginning of the segment (e.g. 0/5000000), while the not-modified-since LSN would point to just after the page header (e.g. 0/5000028). It's effectively the same position, as there cannot be any WAL records in between, but the pageserver rightly errors out on any request where the request LSN < not-modified since LSN. To fix, round down the not-modified since LSN to the beginning of the page like the request LSN. Fixes issue #8030 --- pgxn/neon/pagestore_smgr.c | 4 +-- .../regress/test_ondemand_slru_download.py | 30 +++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 6305f2ec922d..8edaf65639f7 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -3112,12 +3112,12 @@ neon_read_slru_segment(SMgrRelation reln, const char* path, int segno, void* buf request_lsn = UINT64_MAX; /* - * GetRedoStartLsn() returns LSN of basebackup. We know that the SLRU + * GetRedoStartLsn() returns LSN of the basebackup. We know that the SLRU * segment has not changed since the basebackup, because in order to * modify it, we would have had to download it already. And once * downloaded, we never evict SLRU segments from local disk. 
*/ - not_modified_since = GetRedoStartLsn(); + not_modified_since = nm_adjust_lsn(GetRedoStartLsn()); SlruKind kind; diff --git a/test_runner/regress/test_ondemand_slru_download.py b/test_runner/regress/test_ondemand_slru_download.py index 4af7dcdfc368..d6babe439339 100644 --- a/test_runner/regress/test_ondemand_slru_download.py +++ b/test_runner/regress/test_ondemand_slru_download.py @@ -129,3 +129,33 @@ def test_ondemand_download_replica(neon_env_builder: NeonEnvBuilder, shard_count cur_replica = conn_replica.cursor() cur_replica.execute("SELECT * FROM clogtest") assert cur_replica.fetchall() == [(1,), (3,)] + + +def test_ondemand_download_after_wal_switch(neon_env_builder: NeonEnvBuilder): + """ + Test on-demand SLRU download on standby, when starting right after + WAL segment switch. + + This is a repro for a bug in how the LSN at WAL page/segment + boundary was handled (https://github.com/neondatabase/neon/issues/8030) + """ + + tenant_conf = { + "lazy_slru_download": "true", + } + env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) + + endpoint = env.endpoints.create_start("main") + pg_conn = endpoint.connect() + cur = pg_conn.cursor() + + # Create a test table + cur.execute("CREATE TABLE clogtest (id integer)") + cur.execute("INSERT INTO clogtest VALUES (1)") + + # Start standby at WAL segment boundary + cur.execute("SELECT pg_switch_wal()") + lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")) + _endpoint_at_lsn = env.endpoints.create_start( + branch_name="main", endpoint_id="ep-at-lsn", lsn=lsn + ) From fbccd1e6762f61686a810ba6b4654da7da1247d9 Mon Sep 17 00:00:00 2001 From: Anna Khanova <32508607+khanova@users.noreply.github.com> Date: Thu, 13 Jun 2024 14:42:26 +0200 Subject: [PATCH 26/41] Proxy process updated errors (#8026) ## Problem Respect errors classification from cplane --- proxy/src/console/messages.rs | 169 ++++++++++++++++++++++++++++- proxy/src/console/provider.rs | 133 ++++++++++------------- proxy/src/console/provider/neon.rs | 23 ++-- proxy/src/proxy/tests.rs | 20 ++-- proxy/src/proxy/wake_compute.rs | 101 ++++++++++++----- 5 files changed, 324 insertions(+), 122 deletions(-) diff --git a/proxy/src/console/messages.rs b/proxy/src/console/messages.rs index 9869b95768c3..3b7d681a41d1 100644 --- a/proxy/src/console/messages.rs +++ b/proxy/src/console/messages.rs @@ -1,16 +1,183 @@ use measured::FixedCardinalityLabel; use serde::{Deserialize, Serialize}; -use std::fmt; +use std::fmt::{self, Display}; use crate::auth::IpPattern; use crate::intern::{BranchIdInt, EndpointIdInt, ProjectIdInt}; +use crate::proxy::retry::ShouldRetry; /// Generic error response with human-readable description. /// Note that we can't always present it to user as is. #[derive(Debug, Deserialize)] pub struct ConsoleError { pub error: Box, + #[serde(skip)] + pub http_status_code: http::StatusCode, + pub status: Option, +} + +impl ConsoleError { + pub fn get_reason(&self) -> Reason { + self.status + .as_ref() + .and_then(|s| s.details.error_info.as_ref()) + .map(|e| e.reason) + .unwrap_or(Reason::Unknown) + } + pub fn get_user_facing_message(&self) -> String { + use super::provider::errors::REQUEST_FAILED; + self.status + .as_ref() + .and_then(|s| s.details.user_facing_message.as_ref()) + .map(|m| m.message.clone().into()) + .unwrap_or_else(|| { + // Ask @neondatabase/control-plane for review before adding more. + match self.http_status_code { + http::StatusCode::NOT_FOUND => { + // Status 404: failed to get a project-related resource. 
+ format!("{REQUEST_FAILED}: endpoint cannot be found") + } + http::StatusCode::NOT_ACCEPTABLE => { + // Status 406: endpoint is disabled (we don't allow connections). + format!("{REQUEST_FAILED}: endpoint is disabled") + } + http::StatusCode::LOCKED | http::StatusCode::UNPROCESSABLE_ENTITY => { + // Status 423: project might be in maintenance mode (or bad state), or quotas exceeded. + format!("{REQUEST_FAILED}: endpoint is temporarily unavailable. Check your quotas and/or contact our support.") + } + _ => REQUEST_FAILED.to_owned(), + } + }) + } +} + +impl Display for ConsoleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let msg = self + .status + .as_ref() + .and_then(|s| s.details.user_facing_message.as_ref()) + .map(|m| m.message.as_ref()) + .unwrap_or_else(|| &self.error); + write!(f, "{}", msg) + } +} + +impl ShouldRetry for ConsoleError { + fn could_retry(&self) -> bool { + if self.status.is_none() || self.status.as_ref().unwrap().details.retry_info.is_none() { + // retry some temporary failures because the compute was in a bad state + // (bad request can be returned when the endpoint was in transition) + return match &self { + ConsoleError { + http_status_code: http::StatusCode::BAD_REQUEST, + .. + } => true, + // don't retry when quotas are exceeded + ConsoleError { + http_status_code: http::StatusCode::UNPROCESSABLE_ENTITY, + ref error, + .. + } => !error.contains("compute time quota of non-primary branches is exceeded"), + // locked can be returned when the endpoint was in transition + // or when quotas are exceeded. don't retry when quotas are exceeded + ConsoleError { + http_status_code: http::StatusCode::LOCKED, + ref error, + .. + } => { + !error.contains("quota exceeded") + && !error.contains("the limit for current plan reached") + } + _ => false, + }; + } + + // retry if the response has a retry delay + if let Some(retry_info) = self + .status + .as_ref() + .and_then(|s| s.details.retry_info.as_ref()) + { + retry_info.retry_delay_ms > 0 + } else { + false + } + } +} + +#[derive(Debug, Deserialize)] +pub struct Status { + pub code: Box, + pub message: Box, + pub details: Details, +} + +#[derive(Debug, Deserialize)] +pub struct Details { + pub error_info: Option, + pub retry_info: Option, + pub user_facing_message: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ErrorInfo { + pub reason: Reason, + // Schema could also have `metadata` field, but it's not structured. Skip it for now. 
+} + +#[derive(Clone, Copy, Debug, Deserialize, Default)] +pub enum Reason { + #[serde(rename = "ROLE_PROTECTED")] + RoleProtected, + #[serde(rename = "RESOURCE_NOT_FOUND")] + ResourceNotFound, + #[serde(rename = "PROJECT_NOT_FOUND")] + ProjectNotFound, + #[serde(rename = "ENDPOINT_NOT_FOUND")] + EndpointNotFound, + #[serde(rename = "BRANCH_NOT_FOUND")] + BranchNotFound, + #[serde(rename = "RATE_LIMIT_EXCEEDED")] + RateLimitExceeded, + #[serde(rename = "NON_PRIMARY_BRANCH_COMPUTE_TIME_EXCEEDED")] + NonPrimaryBranchComputeTimeExceeded, + #[serde(rename = "ACTIVE_TIME_QUOTA_EXCEEDED")] + ActiveTimeQuotaExceeded, + #[serde(rename = "COMPUTE_TIME_QUOTA_EXCEEDED")] + ComputeTimeQuotaExceeded, + #[serde(rename = "WRITTEN_DATA_QUOTA_EXCEEDED")] + WrittenDataQuotaExceeded, + #[serde(rename = "DATA_TRANSFER_QUOTA_EXCEEDED")] + DataTransferQuotaExceeded, + #[serde(rename = "LOGICAL_SIZE_QUOTA_EXCEEDED")] + LogicalSizeQuotaExceeded, + #[default] + #[serde(other)] + Unknown, +} + +impl Reason { + pub fn is_not_found(&self) -> bool { + matches!( + self, + Reason::ResourceNotFound + | Reason::ProjectNotFound + | Reason::EndpointNotFound + | Reason::BranchNotFound + ) + } +} + +#[derive(Debug, Deserialize)] +pub struct RetryInfo { + pub retry_delay_ms: u64, +} + +#[derive(Debug, Deserialize)] +pub struct UserFacingMessage { + pub message: Box, } /// Response which holds client's auth secret, e.g. [`crate::scram::ServerSecret`]. diff --git a/proxy/src/console/provider.rs b/proxy/src/console/provider.rs index 634ec9042cd2..915c2ee7a64e 100644 --- a/proxy/src/console/provider.rs +++ b/proxy/src/console/provider.rs @@ -25,8 +25,8 @@ use tracing::info; pub mod errors { use crate::{ + console::messages::{self, ConsoleError}, error::{io_error, ReportableError, UserFacingError}, - http, proxy::retry::ShouldRetry, }; use thiserror::Error; @@ -34,17 +34,14 @@ pub mod errors { use super::ApiLockError; /// A go-to error message which doesn't leak any detail. - const REQUEST_FAILED: &str = "Console request failed"; + pub const REQUEST_FAILED: &str = "Console request failed"; /// Common console API error. #[derive(Debug, Error)] pub enum ApiError { /// Error returned by the console itself. - #[error("{REQUEST_FAILED} with {}: {}", .status, .text)] - Console { - status: http::StatusCode, - text: Box, - }, + #[error("{REQUEST_FAILED} with {0}")] + Console(ConsoleError), /// Various IO errors like broken pipe or malformed payload. #[error("{REQUEST_FAILED}: {0}")] @@ -53,11 +50,11 @@ pub mod errors { impl ApiError { /// Returns HTTP status code if it's the reason for failure. - pub fn http_status_code(&self) -> Option { + pub fn get_reason(&self) -> messages::Reason { use ApiError::*; match self { - Console { status, .. } => Some(*status), - _ => None, + Console(e) => e.get_reason(), + _ => messages::Reason::Unknown, } } } @@ -67,22 +64,7 @@ pub mod errors { use ApiError::*; match self { // To minimize risks, only select errors are forwarded to users. - // Ask @neondatabase/control-plane for review before adding more. - Console { status, .. } => match *status { - http::StatusCode::NOT_FOUND => { - // Status 404: failed to get a project-related resource. - format!("{REQUEST_FAILED}: endpoint cannot be found") - } - http::StatusCode::NOT_ACCEPTABLE => { - // Status 406: endpoint is disabled (we don't allow connections). 
- format!("{REQUEST_FAILED}: endpoint is disabled") - } - http::StatusCode::LOCKED | http::StatusCode::UNPROCESSABLE_ENTITY => { - // Status 423: project might be in maintenance mode (or bad state), or quotas exceeded. - format!("{REQUEST_FAILED}: endpoint is temporarily unavailable. Check your quotas and/or contact our support.") - } - _ => REQUEST_FAILED.to_owned(), - }, + Console(c) => c.get_user_facing_message(), _ => REQUEST_FAILED.to_owned(), } } @@ -91,29 +73,56 @@ pub mod errors { impl ReportableError for ApiError { fn get_error_kind(&self) -> crate::error::ErrorKind { match self { - ApiError::Console { - status: http::StatusCode::NOT_FOUND | http::StatusCode::NOT_ACCEPTABLE, - .. - } => crate::error::ErrorKind::User, - ApiError::Console { - status: http::StatusCode::UNPROCESSABLE_ENTITY, - text, - } if text.contains("compute time quota of non-primary branches is exceeded") => { - crate::error::ErrorKind::User - } - ApiError::Console { - status: http::StatusCode::LOCKED, - text, - } if text.contains("quota exceeded") - || text.contains("the limit for current plan reached") => - { - crate::error::ErrorKind::User + ApiError::Console(e) => { + use crate::error::ErrorKind::*; + match e.get_reason() { + crate::console::messages::Reason::RoleProtected => User, + crate::console::messages::Reason::ResourceNotFound => User, + crate::console::messages::Reason::ProjectNotFound => User, + crate::console::messages::Reason::EndpointNotFound => User, + crate::console::messages::Reason::BranchNotFound => User, + crate::console::messages::Reason::RateLimitExceeded => ServiceRateLimit, + crate::console::messages::Reason::NonPrimaryBranchComputeTimeExceeded => { + User + } + crate::console::messages::Reason::ActiveTimeQuotaExceeded => User, + crate::console::messages::Reason::ComputeTimeQuotaExceeded => User, + crate::console::messages::Reason::WrittenDataQuotaExceeded => User, + crate::console::messages::Reason::DataTransferQuotaExceeded => User, + crate::console::messages::Reason::LogicalSizeQuotaExceeded => User, + crate::console::messages::Reason::Unknown => match &e { + ConsoleError { + http_status_code: + http::StatusCode::NOT_FOUND | http::StatusCode::NOT_ACCEPTABLE, + .. + } => crate::error::ErrorKind::User, + ConsoleError { + http_status_code: http::StatusCode::UNPROCESSABLE_ENTITY, + error, + .. + } if error.contains( + "compute time quota of non-primary branches is exceeded", + ) => + { + crate::error::ErrorKind::User + } + ConsoleError { + http_status_code: http::StatusCode::LOCKED, + error, + .. + } if error.contains("quota exceeded") + || error.contains("the limit for current plan reached") => + { + crate::error::ErrorKind::User + } + ConsoleError { + http_status_code: http::StatusCode::TOO_MANY_REQUESTS, + .. + } => crate::error::ErrorKind::ServiceRateLimit, + ConsoleError { .. } => crate::error::ErrorKind::ControlPlane, + }, + } } - ApiError::Console { - status: http::StatusCode::TOO_MANY_REQUESTS, - .. - } => crate::error::ErrorKind::ServiceRateLimit, - ApiError::Console { .. } => crate::error::ErrorKind::ControlPlane, ApiError::Transport(_) => crate::error::ErrorKind::ControlPlane, } } @@ -124,31 +133,7 @@ pub mod errors { match self { // retry some transport errors Self::Transport(io) => io.could_retry(), - // retry some temporary failures because the compute was in a bad state - // (bad request can be returned when the endpoint was in transition) - Self::Console { - status: http::StatusCode::BAD_REQUEST, - .. 
- } => true, - // don't retry when quotas are exceeded - Self::Console { - status: http::StatusCode::UNPROCESSABLE_ENTITY, - ref text, - } => !text.contains("compute time quota of non-primary branches is exceeded"), - // locked can be returned when the endpoint was in transition - // or when quotas are exceeded. don't retry when quotas are exceeded - Self::Console { - status: http::StatusCode::LOCKED, - ref text, - } => { - // written data quota exceeded - // data transfer quota exceeded - // compute time quota exceeded - // logical size quota exceeded - !text.contains("quota exceeded") - && !text.contains("the limit for current plan reached") - } - _ => false, + Self::Console(e) => e.could_retry(), } } } @@ -509,7 +494,7 @@ impl ApiLocks { self.metrics .semaphore_acquire_seconds .observe(now.elapsed().as_secs_f64()); - + info!("acquired permit {:?}", now.elapsed().as_secs_f64()); Ok(WakeComputePermit { permit: permit? }) } diff --git a/proxy/src/console/provider/neon.rs b/proxy/src/console/provider/neon.rs index d72229b02934..41bd2f49567e 100644 --- a/proxy/src/console/provider/neon.rs +++ b/proxy/src/console/provider/neon.rs @@ -94,12 +94,14 @@ impl Api { let body = match parse_body::(response).await { Ok(body) => body, // Error 404 is special: it's ok not to have a secret. - Err(e) => match e.http_status_code() { - Some(http::StatusCode::NOT_FOUND) => { + // TODO(anna): retry + Err(e) => { + if e.get_reason().is_not_found() { return Ok(AuthInfo::default()); + } else { + return Err(e.into()); } - _otherwise => return Err(e.into()), - }, + } }; let secret = if body.role_secret.is_empty() { @@ -328,19 +330,24 @@ async fn parse_body serde::Deserialize<'a>>( info!("request succeeded, processing the body"); return Ok(response.json().await?); } + let s = response.bytes().await?; + // Log plaintext to be able to detect, whether there are some cases not covered by the error struct. + info!("response_error plaintext: {:?}", s); // Don't throw an error here because it's not as important // as the fact that the request itself has failed. 
- let body = response.json().await.unwrap_or_else(|e| { + let mut body = serde_json::from_slice(&s).unwrap_or_else(|e| { warn!("failed to parse error body: {e}"); ConsoleError { error: "reason unclear (malformed error message)".into(), + http_status_code: status, + status: None, } }); + body.http_status_code = status; - let text = body.error; - error!("console responded with an error ({status}): {text}"); - Err(ApiError::Console { status, text }) + error!("console responded with an error ({status}): {body:?}"); + Err(ApiError::Console(body)) } fn parse_host_port(input: &str) -> Option<(&str, u16)> { diff --git a/proxy/src/proxy/tests.rs b/proxy/src/proxy/tests.rs index ad48af0093bd..96683511fec5 100644 --- a/proxy/src/proxy/tests.rs +++ b/proxy/src/proxy/tests.rs @@ -12,7 +12,7 @@ use crate::auth::backend::{ }; use crate::config::{CertResolver, RetryConfig}; use crate::console::caches::NodeInfoCache; -use crate::console::messages::MetricsAuxInfo; +use crate::console::messages::{ConsoleError, MetricsAuxInfo}; use crate::console::provider::{CachedAllowedIps, CachedRoleSecret, ConsoleBackend}; use crate::console::{self, CachedNodeInfo, NodeInfo}; use crate::error::ErrorKind; @@ -484,18 +484,20 @@ impl TestBackend for TestConnectMechanism { match action { ConnectAction::Wake => Ok(helper_create_cached_node_info(self.cache)), ConnectAction::WakeFail => { - let err = console::errors::ApiError::Console { - status: http::StatusCode::FORBIDDEN, - text: "TEST".into(), - }; + let err = console::errors::ApiError::Console(ConsoleError { + http_status_code: http::StatusCode::FORBIDDEN, + error: "TEST".into(), + status: None, + }); assert!(!err.could_retry()); Err(console::errors::WakeComputeError::ApiError(err)) } ConnectAction::WakeRetry => { - let err = console::errors::ApiError::Console { - status: http::StatusCode::BAD_REQUEST, - text: "TEST".into(), - }; + let err = console::errors::ApiError::Console(ConsoleError { + http_status_code: http::StatusCode::BAD_REQUEST, + error: "TEST".into(), + status: None, + }); assert!(err.could_retry()); Err(console::errors::WakeComputeError::ApiError(err)) } diff --git a/proxy/src/proxy/wake_compute.rs b/proxy/src/proxy/wake_compute.rs index 94b03e1ccc10..c166cf4389dc 100644 --- a/proxy/src/proxy/wake_compute.rs +++ b/proxy/src/proxy/wake_compute.rs @@ -1,4 +1,5 @@ use crate::config::RetryConfig; +use crate::console::messages::ConsoleError; use crate::console::{errors::WakeComputeError, provider::CachedNodeInfo}; use crate::context::RequestMonitoring; use crate::metrics::{ @@ -88,36 +89,76 @@ fn report_error(e: &WakeComputeError, retry: bool) { let kind = match e { WakeComputeError::BadComputeAddress(_) => WakeupFailureKind::BadComputeAddress, WakeComputeError::ApiError(ApiError::Transport(_)) => WakeupFailureKind::ApiTransportError, - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::LOCKED, - ref text, - }) if text.contains("written data quota exceeded") - || text.contains("the limit for current plan reached") => - { - WakeupFailureKind::QuotaExceeded - } - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::UNPROCESSABLE_ENTITY, - ref text, - }) if text.contains("compute time quota of non-primary branches is exceeded") => { - WakeupFailureKind::QuotaExceeded - } - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::LOCKED, - .. - }) => WakeupFailureKind::ApiConsoleLocked, - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::BAD_REQUEST, - .. 
- }) => WakeupFailureKind::ApiConsoleBadRequest, - WakeComputeError::ApiError(ApiError::Console { status, .. }) - if status.is_server_error() => - { - WakeupFailureKind::ApiConsoleOtherServerError - } - WakeComputeError::ApiError(ApiError::Console { .. }) => { - WakeupFailureKind::ApiConsoleOtherError - } + WakeComputeError::ApiError(ApiError::Console(e)) => match e.get_reason() { + crate::console::messages::Reason::RoleProtected => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::ResourceNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::ProjectNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::EndpointNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::BranchNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::RateLimitExceeded => { + WakeupFailureKind::ApiConsoleLocked + } + crate::console::messages::Reason::NonPrimaryBranchComputeTimeExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::ActiveTimeQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::ComputeTimeQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::WrittenDataQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::DataTransferQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::LogicalSizeQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::Unknown => match e { + ConsoleError { + http_status_code: StatusCode::LOCKED, + ref error, + .. + } if error.contains("written data quota exceeded") + || error.contains("the limit for current plan reached") => + { + WakeupFailureKind::QuotaExceeded + } + ConsoleError { + http_status_code: StatusCode::UNPROCESSABLE_ENTITY, + ref error, + .. + } if error.contains("compute time quota of non-primary branches is exceeded") => { + WakeupFailureKind::QuotaExceeded + } + ConsoleError { + http_status_code: StatusCode::LOCKED, + .. + } => WakeupFailureKind::ApiConsoleLocked, + ConsoleError { + http_status_code: StatusCode::BAD_REQUEST, + .. + } => WakeupFailureKind::ApiConsoleBadRequest, + ConsoleError { + http_status_code, .. + } if http_status_code.is_server_error() => { + WakeupFailureKind::ApiConsoleOtherServerError + } + ConsoleError { .. } => WakeupFailureKind::ApiConsoleOtherError, + }, + }, WakeComputeError::TooManyConnections => WakeupFailureKind::ApiConsoleLocked, WakeComputeError::TooManyConnectionAttempts(_) => WakeupFailureKind::TimeoutError, }; From d25f7e3dd575878df49925bead4c797a61757751 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 13 Jun 2024 09:44:37 -0400 Subject: [PATCH 27/41] test(pageserver): add test wal record for unit testing (#8015) https://github.com/neondatabase/neon/issues/8002 We need mock WAL record to make it easier to write unit tests. This pull request adds such a record. It has `clear` flag and `append` field. The tests for legacy-enhanced compaction are not modified yet and will be part of the next pull request. 
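For illustration, a unit test can now spell out a key's history directly with the helpers introduced below (the key and LSNs here are placeholders, not part of this patch):

```rust
// Sketch: a per-key history built from the new test record.
let deltas = vec![
    // Base image of the page at 0x10.
    (key, Lsn(0x10), Value::Image("0x10".into())),
    // Appends ",0x20" to the reconstructed image.
    (key, Lsn(0x20), Value::WalRecord(NeonWalRecord::wal_append(",0x20"))),
    // Clears the reconstructed image before appending (here, appending nothing).
    (key, Lsn(0x30), Value::WalRecord(NeonWalRecord::wal_clear())),
];
// Reading `key` at Lsn(0x20) should reconstruct "0x10,0x20".
```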
--------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 80 +++++++++++++++++++++++++++- pageserver/src/walrecord.rs | 43 ++++++++++++++- pageserver/src/walredo/apply_neon.rs | 14 +++++ 3 files changed, 134 insertions(+), 3 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index d556f7233545..0bd3ece2e361 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4041,6 +4041,7 @@ mod tests { use crate::repository::{Key, Value}; use crate::tenant::harness::*; use crate::tenant::timeline::CompactFlags; + use crate::walrecord::NeonWalRecord; use crate::DEFAULT_PG_VERSION; use bytes::{Bytes, BytesMut}; use hex_literal::hex; @@ -6705,8 +6706,8 @@ mod tests { } #[tokio::test] - async fn test_simple_bottom_most_compaction() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_simple_bottom_most_compaction")?; + async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_simple_bottom_most_compaction_images")?; let (tenant, ctx) = harness.load().await; fn get_key(id: u32) -> Key { @@ -6861,4 +6862,79 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_neon_test_record() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_neon_test_record")?; + let (tenant, ctx) = harness.load().await; + + fn get_key(id: u32) -> Key { + // using aux key here b/c they are guaranteed to be inside `collect_keyspace`. + let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + + let delta1 = vec![ + ( + get_key(1), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append(",0x20")), + ), + ( + get_key(1), + Lsn(0x30), + Value::WalRecord(NeonWalRecord::wal_append(",0x30")), + ), + (get_key(2), Lsn(0x10), Value::Image("0x10".into())), + ( + get_key(2), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append(",0x20")), + ), + ( + get_key(2), + Lsn(0x30), + Value::WalRecord(NeonWalRecord::wal_append(",0x30")), + ), + (get_key(3), Lsn(0x10), Value::Image("0x10".into())), + ( + get_key(3), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_clear()), + ), + (get_key(4), Lsn(0x10), Value::Image("0x10".into())), + ( + get_key(4), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_init()), + ), + ]; + let image1 = vec![(get_key(1), "0x10".into())]; + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + vec![delta1], // delta layers + vec![(Lsn(0x10), image1)], // image layers + Lsn(0x50), + ) + .await?; + + assert_eq!( + tline.get(get_key(1), Lsn(0x50), &ctx).await?, + Bytes::from_static(b"0x10,0x20,0x30") + ); + assert_eq!( + tline.get(get_key(2), Lsn(0x50), &ctx).await?, + Bytes::from_static(b"0x10,0x20,0x30") + ); + // assert_eq!(tline.get(get_key(3), Lsn(0x50), &ctx).await?, Bytes::new()); + // assert_eq!(tline.get(get_key(4), Lsn(0x50), &ctx).await?, Bytes::new()); + + Ok(()) + } } diff --git a/pageserver/src/walrecord.rs b/pageserver/src/walrecord.rs index 205f8dee4df4..62a3a91b0be6 100644 --- a/pageserver/src/walrecord.rs +++ b/pageserver/src/walrecord.rs @@ -49,6 +49,19 @@ pub enum NeonWalRecord { file_path: String, content: Option, }, + + /// A testing record for unit testing purposes. It supports append data to an existing image, or clear it. + #[cfg(test)] + Test { + /// Append a string to the image. + append: String, + /// Clear the image before appending. + clear: bool, + /// Treat this record as an init record. 
`clear` should be set to true if this field is set + /// to true. This record does not need the history WALs to reconstruct. See [`NeonWalRecord::will_init`] and + /// its references in `timeline.rs`. + will_init: bool, + }, } impl NeonWalRecord { @@ -58,11 +71,39 @@ impl NeonWalRecord { // If you change this function, you'll also need to change ValueBytes::will_init match self { NeonWalRecord::Postgres { will_init, rec: _ } => *will_init, - + #[cfg(test)] + NeonWalRecord::Test { will_init, .. } => *will_init, // None of the special neon record types currently initialize the page _ => false, } } + + #[cfg(test)] + pub(crate) fn wal_append(s: impl AsRef) -> Self { + Self::Test { + append: s.as_ref().to_string(), + clear: false, + will_init: false, + } + } + + #[cfg(test)] + pub(crate) fn wal_clear() -> Self { + Self::Test { + append: "".to_string(), + clear: true, + will_init: false, + } + } + + #[cfg(test)] + pub(crate) fn wal_init() -> Self { + Self::Test { + append: "".to_string(), + clear: true, + will_init: true, + } + } } /// DecodedBkpBlock represents per-page data contained in a WAL record. diff --git a/pageserver/src/walredo/apply_neon.rs b/pageserver/src/walredo/apply_neon.rs index 24e8d8b01cf5..facf01004c81 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -244,6 +244,20 @@ pub(crate) fn apply_in_neon( let mut writer = page.writer(); dir.ser_into(&mut writer)?; } + #[cfg(test)] + NeonWalRecord::Test { + append, + clear, + will_init, + } => { + if *will_init { + assert!(*clear, "init record must be clear to ensure correctness"); + } + if *clear { + page.clear(); + } + page.put_slice(append.as_bytes()); + } } Ok(()) } From 82719542c617a74850e078c99c01a3c7f9e32beb Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 13 Jun 2024 20:20:47 +0200 Subject: [PATCH 28/41] fix: vectored get returns incorrect result on inexact materialized page cache hit (#8050) # Problem Suppose our vectored get starts with an inexact materialized page cache hit ("cached lsn") that is shadowed by a newer image layer image layer. Like so: ``` +-+ < delta layer | | -|-|----- < image layer | | | | -|-|----- < cached lsn for requested key +_+ ``` The correct visitation order is 1. inmemory layers 2. delta layer records in LSN range `[image_layer.lsn, oldest_inmemory_layer.lsn_range.start)` 3. image layer However, the vectored get code, when it visits the delta layer, it (incorrectly!) returns with state `Complete`. The reason why it returns is that it calls `on_lsn_advanced` with `self.lsn_range.start`, i.e., the layer's LSN range. Instead, it should use `lsn_range.start`, i.e., the LSN range from the correct visitation order listed above. # Solution Use `lsn_range.start` instead of `self.lsn_range.start`. 
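In other words, with made-up numbers (an illustration only, not the actual code):

```rust
// Hypothetical LSNs matching the diagram above; the cached lsn for the
// requested key sits somewhere below the image layer at 0x40.
let layer_lsn_range = Lsn(0x10)..Lsn(0x80); // self.lsn_range: the whole delta layer
let lsn_range = Lsn(0x40)..Lsn(0x80);       // what this visit actually reads,
                                            // bounded below by the image layer
// Wrong: claims progress all the way down to 0x10, below both the image layer
// and the cached page, so the read is treated as Complete without ever
// visiting the image layer.
//   reconstruct_state.on_lsn_advanced(&keyspace, layer_lsn_range.start);
// Right: only claim progress for the range that was actually read.
reconstruct_state.on_lsn_advanced(&keyspace, lsn_range.start);
```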
# Refs discovered by & fixes https://github.com/neondatabase/neon/issues/6967 Co-authored-by: Vlad Lazar --- pageserver/src/tenant/storage_layer/delta_layer.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index eb7cf81643f8..5e01ecd71de0 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -219,7 +219,6 @@ pub struct DeltaLayerInner { // values copied from summary index_start_blk: u32, index_root_blk: u32, - lsn_range: Range, file: VirtualFile, file_id: FileId, @@ -785,7 +784,6 @@ impl DeltaLayerInner { file_id, index_start_blk: actual_summary.index_start_blk, index_root_blk: actual_summary.index_root_blk, - lsn_range: actual_summary.lsn_range, max_vectored_read_bytes, })) } @@ -911,7 +909,7 @@ impl DeltaLayerInner { let reads = Self::plan_reads( &keyspace, - lsn_range, + lsn_range.clone(), data_end_offset, index_reader, planner, @@ -924,7 +922,7 @@ impl DeltaLayerInner { self.do_reads_and_update_state(reads, reconstruct_state, ctx) .await; - reconstruct_state.on_lsn_advanced(&keyspace, self.lsn_range.start); + reconstruct_state.on_lsn_advanced(&keyspace, lsn_range.start); Ok(()) } From 0c3e3a8667294a3dc345b0f03364aa359a5154de Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 13 Jun 2024 10:31:58 -0500 Subject: [PATCH 29/41] Set application_name for internal connections to computes This will help when analyzing the origins of connections to a compute like in [0]. [0]: https://github.com/neondatabase/cloud/issues/14247 --- compute_tools/src/bin/compute_ctl.rs | 2 +- vm-image-spec.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index 9295f091d58e..7bf5db5a57be 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -735,7 +735,7 @@ fn cli() -> clap::Command { Arg::new("filecache-connstr") .long("filecache-connstr") .default_value( - "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable", + "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor", ) .value_name("FILECACHE_CONNSTR"), ) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 15f820bebdb5..99164645a730 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -18,7 +18,7 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres" /bin/postgres_exporter' + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter' - name: sql-exporter user: nobody sysvInitAction: respawn @@ -93,7 +93,7 @@ files: target: # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable' + data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter' # Collectors (referenced by name) to execute on the target. # Glob patterns are supported (see for syntax). @@ -128,7 +128,7 @@ files: target: # Data source name always has a URI schema that matches the driver name. In some cases (e.g. 
MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable' + data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling' # Collectors (referenced by name) to execute on the target. # Glob patterns are supported (see for syntax). From f67010109f488cd20e7aae9b8feaf0c2016b63e9 Mon Sep 17 00:00:00 2001 From: James Broadhead Date: Fri, 14 Jun 2024 09:17:43 +0100 Subject: [PATCH 30/41] extensions: pgvector-0.7.2 (#8037) Update pgvector to 0.7.2 Purely mechanical update to pgvector.patch, just as a place to start from --- Dockerfile.compute-node | 6 +++--- patches/pgvector.patch | 32 ++++++++------------------------ 2 files changed, 11 insertions(+), 27 deletions(-) diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index a86fdd0bc3d1..3a73ac71b06d 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -246,8 +246,8 @@ COPY patches/pgvector.patch /pgvector.patch # By default, pgvector Makefile uses `-march=native`. We don't want that, # because we build the images on different machines than where we run them. # Pass OPTFLAGS="" to remove it. -RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.1.tar.gz -O pgvector.tar.gz && \ - echo "fe6c8cb4e0cd1a8cb60f5badf9e1701e0fcabcfc260931c26d01e155c4dd21d1 pgvector.tar.gz" | sha256sum --check && \ +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.2.tar.gz -O pgvector.tar.gz && \ + echo "617fba855c9bcb41a2a9bc78a78567fd2e147c72afd5bf9d37b31b9591632b30 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \ patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ @@ -979,7 +979,7 @@ RUN cd /ext-src/ && for f in *.tar.gz; \ do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \ rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \ || exit 1; rm -f $f; done -RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch +RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch # cmake is required for the h3 test RUN apt-get update && apt-get install -y cmake RUN patch -p1 < /ext-src/pg_hintplan.patch diff --git a/patches/pgvector.patch b/patches/pgvector.patch index 84ac6644c514..3e1ffcaaaf2c 100644 --- a/patches/pgvector.patch +++ b/patches/pgvector.patch @@ -1,19 +1,8 @@ -From 0b0194a57bd0f3598bd57dbedd0df3932330169d Mon Sep 17 00:00:00 2001 -From: Heikki Linnakangas -Date: Fri, 2 Feb 2024 22:26:45 +0200 -Subject: [PATCH 1/1] Make v0.6.0 work with Neon - -Now that the WAL-logging happens as a separate step at the end of the -build, we need a few neon-specific hints to make it work. 
---- - src/hnswbuild.c | 36 ++++++++++++++++++++++++++++++++++++ - 1 file changed, 36 insertions(+) - diff --git a/src/hnswbuild.c b/src/hnswbuild.c -index 680789b..ec54dea 100644 +index dcfb2bd..d5189ee 100644 --- a/src/hnswbuild.c +++ b/src/hnswbuild.c -@@ -840,9 +840,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) +@@ -860,9 +860,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false); @@ -31,7 +20,7 @@ index 680789b..ec54dea 100644 /* Close relations within worker */ index_close(indexRel, indexLockmode); table_close(heapRel, heapLockmode); -@@ -1089,13 +1097,41 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, +@@ -1117,12 +1125,38 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, SeedRandom(42); #endif @@ -43,14 +32,13 @@ index 680789b..ec54dea 100644 BuildGraph(buildstate, forkNum); +- if (RelationNeedsWAL(index) || forkNum == INIT_FORKNUM) +#ifdef NEON_SMGR + smgr_finish_unlogged_build_phase_1(RelationGetSmgr(index)); +#endif + - if (RelationNeedsWAL(index)) -+ { - log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocks(index), true); - ++ if (RelationNeedsWAL(index) || forkNum == INIT_FORKNUM) { + log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocksInFork(index, forkNum), true); +#ifdef NEON_SMGR + { +#if PG_VERSION_NUM >= 160000 @@ -60,7 +48,7 @@ index 680789b..ec54dea 100644 +#endif + + SetLastWrittenLSNForBlockRange(XactLastRecEnd, rlocator, -+ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); ++ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); + SetLastWrittenLSNForRelation(XactLastRecEnd, rlocator, MAIN_FORKNUM); + } +#endif @@ -69,10 +57,6 @@ index 680789b..ec54dea 100644 +#ifdef NEON_SMGR + smgr_end_unlogged_build(RelationGetSmgr(index)); +#endif -+ + FreeBuildState(buildstate); } - --- -2.39.2 - From 425eed24e896a99d0ed03d118ee07fb0ae339bde Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 14 Jun 2024 09:39:31 +0100 Subject: [PATCH 31/41] pageserver: refine shutdown handling in secondary download (#8052) ## Problem Some code paths during secondary mode download are returning Ok() rather than UpdateError::Cancelled. This is functionally okay, but it means that the end of TenantDownloader::download has a sanity check that the progress is 100% on success, and prints a "Correcting drift..." warning if not. This warning can be emitted in a test, e.g. https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8049/9503642976/index.html#/testresult/fff1624ba6adae9e. ## Summary of changes - In secondary download cancellation paths, use Err(UpdateError::Cancelled) rather than Ok(), so that we drop out of the download function and do not reach the progress sanity check. --- pageserver/src/tenant/secondary/downloader.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 62803c7838ab..24176ecf1956 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -513,7 +513,7 @@ impl<'a> TenantDownloader<'a> { // cover our access to local storage. 
let Ok(_guard) = self.secondary_state.gate.enter() else { // Shutting down - return Ok(()); + return Err(UpdateError::Cancelled); }; let tenant_shard_id = self.secondary_state.get_tenant_shard_id(); @@ -846,7 +846,7 @@ impl<'a> TenantDownloader<'a> { for layer in timeline.layers { if self.secondary_state.cancel.is_cancelled() { tracing::debug!("Cancelled -- dropping out of layer loop"); - return Ok(()); + return Err(UpdateError::Cancelled); } // Existing on-disk layers: just update their access time. From 789196572e5e2921371a053ae8cd70dd31b27c5b Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 14 Jun 2024 11:51:12 +0300 Subject: [PATCH 32/41] Fix test_replica_query_race flakiness (#8038) This failed once with `relation "test" does not exist` when trying to run the query on the standby. It's possible that the standby is started before the CREATE TABLE is processed in the pageserver, and the standby opens up for queries before it has received the CREATE TABLE transaction from the primary. To fix, wait for the standby to catch up to the primary before starting to run the queries. https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8025/9483658488/index.html --- test_runner/regress/test_hot_standby.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index 1d1b2fb48564..8edc8c554c82 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -300,7 +300,7 @@ def test_replica_query_race(neon_simple_env: NeonEnv): p_cur.execute("CREATE TABLE test AS SELECT 0 AS counter") standby_ep = env.endpoints.new_replica_start(origin=primary_ep, endpoint_id="standby") - time.sleep(1) + wait_replica_caughtup(primary_ep, standby_ep) # In primary, run a lot of UPDATEs on a single page finished = False From edc900028e5440bb500d16c1a05cef554d92f692 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Fri, 14 Jun 2024 10:24:13 +0100 Subject: [PATCH 33/41] CI: Update outdated GitHub Actions (#8042) ## Problem We have some amount of outdated action in the CI pipeline, GitHub complains about some of them. 
## Summary of changes - Update `actions/checkout@1` (a really old one) in `vm-compute-node-image` - Update `actions/checkout@3` in `build-build-tools-image` - Update `docker/setup-buildx-action` in all workflows / jobs, it was downgraded in https://github.com/neondatabase/neon/pull/7445, but it it seems it works fine now --- .github/workflows/build-build-tools-image.yml | 4 ++-- .github/workflows/build_and_test.yml | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index 9aacb09d10c2..da1efe95715f 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -55,7 +55,7 @@ jobs: exit 1 fi - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # Use custom DOCKER_CONFIG directory to avoid conflicts with default settings # The default value is ~/.docker @@ -64,7 +64,7 @@ jobs: mkdir -p /tmp/.docker-custom echo DOCKER_CONFIG=/tmp/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 - uses: docker/login-action@v2 with: diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 71ca7329ee0d..1b433a703324 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -744,7 +744,7 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 - uses: docker/login-action@v3 with: @@ -822,11 +822,11 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 with: # Disable parallelism for docker buildkit. # As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner. - config-inline: | + buildkitd-config-inline: | [worker.oci] max-parallelism = 1 @@ -858,7 +858,7 @@ jobs: cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max tags: | neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} - + - name: Build neon extensions test image if: matrix.version == 'v16' uses: docker/build-push-action@v5 @@ -965,7 +965,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 with: fetch-depth: 0 From 6843fd8f89a24aa08ad71bcabbb320a3211c979e Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 14 Jun 2024 10:37:30 +0100 Subject: [PATCH 34/41] storage controller: always wait for tenant detach before delete (#8049) ## Problem This test could fail with a timeout waiting for tenant deletions. Tenant deletions could get tripped up on nodes transitioning from offline to online at the moment of the deletion. In a previous reconciliation, the reconciler would skip detaching a particular location because the node was offline, but then when we do the delete the node is marked as online and can be picked as the node to use for issuing a deletion request. This hits the "Unexpectedly still attached path", which would still work if the caller kept calling DELETE, but if a caller does a Delete,get,get,get poll, then it doesn't work because the GET calls fail after we've marked the tenant as detached. 
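To make the failure mode concrete, here is a minimal sketch of the delete-then-poll flow described above. It is illustrative only: the endpoint path, host/port and tenant id are placeholders, and `curl` stands in for the real client.

```bash
# Hypothetical sketch of the legacy "202 + poll" deletion contract; host, port,
# path and tenant id are placeholders, and curl stands in for the real client.
TENANT_ID="<tenant id>"
URL="http://storage-controller:1234/v1/tenant/$TENANT_ID"

# A DELETE that hits the "unexpectedly still attached" path answers 202 Accepted.
curl -s -o /dev/null -w '%{http_code}\n' -X DELETE "$URL"

# The caller then polls with GET until the tenant is gone, but once the tenant
# is marked detached those GETs fail, so the poll can never complete cleanly.
# The change below avoids this by waiting for detach reconciliation instead of
# returning 202 while something is still attached.
until [ "$(curl -s -o /dev/null -w '%{http_code}' "$URL")" = "404" ]; do
  sleep 1
done
```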
## Summary of changes Fix the undesirable storage controller behavior highlighted by this test failure: - Change tenant deletion flow to _always_ wait for reconciliation to succeed: it was unsound to proceed and return 202 if something was still attached, because after the 202 callers can no longer GET the tenant. Stabilize the test: - Add a reconcile_until_idle to the test, so that it will not have reconciliations running in the background while we mark a node online. This test is not meant to be a chaos test: we should test that kind of complexity elsewhere. - This reconcile_until_idle also fixes another failure mode where the test might see a None for a tenant location because a reconcile was mutating it (https://neon-github-public-dev.s3.amazonaws.com/reports/pr-7288/9500177581/index.html#suites/8fc5d1648d2225380766afde7c428d81/4acece42ae00c442/) It remains the case that a motivated tester could produce a situation where a DELETE gives a 500, when precisely the wrong node transitions from offline to available at the precise moment of a deletion (but the 500 is better than returning 202 and then failing all subsequent GETs). Note that nodes don't go through the offline state during normal restarts, so this is super rare. We should eventually fix this by making DELETE to the pageserver implicitly detach the tenant if it's attached, but that should wait until nobody is using the legacy-style deletes (the ones that use 202 + polling) --- storage_controller/src/service.rs | 26 +++++++++++-------- .../regress/test_storage_controller.py | 3 +++ 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 1e81b5c5a2ca..cf6a95bf0b85 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -2409,11 +2409,17 @@ impl Service { (detach_waiters, shard_ids, node.clone()) }; - if let Err(e) = self.await_waiters(detach_waiters, RECONCILE_TIMEOUT).await { - // Failing to detach shouldn't hold up deletion, e.g. if a node is offline we should be able - // to use some other node to run the remote deletion. - tracing::warn!("Failed to detach some locations: {e}"); - } + // This reconcile wait can fail in a few ways: + // A there is a very long queue for the reconciler semaphore + // B some pageserver is failing to handle a detach promptly + // C some pageserver goes offline right at the moment we send it a request. + // + // A and C are transient: the semaphore will eventually become available, and once a node is marked offline + // the next attempt to reconcile will silently skip detaches for an offline node and succeed. If B happens, + // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while + // deleting the underlying data). 
+ self.await_waiters(detach_waiters, RECONCILE_TIMEOUT) + .await?; let locations = shard_ids .into_iter() @@ -2431,13 +2437,11 @@ impl Service { for result in results { match result { Ok(StatusCode::ACCEPTED) => { - // This could happen if we failed detach above, and hit a pageserver where the tenant - // is still attached: it will accept the deletion in the background - tracing::warn!( - "Unexpectedly still attached on {}, client should retry", + // This should never happen: we waited for detaches to finish above + return Err(ApiError::InternalServerError(anyhow::anyhow!( + "Unexpectedly still attached on {}", node - ); - return Ok(StatusCode::ACCEPTED); + ))); } Ok(_) => {} Err(mgmt_api::Error::Cancelled) => { diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 2031feaa8307..f41468210c2b 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -133,6 +133,9 @@ def node_evacuated(node_id: int) -> None: wait_until(10, 1, lambda: node_evacuated(env.pageservers[0].id)) + # Let all the reconciliations after marking the node offline complete + env.storage_controller.reconcile_until_idle() + # Marking pageserver active should not migrate anything to it # immediately env.storage_controller.node_configure(env.pageservers[0].id, {"availability": "Active"}) From eb0ca9b648b745142969913a262b2aa4fccbf55a Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 14 Jun 2024 11:08:11 +0100 Subject: [PATCH 35/41] pageserver: improved synthetic size & find_gc_cutoff error handling (#8051) ## Problem This PR refactors some error handling to avoid log spam on tenant/timeline shutdown. - "ignoring failure to find gc cutoffs: timeline shutting down." logs (https://github.com/neondatabase/neon/issues/8012) - "synthetic_size_worker: failed to calculate synthetic size for tenant ...: Failed to refresh gc_info before gathering inputs: tenant shutting down", for example here: https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8049/9502988669/index.html#suites/3fc871d9ee8127d8501d607e03205abb/1a074a66548bbcea Closes: https://github.com/neondatabase/neon/issues/8012 ## Summary of changes - Refactor: Add a PageReconstructError variant to GcError: this is the only kind of error that find_gc_cutoffs can emit. - Functional change: only ignore shutdown PageReconstructError variant: for other variants, treat it as a real error - Refactor: add a structured CalculateSyntheticSizeError type and use it instead of anyhow::Error in synthetic size calculations - Functional change: while iterating through timelines gathering logical sizes, only drop out if the whole tenant is cancelled: individual timeline cancellations indicate deletion in progress and we can just ignore those. --- pageserver/src/consumption_metrics.rs | 26 ++--- pageserver/src/http/routes.rs | 9 +- pageserver/src/tenant.rs | 37 +++--- pageserver/src/tenant/size.rs | 106 +++++++++++++----- pageserver/src/tenant/timeline.rs | 2 +- .../fixtures/pageserver/allowed_errors.py | 2 - test_runner/regress/test_tenant_size.py | 4 - 7 files changed, 115 insertions(+), 71 deletions(-) diff --git a/pageserver/src/consumption_metrics.rs b/pageserver/src/consumption_metrics.rs index 540d0d2e8c2c..18c1a6cd9bc2 100644 --- a/pageserver/src/consumption_metrics.rs +++ b/pageserver/src/consumption_metrics.rs @@ -2,10 +2,9 @@ //! and push them to a HTTP endpoint. 
use crate::context::{DownloadBehavior, RequestContext}; use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME}; +use crate::tenant::size::CalculateSyntheticSizeError; use crate::tenant::tasks::BackgroundLoopKind; -use crate::tenant::{ - mgr::TenantManager, LogicalSizeCalculationCause, PageReconstructError, Tenant, -}; +use crate::tenant::{mgr::TenantManager, LogicalSizeCalculationCause, Tenant}; use camino::Utf8PathBuf; use consumption_metrics::EventType; use pageserver_api::models::TenantState; @@ -350,19 +349,12 @@ async fn calculate_and_log(tenant: &Tenant, cancel: &CancellationToken, ctx: &Re // Same for the loop that fetches computed metrics. // By using the same limiter, we centralize metrics collection for "start" and "finished" counters, // which turns out is really handy to understand the system. - let Err(e) = tenant.calculate_synthetic_size(CAUSE, cancel, ctx).await else { - return; - }; - - // this error can be returned if timeline is shutting down, but it does not - // mean the synthetic size worker should terminate. - let shutting_down = matches!( - e.downcast_ref::(), - Some(PageReconstructError::Cancelled) - ); - - if !shutting_down { - let tenant_shard_id = tenant.tenant_shard_id(); - error!("failed to calculate synthetic size for tenant {tenant_shard_id}: {e:#}"); + match tenant.calculate_synthetic_size(CAUSE, cancel, ctx).await { + Ok(_) => {} + Err(CalculateSyntheticSizeError::Cancelled) => {} + Err(e) => { + let tenant_shard_id = tenant.tenant_shard_id(); + error!("failed to calculate synthetic size for tenant {tenant_shard_id}: {e:#}"); + } } } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 12d02c52fedd..657708c0d613 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1135,7 +1135,10 @@ async fn tenant_size_handler( &ctx, ) .await - .map_err(ApiError::InternalServerError)?; + .map_err(|e| match e { + crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown, + other => ApiError::InternalServerError(anyhow::anyhow!(other)), + })?; let mut sizes = None; let accepts_html = headers @@ -1143,9 +1146,7 @@ async fn tenant_size_handler( .map(|v| v == "text/html") .unwrap_or_default(); if !inputs_only.unwrap_or(false) { - let storage_model = inputs - .calculate_model() - .map_err(ApiError::InternalServerError)?; + let storage_model = inputs.calculate_model(); let size = storage_model.calculate(); // If request header expects html, return html diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 0bd3ece2e361..a31fea1e5840 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -509,11 +509,24 @@ pub(crate) enum GcError { #[error(transparent)] Remote(anyhow::Error), + // An error reading while calculating GC cutoffs + #[error(transparent)] + GcCutoffs(PageReconstructError), + // If GC was invoked for a particular timeline, this error means it didn't exist #[error("timeline not found")] TimelineNotFound, } +impl From for GcError { + fn from(value: PageReconstructError) -> Self { + match value { + PageReconstructError::Cancelled => Self::TimelineCancelled, + other => Self::GcCutoffs(other), + } + } +} + impl Tenant { /// Yet another helper for timeline initialization. 
/// @@ -2921,17 +2934,9 @@ impl Tenant { .checked_sub(horizon) .unwrap_or(Lsn(0)); - let res = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await; - - match res { - Ok(cutoffs) => { - let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs); - assert!(old.is_none()); - } - Err(e) => { - tracing::warn!(timeline_id = %timeline.timeline_id, "ignoring failure to find gc cutoffs: {e:#}"); - } - } + let cutoffs = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await?; + let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs); + assert!(old.is_none()); } if !self.is_active() || self.cancel.is_cancelled() { @@ -3553,7 +3558,7 @@ impl Tenant { cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let logical_sizes_at_once = self .conf .concurrent_tenant_size_logical_size_queries @@ -3568,8 +3573,8 @@ impl Tenant { // See more for on the issue #2748 condenced out of the initial PR review. let mut shared_cache = tokio::select! { locked = self.cached_logical_sizes.lock() => locked, - _ = cancel.cancelled() => anyhow::bail!("cancelled"), - _ = self.cancel.cancelled() => anyhow::bail!("tenant is shutting down"), + _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled), + _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled), }; size::gather_inputs( @@ -3593,10 +3598,10 @@ impl Tenant { cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?; - let size = inputs.calculate()?; + let size = inputs.calculate(); self.set_cached_synthetic_size(size); diff --git a/pageserver/src/tenant/size.rs b/pageserver/src/tenant/size.rs index 64fff5536c23..cdd5b0cbe73f 100644 --- a/pageserver/src/tenant/size.rs +++ b/pageserver/src/tenant/size.rs @@ -3,7 +3,6 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use anyhow::{bail, Context}; use tokio::sync::oneshot::error::RecvError; use tokio::sync::Semaphore; use tokio_util::sync::CancellationToken; @@ -11,7 +10,7 @@ use tokio_util::sync::CancellationToken; use crate::context::RequestContext; use crate::pgdatadir_mapping::CalculateLogicalSizeError; -use super::{LogicalSizeCalculationCause, Tenant}; +use super::{GcError, LogicalSizeCalculationCause, Tenant}; use crate::tenant::Timeline; use utils::id::TimelineId; use utils::lsn::Lsn; @@ -43,6 +42,44 @@ pub struct SegmentMeta { pub kind: LsnKind, } +#[derive(thiserror::Error, Debug)] +pub(crate) enum CalculateSyntheticSizeError { + /// Something went wrong internally to the calculation of logical size at a particular branch point + #[error("Failed to calculated logical size on timeline {timeline_id} at {lsn}: {error}")] + LogicalSize { + timeline_id: TimelineId, + lsn: Lsn, + error: CalculateLogicalSizeError, + }, + + /// Something went wrong internally when calculating GC parameters at start of size calculation + #[error(transparent)] + GcInfo(GcError), + + /// Totally unexpected errors, like panics joining a task + #[error(transparent)] + Fatal(anyhow::Error), + + /// The LSN we are trying to calculate a size at no longer exists at the point we query it + #[error("Could not find size at {lsn} in timeline {timeline_id}")] + LsnNotFound { timeline_id: TimelineId, lsn: Lsn }, + + /// Tenant shut down while calculating size + #[error("Cancelled")] + Cancelled, +} + +impl From for 
CalculateSyntheticSizeError { + fn from(value: GcError) -> Self { + match value { + GcError::TenantCancelled | GcError::TimelineCancelled => { + CalculateSyntheticSizeError::Cancelled + } + other => CalculateSyntheticSizeError::GcInfo(other), + } + } +} + impl SegmentMeta { fn size_needed(&self) -> bool { match self.kind { @@ -116,12 +153,9 @@ pub(super) async fn gather_inputs( cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, -) -> anyhow::Result { +) -> Result { // refresh is needed to update gc related pitr_cutoff and horizon_cutoff - tenant - .refresh_gc_info(cancel, ctx) - .await - .context("Failed to refresh gc_info before gathering inputs")?; + tenant.refresh_gc_info(cancel, ctx).await?; // Collect information about all the timelines let mut timelines = tenant.list_timelines(); @@ -327,6 +361,12 @@ pub(super) async fn gather_inputs( ) .await?; + if tenant.cancel.is_cancelled() { + // If we're shutting down, return an error rather than a sparse result that might include some + // timelines from before we started shutting down + return Err(CalculateSyntheticSizeError::Cancelled); + } + Ok(ModelInputs { segments, timeline_inputs, @@ -345,7 +385,7 @@ async fn fill_logical_sizes( logical_size_cache: &mut HashMap<(TimelineId, Lsn), u64>, cause: LogicalSizeCalculationCause, ctx: &RequestContext, -) -> anyhow::Result<()> { +) -> Result<(), CalculateSyntheticSizeError> { let timeline_hash: HashMap> = HashMap::from_iter( timelines .iter() @@ -387,7 +427,7 @@ async fn fill_logical_sizes( } // Perform the size lookups - let mut have_any_error = false; + let mut have_any_error = None; while let Some(res) = joinset.join_next().await { // each of these come with Result, JoinError> // because of spawn + spawn_blocking @@ -398,21 +438,36 @@ async fn fill_logical_sizes( Err(join_error) => { // cannot really do anything, as this panic is likely a bug error!("task that calls spawn_ondemand_logical_size_calculation panicked: {join_error:#}"); - have_any_error = true; + + have_any_error = Some(CalculateSyntheticSizeError::Fatal( + anyhow::anyhow!(join_error) + .context("task that calls spawn_ondemand_logical_size_calculation"), + )); } Ok(Err(recv_result_error)) => { // cannot really do anything, as this panic is likely a bug error!("failed to receive logical size query result: {recv_result_error:#}"); - have_any_error = true; + have_any_error = Some(CalculateSyntheticSizeError::Fatal( + anyhow::anyhow!(recv_result_error) + .context("Receiving logical size query result"), + )); } Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Err(error)))) => { - if !matches!(error, CalculateLogicalSizeError::Cancelled) { + if matches!(error, CalculateLogicalSizeError::Cancelled) { + // Skip this: it's okay if one timeline among many is shutting down while we + // calculate inputs for the overall tenant. + continue; + } else { warn!( timeline_id=%timeline.timeline_id, "failed to calculate logical size at {lsn}: {error:#}" ); + have_any_error = Some(CalculateSyntheticSizeError::LogicalSize { + timeline_id: timeline.timeline_id, + lsn, + error, + }); } - have_any_error = true; } Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Ok(size)))) => { debug!(timeline_id=%timeline.timeline_id, %lsn, size, "size calculated"); @@ -426,10 +481,10 @@ async fn fill_logical_sizes( // prune any keys not needed anymore; we record every used key and added key. 
logical_size_cache.retain(|key, _| sizes_needed.contains_key(key)); - if have_any_error { + if let Some(error) = have_any_error { // we cannot complete this round, because we are missing data. // we have however cached all we were able to request calculation on. - anyhow::bail!("failed to calculate some logical_sizes"); + return Err(error); } // Insert the looked up sizes to the Segments @@ -444,32 +499,29 @@ async fn fill_logical_sizes( if let Some(Some(size)) = sizes_needed.get(&(timeline_id, lsn)) { seg.segment.size = Some(*size); } else { - bail!("could not find size at {} in timeline {}", lsn, timeline_id); + return Err(CalculateSyntheticSizeError::LsnNotFound { timeline_id, lsn }); } } Ok(()) } impl ModelInputs { - pub fn calculate_model(&self) -> anyhow::Result { + pub fn calculate_model(&self) -> tenant_size_model::StorageModel { // Convert SegmentMetas into plain Segments - let storage = StorageModel { + StorageModel { segments: self .segments .iter() .map(|seg| seg.segment.clone()) .collect(), - }; - - Ok(storage) + } } // calculate total project size - pub fn calculate(&self) -> anyhow::Result { - let storage = self.calculate_model()?; + pub fn calculate(&self) -> u64 { + let storage = self.calculate_model(); let sizes = storage.calculate(); - - Ok(sizes.total_size) + sizes.total_size } } @@ -656,7 +708,7 @@ fn verify_size_for_multiple_branches() { "#; let inputs: ModelInputs = serde_json::from_str(doc).unwrap(); - assert_eq!(inputs.calculate().unwrap(), 37_851_408); + assert_eq!(inputs.calculate(), 37_851_408); } #[test] @@ -711,7 +763,7 @@ fn verify_size_for_one_branch() { let model: ModelInputs = serde_json::from_str(doc).unwrap(); - let res = model.calculate_model().unwrap().calculate(); + let res = model.calculate_model().calculate(); println!("calculated synthetic size: {}", res.total_size); println!("result: {:?}", serde_json::to_string(&res.segments)); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 28627e79117c..324d909dace5 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -4823,7 +4823,7 @@ impl Timeline { pitr: Duration, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let _timer = self .metrics .find_gc_cutoffs_histo diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index ef412cade710..147d5705d38a 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -94,8 +94,6 @@ def scan_pageserver_log_for_errors( ".*WARN.*path=/v1/utilization .*request was dropped before completing", # Can happen during shutdown ".*scheduling deletion on drop failed: queue is in state Stopped.*", - # Can happen during shutdown - ".*ignoring failure to find gc cutoffs: timeline shutting down.*", ) diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py index d3a228dbebcc..a3dd4229034e 100644 --- a/test_runner/regress/test_tenant_size.py +++ b/test_runner/regress/test_tenant_size.py @@ -678,10 +678,6 @@ def test_synthetic_size_while_deleting(neon_env_builder: NeonEnvBuilder): with pytest.raises(PageserverApiException, match=matcher): completion.result() - # this happens on both cases - env.pageserver.allowed_errors.append( - ".*ignoring failure to find gc cutoffs: timeline shutting down.*" - ) # this happens only in the case of deletion (http response logging) env.pageserver.allowed_errors.append(".*Failed 
to refresh gc_info before gathering inputs.*") From e6eb0020a10163475115960398f3c206b601d0b8 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Fri, 14 Jun 2024 12:23:52 +0100 Subject: [PATCH 36/41] update rust to 1.79.0 (#8048) ## Problem rust 1.79 new enabled by default lints ## Summary of changes * update to rust 1.79 * `s/default_features/default-features/` * fix proxy dead code. * fix pageserver dead code. --- Cargo.toml | 6 ++-- Dockerfile.build-tools | 2 +- libs/tracing-utils/Cargo.toml | 2 +- pageserver/src/tenant.rs | 3 -- pageserver/src/tenant/timeline.rs | 3 +- pageserver/src/tenant/timeline/delete.rs | 3 -- .../src/rate_limiter/limit_algorithm/aimd.rs | 2 -- proxy/src/scram/messages.rs | 33 ++++++++++--------- rust-toolchain.toml | 2 +- 9 files changed, 25 insertions(+), 31 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dc89c2341bf0..8fddaaef12dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,7 +120,7 @@ num_cpus = "1.15" num-traits = "0.2.15" once_cell = "1.13" opentelemetry = "0.20.0" -opentelemetry-otlp = { version = "0.13.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } +opentelemetry-otlp = { version = "0.13.0", default-features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } opentelemetry-semantic-conventions = "0.12.0" parking_lot = "0.12" parquet = { version = "51.0.0", default-features = false, features = ["zstd"] } @@ -128,7 +128,7 @@ parquet_derive = "51.0.0" pbkdf2 = { version = "0.12.1", features = ["simple", "std"] } pin-project-lite = "0.2" procfs = "0.14" -prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency +prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency prost = "0.11" rand = "0.8" redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] } @@ -184,7 +184,7 @@ tower-service = "0.3.2" tracing = "0.1" tracing-error = "0.2.0" tracing-opentelemetry = "0.21.0" -tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json", "ansi"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json", "ansi"] } twox-hash = { version = "1.6.3", default-features = false } url = "2.2" urlencoding = "2.1" diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index 460b8c996d0a..e7c61ace0eac 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -141,7 +141,7 @@ WORKDIR /home/nonroot # Rust # Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`) -ENV RUSTC_VERSION=1.78.0 +ENV RUSTC_VERSION=1.79.0 ENV RUSTUP_HOME="/home/nonroot/.rustup" ENV PATH="/home/nonroot/.cargo/bin:${PATH}" RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \ diff --git a/libs/tracing-utils/Cargo.toml b/libs/tracing-utils/Cargo.toml index b285c9b5b0cf..512a7481243b 100644 --- a/libs/tracing-utils/Cargo.toml +++ b/libs/tracing-utils/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true [dependencies] hyper.workspace = true opentelemetry = { workspace = true, features=["rt-tokio"] } -opentelemetry-otlp = { workspace = true, default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } +opentelemetry-otlp = { workspace = true, default-features=false, features = ["http-proto", "trace", 
"http", "reqwest-client"] } opentelemetry-semantic-conventions.workspace = true reqwest = { workspace = true, default-features = false, features = ["rustls-tls"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index a31fea1e5840..801321e36df8 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -1046,7 +1046,6 @@ impl Tenant { remote_metadata, TimelineResources { remote_client, - deletion_queue_client: self.deletion_queue_client.clone(), timeline_get_throttle: self.timeline_get_throttle.clone(), }, ctx, @@ -1072,7 +1071,6 @@ impl Tenant { timeline_id, &index_part.metadata, remote_timeline_client, - self.deletion_queue_client.clone(), ) .instrument(tracing::info_span!("timeline_delete", %timeline_id)) .await @@ -3448,7 +3446,6 @@ impl Tenant { ); TimelineResources { remote_client, - deletion_queue_client: self.deletion_queue_client.clone(), timeline_get_throttle: self.timeline_get_throttle.clone(), } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 324d909dace5..08bec329e112 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -62,6 +62,7 @@ use std::{ ops::ControlFlow, }; +use crate::metrics::GetKind; use crate::pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS; use crate::{ aux_file::AuxFileSizeEstimator, @@ -75,7 +76,6 @@ use crate::{ disk_usage_eviction_task::DiskUsageEvictionInfo, pgdatadir_mapping::CollectKeySpaceError, }; -use crate::{deletion_queue::DeletionQueueClient, metrics::GetKind}; use crate::{ disk_usage_eviction_task::finite_f32, tenant::storage_layer::{ @@ -205,7 +205,6 @@ fn drop_wlock(rlock: tokio::sync::RwLockWriteGuard<'_, T>) { /// The outward-facing resources required to build a Timeline pub struct TimelineResources { pub remote_client: RemoteTimelineClient, - pub deletion_queue_client: DeletionQueueClient, pub timeline_get_throttle: Arc< crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>, >, diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index 5ca8544d49d3..441298f3e9a2 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -11,7 +11,6 @@ use utils::{crashsafe, fs_ext, id::TimelineId, pausable_failpoint}; use crate::{ config::PageServerConf, - deletion_queue::DeletionQueueClient, task_mgr::{self, TaskKind}, tenant::{ metadata::TimelineMetadata, @@ -263,7 +262,6 @@ impl DeleteTimelineFlow { timeline_id: TimelineId, local_metadata: &TimelineMetadata, remote_client: RemoteTimelineClient, - deletion_queue_client: DeletionQueueClient, ) -> anyhow::Result<()> { // Note: here we even skip populating layer map. Timeline is essentially uninitialized. // RemoteTimelineClient is the only functioning part. @@ -274,7 +272,6 @@ impl DeleteTimelineFlow { None, // Ancestor is not needed for deletion. TimelineResources { remote_client, - deletion_queue_client, timeline_get_throttle: tenant.timeline_get_throttle.clone(), }, // Important. We dont pass ancestor above because it can be missing. diff --git a/proxy/src/rate_limiter/limit_algorithm/aimd.rs b/proxy/src/rate_limiter/limit_algorithm/aimd.rs index ccc9c42420ee..b39740bb212a 100644 --- a/proxy/src/rate_limiter/limit_algorithm/aimd.rs +++ b/proxy/src/rate_limiter/limit_algorithm/aimd.rs @@ -1,5 +1,3 @@ -use std::usize; - use super::{LimitAlgorithm, Outcome, Sample}; /// Loss-based congestion avoidance. 
diff --git a/proxy/src/scram/messages.rs b/proxy/src/scram/messages.rs index f9372540ca47..cf677a3334ce 100644 --- a/proxy/src/scram/messages.rs +++ b/proxy/src/scram/messages.rs @@ -32,8 +32,6 @@ pub struct ClientFirstMessage<'a> { pub bare: &'a str, /// Channel binding mode. pub cbind_flag: ChannelBinding<&'a str>, - /// (Client username)[]. - pub username: &'a str, /// Client nonce. pub nonce: &'a str, } @@ -58,6 +56,14 @@ impl<'a> ClientFirstMessage<'a> { // In theory, these might be preceded by "reserved-mext" (i.e. "m=") let username = parts.next()?.strip_prefix("n=")?; + + // https://github.com/postgres/postgres/blob/f83908798f78c4cafda217ca875602c88ea2ae28/src/backend/libpq/auth-scram.c#L13-L14 + if !username.is_empty() { + tracing::warn!(username, "scram username provided, but is not expected") + // TODO(conrad): + // return None; + } + let nonce = parts.next()?.strip_prefix("r=")?; // Validate but ignore auth extensions @@ -66,7 +72,6 @@ impl<'a> ClientFirstMessage<'a> { Some(Self { bare, cbind_flag, - username, nonce, }) } @@ -188,19 +193,18 @@ mod tests { // (Almost) real strings captured during debug sessions let cases = [ - (NotSupportedClient, "n,,n=pepe,r=t8JwklwKecDLwSsA72rHmVju"), - (NotSupportedServer, "y,,n=pepe,r=t8JwklwKecDLwSsA72rHmVju"), + (NotSupportedClient, "n,,n=,r=t8JwklwKecDLwSsA72rHmVju"), + (NotSupportedServer, "y,,n=,r=t8JwklwKecDLwSsA72rHmVju"), ( Required("tls-server-end-point"), - "p=tls-server-end-point,,n=pepe,r=t8JwklwKecDLwSsA72rHmVju", + "p=tls-server-end-point,,n=,r=t8JwklwKecDLwSsA72rHmVju", ), ]; for (cb, input) in cases { let msg = ClientFirstMessage::parse(input).unwrap(); - assert_eq!(msg.bare, "n=pepe,r=t8JwklwKecDLwSsA72rHmVju"); - assert_eq!(msg.username, "pepe"); + assert_eq!(msg.bare, "n=,r=t8JwklwKecDLwSsA72rHmVju"); assert_eq!(msg.nonce, "t8JwklwKecDLwSsA72rHmVju"); assert_eq!(msg.cbind_flag, cb); } @@ -208,14 +212,13 @@ mod tests { #[test] fn parse_client_first_message_with_invalid_gs2_authz() { - assert!(ClientFirstMessage::parse("n,authzid,n=user,r=nonce").is_none()) + assert!(ClientFirstMessage::parse("n,authzid,n=,r=nonce").is_none()) } #[test] fn parse_client_first_message_with_extra_params() { - let msg = ClientFirstMessage::parse("n,,n=user,r=nonce,a=foo,b=bar,c=baz").unwrap(); - assert_eq!(msg.bare, "n=user,r=nonce,a=foo,b=bar,c=baz"); - assert_eq!(msg.username, "user"); + let msg = ClientFirstMessage::parse("n,,n=,r=nonce,a=foo,b=bar,c=baz").unwrap(); + assert_eq!(msg.bare, "n=,r=nonce,a=foo,b=bar,c=baz"); assert_eq!(msg.nonce, "nonce"); assert_eq!(msg.cbind_flag, ChannelBinding::NotSupportedClient); } @@ -223,9 +226,9 @@ mod tests { #[test] fn parse_client_first_message_with_extra_params_invalid() { // must be of the form `=<...>` - assert!(ClientFirstMessage::parse("n,,n=user,r=nonce,abc=foo").is_none()); - assert!(ClientFirstMessage::parse("n,,n=user,r=nonce,1=foo").is_none()); - assert!(ClientFirstMessage::parse("n,,n=user,r=nonce,a").is_none()); + assert!(ClientFirstMessage::parse("n,,n=,r=nonce,abc=foo").is_none()); + assert!(ClientFirstMessage::parse("n,,n=,r=nonce,1=foo").is_none()); + assert!(ClientFirstMessage::parse("n,,n=,r=nonce,a").is_none()); } #[test] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 214de0a77d74..dcae25a28770 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.78.0" +channel = "1.79.0" profile = "default" # The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy. 
# https://rust-lang.github.io/rustup/concepts/profiles.html From a71f58e69c762e55f0fe9055a088f6232facbf28 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2024 05:47:09 +0300 Subject: [PATCH 37/41] Fix test_segment_init_failure. Graceful shutdown broke it. --- test_runner/regress/test_wal_acceptor_async.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py index 715d22eed83d..971fad787a22 100644 --- a/test_runner/regress/test_wal_acceptor_async.py +++ b/test_runner/regress/test_wal_acceptor_async.py @@ -601,13 +601,16 @@ async def run_segment_init_failure(env: NeonEnv): conn = await ep.connect_async() ep.safe_psql("select pg_switch_wal()") # jump to the segment boundary # next insertion should hang until failpoint is disabled. - asyncio.create_task(conn.execute("insert into t select generate_series(1,1), 'payload'")) + bg_query = asyncio.create_task( + conn.execute("insert into t select generate_series(1,1), 'payload'") + ) sleep_sec = 2 await asyncio.sleep(sleep_sec) - # also restart ep at segment boundary to make test more interesting - ep.stop() # it must still be not finished - # assert not bg_query.done() + assert not bg_query.done() + # Also restart ep at segment boundary to make test more interesting. Do it in immediate mode; + # fast will hang because it will try to gracefully finish sending WAL. + ep.stop(mode="immediate") # Without segment rename during init (#6402) previous statement created # partially initialized 16MB segment, so sk restart also triggers #6401. sk.stop().start() From 83eb02b07af20c27842231c995fe883f6d9a6299 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Fri, 14 Jun 2024 12:43:51 +0100 Subject: [PATCH 38/41] CI: downgrade docker/setup-buildx-action (#8062) ## Problem I've bumped `docker/setup-buildx-action` in #8042 because I wasn't able to reproduce the issue from #7445. 
But now the issue appears again in https://github.com/neondatabase/neon/actions/runs/9514373620/job/26226626923?pr=8059 The steps to reproduce aren't clear, it required `docker/setup-buildx-action@v3` and rebuilding the image without cache, probably ## Summary of changes - Downgrade `docker/setup-buildx-action@v3` to `docker/setup-buildx-action@v2` --- .github/workflows/build-build-tools-image.yml | 2 +- .github/workflows/build_and_test.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index da1efe95715f..2c994b08ae69 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -64,7 +64,7 @@ jobs: mkdir -p /tmp/.docker-custom echo DOCKER_CONFIG=/tmp/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v3 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v2 with: diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1b433a703324..703fc8d14516 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -744,7 +744,7 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v3 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v3 with: @@ -822,11 +822,11 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v3 + - uses: docker/setup-buildx-action@v2 with: # Disable parallelism for docker buildkit. # As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner. 
- buildkitd-config-inline: | + config-inline: | [worker.oci] max-parallelism = 1 From 81892199f627a1021b3f4f5f8043d35281501c1a Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Fri, 14 Jun 2024 11:57:58 -0400 Subject: [PATCH 39/41] chore(pageserver): vectored get target_keyspace directly accums (#8055) follow up on https://github.com/neondatabase/neon/pull/7904 avoid a layer of indirection introduced by `Vec>` Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/keyspace.rs | 6 ++++++ pageserver/src/tenant/storage_layer.rs | 24 +++++++++++------------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/libs/pageserver_api/src/keyspace.rs b/libs/pageserver_api/src/keyspace.rs index 12c6dc3a6d01..9a61f2ad81ae 100644 --- a/libs/pageserver_api/src/keyspace.rs +++ b/libs/pageserver_api/src/keyspace.rs @@ -558,6 +558,12 @@ impl KeySpaceRandomAccum { self.ranges.push(range); } + pub fn add_keyspace(&mut self, keyspace: KeySpace) { + for range in keyspace.ranges { + self.add_range(range); + } + } + pub fn to_keyspace(mut self) -> KeySpace { let mut ranges = Vec::new(); if !self.ranges.is_empty() { diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 0b3f841ccf8e..9607546ce0f2 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -318,7 +318,7 @@ pub(crate) struct LayerFringe { #[derive(Debug)] struct LayerKeyspace { layer: ReadableLayer, - target_keyspace: Vec, + target_keyspace: KeySpaceRandomAccum, } impl LayerFringe { @@ -342,17 +342,13 @@ impl LayerFringe { _, LayerKeyspace { layer, - target_keyspace, + mut target_keyspace, }, - )) => { - let mut keyspace = KeySpaceRandomAccum::new(); - for ks in target_keyspace { - for part in ks.ranges { - keyspace.add_range(part); - } - } - Some((layer, keyspace.consume_keyspace(), read_desc.lsn_range)) - } + )) => Some(( + layer, + target_keyspace.consume_keyspace(), + read_desc.lsn_range, + )), None => unreachable!("fringe internals are always consistent"), } } @@ -367,16 +363,18 @@ impl LayerFringe { let entry = self.layers.entry(layer_id.clone()); match entry { Entry::Occupied(mut entry) => { - entry.get_mut().target_keyspace.push(keyspace); + entry.get_mut().target_keyspace.add_keyspace(keyspace); } Entry::Vacant(entry) => { self.planned_reads_by_lsn.push(ReadDesc { lsn_range, layer_id: layer_id.clone(), }); + let mut accum = KeySpaceRandomAccum::new(); + accum.add_keyspace(keyspace); entry.insert(LayerKeyspace { layer, - target_keyspace: vec![keyspace], + target_keyspace: accum, }); } } From 46210035c551212a1b9383fe5249d547f284c39a Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Fri, 14 Jun 2024 18:36:50 +0200 Subject: [PATCH 40/41] add halfvec indexing and queries to periodic pgvector performance tests (#8057) ## Problem halfvec data type was introduced in pgvector 0.7.0 and is popular because it allows smaller vectors, smaller indexes and potentially better performance. So far we have not tested halfvec in our periodic performance tests. This PR adds halfvec indexing and halfvec queries to the test. 
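For reviewers who want to run the new workload outside CI, it boils down to the two steps sketched below, using the files added in this PR; the connection string is a placeholder for a pooled endpoint.

```bash
# Sketch only; the paths are the files added below, the connection string is fake.
CONN="postgresql://neondb_owner:<password>@<pooler-endpoint>/neondb?sslmode=require"

# One-off: build the half-precision copy of `documents` plus its HNSW index.
psql "$CONN" -f test_runner/performance/pgvector/halfvec_build.sql

# Steady state: replay the cosine-distance top-30 queries, as the new test does.
pgbench -T 300 -c 100 -j 20 \
  -f test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql \
  "$CONN"
```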
--- .github/workflows/benchmarking.yml | 8 ++--- .../performance/pgvector/halfvec_build.sql | 15 +++++++++ ...custom_script_pgvector_halfvec_queries.sql | 13 ++++++++ .../pgvector/pgbench_hnsw_queries.sql | 13 -------- test_runner/performance/test_perf_olap.py | 5 +++ test_runner/performance/test_perf_pgbench.py | 31 +++++++++++++------ .../performance/test_perf_pgvector_queries.py | 24 ++++++++++++++ 7 files changed, 82 insertions(+), 27 deletions(-) create mode 100644 test_runner/performance/pgvector/halfvec_build.sql create mode 100644 test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql delete mode 100644 test_runner/performance/pgvector/pgbench_hnsw_queries.sql create mode 100644 test_runner/performance/test_perf_pgvector_queries.py diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index 57d24063bfb3..9eff4836809d 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -99,7 +99,7 @@ jobs: # Set --sparse-ordering option of pytest-order plugin # to ensure tests are running in order of appears in the file. # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests - extra_params: -m remote_cluster --sparse-ordering --timeout 5400 --ignore test_runner/performance/test_perf_olap.py + extra_params: -m remote_cluster --sparse-ordering --timeout 5400 --ignore test_runner/performance/test_perf_olap.py --ignore test_runner/performance/test_perf_pgvector_queries.py env: BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }} VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" @@ -410,14 +410,14 @@ jobs: PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} - - name: Benchmark pgvector hnsw queries + - name: Benchmark pgvector queries uses: ./.github/actions/run-python-test-set with: build_type: ${{ env.BUILD_TYPE }} - test_selection: performance + test_selection: performance/test_perf_pgvector_queries.py run_in_parallel: false save_perf_report: ${{ env.SAVE_PERF_REPORT }} - extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_pgvector + extra_params: -m remote_cluster --timeout 21600 env: BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" diff --git a/test_runner/performance/pgvector/halfvec_build.sql b/test_runner/performance/pgvector/halfvec_build.sql new file mode 100644 index 000000000000..7e923e4bde1a --- /dev/null +++ b/test_runner/performance/pgvector/halfvec_build.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS halfvec_test_table; + +CREATE TABLE halfvec_test_table ( + _id text NOT NULL, + title text, + text text, + embeddings halfvec(1536), + PRIMARY KEY (_id) +); + +INSERT INTO halfvec_test_table (_id, title, text, embeddings) +SELECT _id, title, text, embeddings::halfvec +FROM documents; + +CREATE INDEX documents_half_precision_hnsw_idx ON halfvec_test_table USING hnsw (embeddings halfvec_cosine_ops) WITH (m = 64, ef_construction = 128); \ No newline at end of file diff --git a/test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql b/test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql new file mode 100644 index 000000000000..70d0c181494b --- /dev/null +++ b/test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql @@ -0,0 +1,13 @@ +-- run with pooled connection +-- pgbench -T 300 -c 100 -j20 -f 
pgbench_halfvec_queries.sql -postgresql://neondb_owner:@ep-floral-thunder-w1gzhaxi-pooler.eu-west-1.aws.neon.build/neondb?sslmode=require" + +with x (x) as ( + select "embeddings" as x + from halfvec_test_table + TABLESAMPLE SYSTEM (1) + LIMIT 1 +) +SELECT title, "embeddings" <=> (select x from x) as distance +FROM halfvec_test_table +ORDER BY 2 +LIMIT 30; \ No newline at end of file diff --git a/test_runner/performance/pgvector/pgbench_hnsw_queries.sql b/test_runner/performance/pgvector/pgbench_hnsw_queries.sql deleted file mode 100644 index 5034063c1b61..000000000000 --- a/test_runner/performance/pgvector/pgbench_hnsw_queries.sql +++ /dev/null @@ -1,13 +0,0 @@ --- run with pooled connection --- pgbench -T 300 -c 100 -j20 -f pgbench_hnsw_queries.sql -postgresql://neondb_owner:@ep-floral-thunder-w1gzhaxi-pooler.eu-west-1.aws.neon.build/neondb?sslmode=require" - -with x (x) as ( - select "embeddings" as x - from hnsw_test_table - TABLESAMPLE SYSTEM (1) - LIMIT 1 -) -SELECT title, "embeddings" <=> (select x from x) as distance -FROM hnsw_test_table -ORDER BY 2 -LIMIT 30; diff --git a/test_runner/performance/test_perf_olap.py b/test_runner/performance/test_perf_olap.py index 2367676e6707..aaa2f8fec283 100644 --- a/test_runner/performance/test_perf_olap.py +++ b/test_runner/performance/test_perf_olap.py @@ -106,6 +106,7 @@ def test_clickbench_create_pg_stat_statements(remote_compare: RemoteCompare): # Disable auto formatting for the list of queries so that it's easier to read # fmt: off PGVECTOR_QUERIES: Tuple[LabelledQuery, ...] = ( + LabelledQuery("PGVPREP", r"ALTER EXTENSION VECTOR UPDATE;"), LabelledQuery("PGV0", r"DROP TABLE IF EXISTS hnsw_test_table;"), LabelledQuery("PGV1", r"CREATE TABLE hnsw_test_table AS TABLE documents WITH NO DATA;"), LabelledQuery("PGV2", r"INSERT INTO hnsw_test_table SELECT * FROM documents;"), @@ -115,6 +116,10 @@ def test_clickbench_create_pg_stat_statements(remote_compare: RemoteCompare): LabelledQuery("PGV6", r"CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_l1_ops);"), LabelledQuery("PGV7", r"CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_hamming_ops);"), LabelledQuery("PGV8", r"CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_jaccard_ops);"), + LabelledQuery("PGV9", r"DROP TABLE IF EXISTS halfvec_test_table;"), + LabelledQuery("PGV10", r"CREATE TABLE halfvec_test_table (_id text NOT NULL, title text, text text, embeddings halfvec(1536), PRIMARY KEY (_id));"), + LabelledQuery("PGV11", r"INSERT INTO halfvec_test_table (_id, title, text, embeddings) SELECT _id, title, text, embeddings::halfvec FROM documents;"), + LabelledQuery("PGV12", r"CREATE INDEX documents_half_precision_hnsw_idx ON halfvec_test_table USING hnsw (embeddings halfvec_cosine_ops) WITH (m = 64, ef_construction = 128);"), ) # fmt: on diff --git a/test_runner/performance/test_perf_pgbench.py b/test_runner/performance/test_perf_pgbench.py index d756d6eeca5b..6eaa29e4f811 100644 --- a/test_runner/performance/test_perf_pgbench.py +++ b/test_runner/performance/test_perf_pgbench.py @@ -18,6 +18,7 @@ class PgBenchLoadType(enum.Enum): SIMPLE_UPDATE = "simple-update" SELECT_ONLY = "select-only" PGVECTOR_HNSW = "pgvector-hnsw" + PGVECTOR_HALFVEC = "pgvector-halfvec" def utc_now_timestamp() -> int: @@ -153,6 +154,26 @@ def run_test_pgbench(env: PgCompare, scale: int, duration: int, workload_type: P password=password, ) + if workload_type == PgBenchLoadType.PGVECTOR_HALFVEC: + # Run simple-update workload + 
run_pgbench( + env, + "pgvector-halfvec", + [ + "pgbench", + "-f", + "test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql", + "-c100", + "-j20", + f"-T{duration}", + "-P2", + "--protocol=prepared", + "--progress-timestamp", + connstr, + ], + password=password, + ) + env.report_size() @@ -222,13 +243,3 @@ def test_pgbench_remote_simple_update(remote_compare: PgCompare, scale: int, dur @pytest.mark.remote_cluster def test_pgbench_remote_select_only(remote_compare: PgCompare, scale: int, duration: int): run_test_pgbench(remote_compare, scale, duration, PgBenchLoadType.SELECT_ONLY) - - -# The following test runs on an existing database that has pgvector extension installed -# and a table with 1 million embedding vectors loaded and indexed with HNSW. -# -# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. -@pytest.mark.parametrize("duration", get_durations_matrix()) -@pytest.mark.remote_cluster -def test_pgbench_remote_pgvector(remote_compare: PgCompare, duration: int): - run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HNSW) diff --git a/test_runner/performance/test_perf_pgvector_queries.py b/test_runner/performance/test_perf_pgvector_queries.py new file mode 100644 index 000000000000..bb3db16305de --- /dev/null +++ b/test_runner/performance/test_perf_pgvector_queries.py @@ -0,0 +1,24 @@ +import pytest +from fixtures.compare_fixtures import PgCompare + +from performance.test_perf_pgbench import PgBenchLoadType, get_durations_matrix, run_test_pgbench + + +# The following test runs on an existing database that has pgvector extension installed +# and a table with 1 million embedding vectors loaded and indexed with HNSW. +# +# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. +@pytest.mark.parametrize("duration", get_durations_matrix()) +@pytest.mark.remote_cluster +def test_pgbench_remote_pgvector_hnsw(remote_compare: PgCompare, duration: int): + run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HNSW) + + +# The following test runs on an existing database that has pgvector extension installed +# and a table with 1 million embedding vectors loaded and indexed with halfvec. +# +# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. +@pytest.mark.parametrize("duration", get_durations_matrix()) +@pytest.mark.remote_cluster +def test_pgbench_remote_pgvector_halfvec(remote_compare: PgCompare, duration: int): + run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HALFVEC) From 2ba414525e7605c8570eadaccef576321d601571 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 13 Jun 2024 22:56:01 +0300 Subject: [PATCH 41/41] Install rust binaries before running rust tests. cargo test (or nextest) might rebuild the binaries with different features/flags, so do install immediately after the build. Triggered by the particular case of nextest invocations missing $CARGO_FEATURES, which recompiled safekeeper without 'testing' feature which made python tests needing it (failpoints) not run in the CI. Also add CARGO_FEATURES to the nextest runs anyway because there doesn't seem to be an important reason not to. 
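To spell out the hazard with simplified commands (not the exact CI invocations, which are in build_and_test.yml below):

    CARGO_FEATURES="--features testing"

    cargo build $CARGO_FEATURES --bins   # binaries built WITH the `testing` feature
    cargo nextest run                    # $CARGO_FEATURES forgotten: the feature set
                                         # changed, so cargo rebuilds WITHOUT `testing`

Installing only after the tests therefore ships the featureless binaries, and the failpoint-based python tests do not run in CI. Hence: install right after the build, and pass $CARGO_FEATURES to the nextest runs too.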
--- .github/workflows/build_and_test.yml | 56 ++++++++++++++-------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 703fc8d14516..bd2996ec4c89 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -337,34 +337,8 @@ jobs: run: | ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_FEATURES --bins --tests - - name: Run rust tests - env: - NEXTEST_RETRIES: 3 - run: | - #nextest does not yet support running doctests - cargo test --doc $CARGO_FLAGS $CARGO_FEATURES - - for io_engine in std-fs tokio-epoll-uring ; do - NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES - done - - # Run separate tests for real S3 - export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty - export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests - export REMOTE_STORAGE_S3_REGION=eu-central-1 - # Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now - ${cov_prefix} cargo nextest run $CARGO_FLAGS -E 'package(remote_storage)' -E 'test(test_real_s3)' - - # Run separate tests for real Azure Blob Storage - # XXX: replace region with `eu-central-1`-like region - export ENABLE_REAL_AZURE_REMOTE_STORAGE=y - export AZURE_STORAGE_ACCOUNT="${{ secrets.AZURE_STORAGE_ACCOUNT_DEV }}" - export AZURE_STORAGE_ACCESS_KEY="${{ secrets.AZURE_STORAGE_ACCESS_KEY_DEV }}" - export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}" - export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}" - # Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now - ${cov_prefix} cargo nextest run $CARGO_FLAGS -E 'package(remote_storage)' -E 'test(test_real_azure)' - + # Do install *before* running rust tests because they might recompile the + # binaries with different features/flags. - name: Install rust binaries run: | # Install target binaries @@ -405,6 +379,32 @@ jobs: done fi + - name: Run rust tests + env: + NEXTEST_RETRIES: 3 + run: | + #nextest does not yet support running doctests + cargo test --doc $CARGO_FLAGS $CARGO_FEATURES + + for io_engine in std-fs tokio-epoll-uring ; do + NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES + done + + # Run separate tests for real S3 + export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty + export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests + export REMOTE_STORAGE_S3_REGION=eu-central-1 + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_s3)' + + # Run separate tests for real Azure Blob Storage + # XXX: replace region with `eu-central-1`-like region + export ENABLE_REAL_AZURE_REMOTE_STORAGE=y + export AZURE_STORAGE_ACCOUNT="${{ secrets.AZURE_STORAGE_ACCOUNT_DEV }}" + export AZURE_STORAGE_ACCESS_KEY="${{ secrets.AZURE_STORAGE_ACCESS_KEY_DEV }}" + export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}" + export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}" + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_azure)' + - name: Install postgres binaries run: cp -a pg_install /tmp/neon/pg_install