diff --git a/.github/actions/run-python-test-set/action.yml b/.github/actions/run-python-test-set/action.yml index d9e543d4bb77..d5c1fcf524de 100644 --- a/.github/actions/run-python-test-set/action.yml +++ b/.github/actions/run-python-test-set/action.yml @@ -183,8 +183,7 @@ runs: # Run the tests. # - # The junit.xml file allows CI tools to display more fine-grained test information - # in its "Tests" tab in the results page. + # --alluredir saves test results in Allure format (in a specified directory) # --verbose prints name of each test (helpful when there are # multiple tests in one file) # -rA prints summary in the end @@ -193,7 +192,6 @@ runs: # mkdir -p $TEST_OUTPUT/allure/results "${cov_prefix[@]}" ./scripts/pytest \ - --junitxml=$TEST_OUTPUT/junit.xml \ --alluredir=$TEST_OUTPUT/allure/results \ --tb=short \ --verbose \ diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index 078c7f88c460..34fd8b1d155d 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -36,15 +36,16 @@ jobs: fail_on_error: true filter_mode: nofilter level: error - - run: | + + - name: Disallow 'ubuntu-latest' runners + run: | PAT='^\s*runs-on:.*-latest' - if grep -ERq $PAT .github/workflows - then + if grep -ERq $PAT .github/workflows; then grep -ERl $PAT .github/workflows |\ while read -r f do l=$(grep -nE $PAT .github/workflows/release.yml | awk -F: '{print $1}' | head -1) - echo "::error file=$f,line=$l::Please, do not use ubuntu-latest images to run on, use LTS instead." + echo "::error file=$f,line=$l::Please use 'ubuntu-22.04' instead of 'ubuntu-latest'" done exit 1 fi diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index 2c994b08ae69..6e90a80ab7ab 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -30,7 +30,6 @@ jobs: check-image: uses: ./.github/workflows/check-build-tools-image.yml - # This job uses older version of GitHub Actions because it's run on gen2 runners, which don't support node 20 (for newer versions) build-image: needs: [ check-image ] if: needs.check-image.outputs.found == 'false' diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index bd2996ec4c89..e9adf28b9998 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -299,21 +299,21 @@ jobs: uses: actions/cache@v4 with: path: pg_install/v14 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Cache postgres v15 build id: cache_pg_15 uses: actions/cache@v4 with: path: pg_install/v15 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Cache postgres v16 build id: cache_pg_16 uses: actions/cache@v4 with: path: pg_install/v16 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Build postgres v14 if: 
steps.cache_pg_14.outputs.cache-hit != 'true' @@ -1023,6 +1023,18 @@ jobs: with: fetch-depth: 0 + # Use custom DOCKER_CONFIG directory to avoid conflicts with default settings + # The default value is ~/.docker + - name: Set custom docker config directory + run: | + mkdir -p .docker-custom + echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV + + - uses: docker/login-action@v3 + with: + username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} + password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} + # `neondatabase/neon` contains multiple binaries, all of them use the same input for the version into the same version formatting library. # Pick pageserver as currently the only binary with extra "version" features printed in the string to verify. # Regular pageserver version string looks like @@ -1057,6 +1069,11 @@ jobs: docker compose -f ./docker-compose/docker-compose.yml logs || 0 docker compose -f ./docker-compose/docker-compose.yml down + - name: Remove custom docker config directory + if: always() + run: | + rm -rf .docker-custom + promote-images: needs: [ check-permissions, tag, test-images, vm-compute-node-image ] runs-on: ubuntu-22.04 @@ -1070,7 +1087,8 @@ jobs: username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} - - uses: docker/login-action@v3 + - name: Login to dev ECR + uses: docker/login-action@v3 with: registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com username: ${{ secrets.AWS_ACCESS_KEY_DEV }} @@ -1104,6 +1122,22 @@ jobs: docker buildx imagetools create -t neondatabase/neon-test-extensions-v16:latest \ neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }} + - name: Login to prod ECR + uses: docker/login-action@v3 + if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' + with: + registry: 093970136003.dkr.ecr.eu-central-1.amazonaws.com + username: ${{ secrets.PROD_GHA_RUNNER_LIMITED_AWS_ACCESS_KEY_ID }} + password: ${{ secrets.PROD_GHA_RUNNER_LIMITED_AWS_SECRET_ACCESS_KEY }} + + - name: Copy all images to prod ECR + if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' + run: | + for image in neon compute-tools {vm-,}compute-node-{v14,v15,v16}; do + docker buildx imagetools create -t 093970136003.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} \ + 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} + done + trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] runs-on: ubuntu-22.04 diff --git a/.github/workflows/check-build-tools-image.yml b/.github/workflows/check-build-tools-image.yml index 97116940a005..807a9ef3bd6a 100644 --- a/.github/workflows/check-build-tools-image.yml +++ b/.github/workflows/check-build-tools-image.yml @@ -25,26 +25,17 @@ jobs: found: ${{ steps.check-image.outputs.found }} steps: + - uses: actions/checkout@v4 + - name: Get build-tools image tag for the current commit id: get-build-tools-tag env: - # Usually, for COMMIT_SHA, we use `github.event.pull_request.head.sha || github.sha`, but here, even for PRs, - # we want to use `github.sha` i.e. point to a phantom merge commit to determine the image tag correctly. 
- COMMIT_SHA: ${{ github.sha }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IMAGE_TAG: | + ${{ hashFiles('Dockerfile.build-tools', + '.github/workflows/check-build-tools-image.yml', + '.github/workflows/build-build-tools-image.yml') }} run: | - LAST_BUILD_TOOLS_SHA=$( - gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - --method GET \ - --field path=Dockerfile.build-tools \ - --field sha=${COMMIT_SHA} \ - --field per_page=1 \ - --jq ".[0].sha" \ - "/repos/${GITHUB_REPOSITORY}/commits" - ) - echo "image-tag=${LAST_BUILD_TOOLS_SHA}" | tee -a $GITHUB_OUTPUT + echo "image-tag=${IMAGE_TAG}" | tee -a $GITHUB_OUTPUT - name: Check if such tag found in the registry id: check-image diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 90a3aaaf2ddf..56ef6f4bbb95 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -52,13 +52,15 @@ jobs: env: GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} run: | + TITLE="Storage & Compute release ${RELEASE_DATE}" + cat << EOF > body.md - ## Storage & Compute release ${RELEASE_DATE} + ## ${TITLE} **Please merge this Pull Request using 'Create a merge commit' button** EOF - gh pr create --title "Release ${RELEASE_DATE}" \ + gh pr create --title "${TITLE}" \ --body-file "body.md" \ --head "${RELEASE_BRANCH}" \ --base "release" @@ -91,13 +93,15 @@ jobs: env: GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} run: | + TITLE="Proxy release ${RELEASE_DATE}" + cat << EOF > body.md - ## Proxy release ${RELEASE_DATE} + ## ${TITLE} **Please merge this Pull Request using 'Create a merge commit' button** EOF - gh pr create --title "Proxy release ${RELEASE_DATE}" \ + gh pr create --title "${TITLE}" \ --body-file "body.md" \ --head "${RELEASE_BRANCH}" \ --base "release-proxy" diff --git a/Cargo.lock b/Cargo.lock index 1c8a8b0c0fff..77bf01240273 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1014,6 +1014,9 @@ name = "camino" version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] [[package]] name = "camino-tempfile" @@ -4647,6 +4650,7 @@ dependencies = [ "futures-util", "http-types", "humantime", + "humantime-serde", "hyper 0.14.26", "itertools", "metrics", @@ -5158,6 +5162,7 @@ dependencies = [ "tokio-io-timeout", "tokio-postgres", "tokio-stream", + "tokio-tar", "tokio-util", "toml_edit", "tracing", @@ -5753,6 +5758,7 @@ dependencies = [ "r2d2", "reqwest 0.12.4", "routerify", + "scopeguard", "serde", "serde_json", "strum", @@ -7365,6 +7371,7 @@ dependencies = [ "base64 0.21.1", "base64ct", "bytes", + "camino", "cc", "chrono", "clap", diff --git a/Dockerfile b/Dockerfile index 5f82df3e1811..b4900d4a94a1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,8 +69,6 @@ RUN set -e \ && apt install -y \ libreadline-dev \ libseccomp-dev \ - libicu67 \ - openssl \ ca-certificates \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ && useradd -d /data neon \ diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index e7c61ace0eac..5dd2c13c0e2b 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -112,6 +112,45 @@ RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JS && make install \ && rm -rf ../lcov.tar.gz +# Compile and install the static OpenSSL library +ENV OPENSSL_VERSION=3.2.2 +ENV OPENSSL_PREFIX=/usr/local/openssl +RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz 
https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \ + echo "197149c18d9e9f292c43f0400acaba12e5f52cacfe050f3d199277ea738ec2e7 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ + cd /tmp && \ + tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ + rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ + cd /tmp/openssl-${OPENSSL_VERSION} && \ + ./config --prefix=${OPENSSL_PREFIX} -static --static no-shared -fPIC && \ + make -j "$(nproc)" && \ + make install && \ + cd /tmp && \ + rm -rf /tmp/openssl-${OPENSSL_VERSION} + +# Use the same version of libicu as the compute nodes so that +# clusters created using inidb on pageserver can be used by computes. +# +# TODO: at this time, Dockerfile.compute-node uses the debian bullseye libicu +# package, which is 67.1. We're duplicating that knowledge here, and also, technically, +# Debian has a few patches on top of 67.1 that we're not adding here. +ENV ICU_VERSION=67.1 +ENV ICU_PREFIX=/usr/local/icu + +# Download and build static ICU +RUN wget -O /tmp/libicu-${ICU_VERSION}.tgz https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \ + echo "94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc /tmp/libicu-${ICU_VERSION}.tgz" | sha256sum --check && \ + mkdir /tmp/icu && \ + pushd /tmp/icu && \ + tar -xzf /tmp/libicu-${ICU_VERSION}.tgz && \ + pushd icu/source && \ + ./configure --prefix=${ICU_PREFIX} --enable-static --enable-shared=no CXXFLAGS="-fPIC" CFLAGS="-fPIC" && \ + make -j "$(nproc)" && \ + make install && \ + popd && \ + rm -rf icu && \ + rm -f /tmp/libicu-${ICU_VERSION}.tgz && \ + popd + # Switch to nonroot user USER nonroot:nonroot WORKDIR /home/nonroot @@ -170,3 +209,6 @@ RUN whoami \ && rustup --version --verbose \ && rustc --version --verbose \ && clang --version + +# Set following flag to check in Makefile if its running in Docker +RUN touch /home/nonroot/.docker_build diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 3a73ac71b06d..7ab685625a8b 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -467,31 +467,6 @@ RUN case "${PG_VERSION}" in \ make install -j $(getconf _NPROCESSORS_ONLN) && \ echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_hint_plan.control -######################################################################################### -# -# Layer "kq-imcx-pg-build" -# compile kq_imcx extension -# -######################################################################################### -FROM build-deps AS kq-imcx-pg-build -COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ - -ENV PATH "/usr/local/pgsql/bin/:$PATH" -RUN apt-get update && \ - apt-get install -y git libgtk2.0-dev libpq-dev libpam-dev libxslt-dev libkrb5-dev cmake && \ - wget https://github.com/ketteq-neon/postgres-exts/archive/e0bd1a9d9313d7120c1b9c7bb15c48c0dede4c4e.tar.gz -O kq_imcx.tar.gz && \ - echo "dc93a97ff32d152d32737ba7e196d9687041cda15e58ab31344c2f2de8855336 kq_imcx.tar.gz" | sha256sum --check && \ - mkdir kq_imcx-src && cd kq_imcx-src && tar xzf ../kq_imcx.tar.gz --strip-components=1 -C . && \ - find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\ - mkdir build && cd build && \ - cmake -DCMAKE_BUILD_TYPE=Release .. 
&& \ - make -j $(getconf _NPROCESSORS_ONLN) && \ - make -j $(getconf _NPROCESSORS_ONLN) install && \ - echo 'trusted = true' >> /usr/local/pgsql/share/extension/kq_imcx.control && \ - find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /after.txt &&\ - mkdir -p /extensions/kq_imcx && cp /usr/local/pgsql/share/extension/kq_imcx.control /extensions/kq_imcx && \ - sort -o /before.txt /before.txt && sort -o /after.txt /after.txt && \ - comm -13 /before.txt /after.txt | tar --directory=/usr/local/pgsql --zstd -cf /extensions/kq_imcx.tar.zst -T - ######################################################################################### # @@ -840,7 +815,6 @@ COPY --from=hll-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=plpgsql-check-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=timescaledb-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=pg-hint-plan-pg-build /usr/local/pgsql/ /usr/local/pgsql/ -COPY --from=kq-imcx-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=pg-cron-pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=pg-pgx-ulid-build /usr/local/pgsql/ /usr/local/pgsql/ COPY --from=rdkit-pg-build /usr/local/pgsql/ /usr/local/pgsql/ @@ -961,7 +935,6 @@ COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src #COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src COPY patches/pg_hintplan.patch /ext-src -#COPY --from=kq-imcx-pg-build /kq_imcx.tar.gz /ext-src COPY --from=pg-cron-pg-build /pg_cron.tar.gz /ext-src COPY patches/pg_cron.patch /ext-src #COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src diff --git a/Makefile b/Makefile index 37bd19ba4440..942867d81a59 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,9 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Where to install Postgres, default is ./pg_install, maybe useful for package managers POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ +OPENSSL_PREFIX_DIR := /usr/local/openssl +ICU_PREFIX_DIR := /usr/local/icu + # # We differentiate between release / debug build types using the BUILD_TYPE # environment variable. 
@@ -20,6 +23,16 @@ else $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) endif +ifeq ($(shell test -e /home/nonroot/.docker_build && echo -n yes),yes) + # Exclude static build openssl, icu for local build (MacOS, Linux) + # Only keep for build type release and debug + PG_CFLAGS += -I$(OPENSSL_PREFIX_DIR)/include + PG_CONFIGURE_OPTS += --with-icu + PG_CONFIGURE_OPTS += ICU_CFLAGS='-I/$(ICU_PREFIX_DIR)/include -DU_STATIC_IMPLEMENTATION' + PG_CONFIGURE_OPTS += ICU_LIBS='-L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -licui18n -licuuc -licudata -lstdc++ -Wl,-Bdynamic -lm' + PG_CONFIGURE_OPTS += LDFLAGS='-L$(OPENSSL_PREFIX_DIR)/lib -L$(OPENSSL_PREFIX_DIR)/lib64 -L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -Wl,-Bstatic -lssl -lcrypto -Wl,-Bdynamic -lrt -lm -ldl -lpthread' +endif + UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) # Seccomp BPF is only available for Linux @@ -28,7 +41,7 @@ else ifeq ($(UNAME_S),Darwin) ifndef DISABLE_HOMEBREW # macOS with brew-installed openssl requires explicit paths # It can be configured with OPENSSL_PREFIX variable - OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3) + OPENSSL_PREFIX := $(shell brew --prefix openssl@3) PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig # macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index 40060f411764..a79b666409ae 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -918,38 +918,39 @@ impl ComputeNode { // temporarily reset max_cluster_size in config // to avoid the possibility of hitting the limit, while we are reconfiguring: // creating new extensions, roles, etc... - config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?; - self.pg_reload_conf()?; - - let mut client = Client::connect(self.connstr.as_str(), NoTls)?; + config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || { + self.pg_reload_conf()?; + + let mut client = Client::connect(self.connstr.as_str(), NoTls)?; + + // Proceed with post-startup configuration. Note, that order of operations is important. + // Disable DDL forwarding because control plane already knows about these roles/databases. + if spec.mode == ComputeMode::Primary { + client.simple_query("SET neon.forward_ddl = false")?; + cleanup_instance(&mut client)?; + handle_roles(&spec, &mut client)?; + handle_databases(&spec, &mut client)?; + handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?; + handle_grants( + &spec, + &mut client, + self.connstr.as_str(), + self.has_feature(ComputeFeature::AnonExtension), + )?; + handle_extensions(&spec, &mut client)?; + handle_extension_neon(&mut client)?; + // We can skip handle_migrations here because a new migration can only appear + // if we have a new version of the compute_ctl binary, which can only happen + // if compute got restarted, in which case we'll end up inside of apply_config + // instead of reconfigure. + } - // Proceed with post-startup configuration. Note, that order of operations is important. - // Disable DDL forwarding because control plane already knows about these roles/databases. 
- if spec.mode == ComputeMode::Primary { - client.simple_query("SET neon.forward_ddl = false")?; - cleanup_instance(&mut client)?; - handle_roles(&spec, &mut client)?; - handle_databases(&spec, &mut client)?; - handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?; - handle_grants( - &spec, - &mut client, - self.connstr.as_str(), - self.has_feature(ComputeFeature::AnonExtension), - )?; - handle_extensions(&spec, &mut client)?; - handle_extension_neon(&mut client)?; - // We can skip handle_migrations here because a new migration can only appear - // if we have a new version of the compute_ctl binary, which can only happen - // if compute got restarted, in which case we'll end up inside of apply_config - // instead of reconfigure. - } + // 'Close' connection + drop(client); - // 'Close' connection - drop(client); + Ok(()) + })?; - // reset max_cluster_size in config back to original value and reload config - config::compute_ctl_temp_override_remove(pgdata_path)?; self.pg_reload_conf()?; let unknown_op = "unknown".to_string(); @@ -1040,12 +1041,17 @@ impl ComputeNode { // temporarily reset max_cluster_size in config // to avoid the possibility of hitting the limit, while we are applying config: // creating new extensions, roles, etc... - config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?; - self.pg_reload_conf()?; + config::with_compute_ctl_tmp_override( + pgdata_path, + "neon.max_cluster_size=-1", + || { + self.pg_reload_conf()?; - self.apply_config(&compute_state)?; + self.apply_config(&compute_state)?; - config::compute_ctl_temp_override_remove(pgdata_path)?; + Ok(()) + }, + )?; self.pg_reload_conf()?; } self.post_apply_config()?; diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 89c866b20c09..2c4aec4116ae 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -131,18 +131,17 @@ pub fn write_postgres_conf( Ok(()) } -/// create file compute_ctl_temp_override.conf in pgdata_dir -/// add provided options to this file -pub fn compute_ctl_temp_override_create(pgdata_path: &Path, options: &str) -> Result<()> { +pub fn with_compute_ctl_tmp_override<F>(pgdata_path: &Path, options: &str, exec: F) -> Result<()> +where + F: FnOnce() -> Result<()>, +{ let path = pgdata_path.join("compute_ctl_temp_override.conf"); let mut file = File::create(path)?; write!(file, "{}", options)?; - Ok(()) -} -/// remove file compute_ctl_temp_override.conf in pgdata_dir -pub fn compute_ctl_temp_override_remove(pgdata_path: &Path) -> Result<()> { - let path = pgdata_path.join("compute_ctl_temp_override.conf"); - std::fs::remove_file(path)?; - Ok(()) + let res = exec(); + + file.set_len(0)?; + + res } diff --git a/compute_tools/src/http/api.rs b/compute_tools/src/http/api.rs index 0286429cf24f..43d29402bcfd 100644 --- a/compute_tools/src/http/api.rs +++ b/compute_tools/src/http/api.rs @@ -17,7 +17,7 @@ use hyper::header::CONTENT_TYPE; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use tokio::task; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use tracing_utils::http::OtelName; use utils::http::request::must_get_query_param; @@ -48,7 +48,7 @@ async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> { - info!("serving /status GET request"); + debug!("serving /status GET request"); let state = compute.state.lock().unwrap(); let status_response = status_response_from_state(&state);
Response::new(Body::from(serde_json::to_string(&status_response).unwrap())) diff --git a/control_plane/src/background_process.rs b/control_plane/src/background_process.rs index 94666f28706c..a272c306e7fb 100644 --- a/control_plane/src/background_process.rs +++ b/control_plane/src/background_process.rs @@ -36,11 +36,11 @@ use utils::pid_file::{self, PidFileRead}; // it's waiting. If the process hasn't started/stopped after 5 seconds, // it prints a notice that it's taking long, but keeps waiting. // -const RETRY_UNTIL_SECS: u64 = 10; -const RETRIES: u64 = (RETRY_UNTIL_SECS * 1000) / RETRY_INTERVAL_MILLIS; -const RETRY_INTERVAL_MILLIS: u64 = 100; -const DOT_EVERY_RETRIES: u64 = 10; -const NOTICE_AFTER_RETRIES: u64 = 50; +const STOP_RETRY_TIMEOUT: Duration = Duration::from_secs(10); +const STOP_RETRIES: u128 = STOP_RETRY_TIMEOUT.as_millis() / RETRY_INTERVAL.as_millis(); +const RETRY_INTERVAL: Duration = Duration::from_millis(100); +const DOT_EVERY_RETRIES: u128 = 10; +const NOTICE_AFTER_RETRIES: u128 = 50; /// Argument to `start_process`, to indicate whether it should create pidfile or if the process creates /// it itself. @@ -52,6 +52,7 @@ pub enum InitialPidFile { } /// Start a background child process using the parameters given. +#[allow(clippy::too_many_arguments)] pub async fn start_process( process_name: &str, datadir: &Path, @@ -59,6 +60,7 @@ pub async fn start_process( args: AI, envs: EI, initial_pid_file: InitialPidFile, + retry_timeout: &Duration, process_status_check: F, ) -> anyhow::Result<()> where @@ -69,6 +71,10 @@ where // Not generic AsRef, otherwise empty `envs` prevents type inference EI: IntoIterator, { + let retries: u128 = retry_timeout.as_millis() / RETRY_INTERVAL.as_millis(); + if !datadir.metadata().context("stat datadir")?.is_dir() { + anyhow::bail!("`datadir` must be a directory when calling this function: {datadir:?}"); + } let log_path = datadir.join(format!("{process_name}.log")); let process_log_file = fs::OpenOptions::new() .create(true) @@ -85,7 +91,13 @@ where let background_command = command .stdout(process_log_file) .stderr(same_file_for_stderr) - .args(args); + .args(args) + // spawn all child processes in their datadir, useful for all kinds of things, + // not least cleaning up child processes e.g. after an unclean exit from the test suite: + // ``` + // lsof -d cwd -a +D Users/cs/src/neon/test_output + // ``` + .current_dir(datadir); let filled_cmd = fill_env_vars_prefixed_neon(fill_remote_storage_secrets_vars( fill_rust_env_vars(background_command), @@ -121,7 +133,7 @@ where .unwrap(); }); - for retries in 0..RETRIES { + for retries in 0..retries { match process_started(pid, pid_file_to_check, &process_status_check).await { Ok(true) => { println!("\n{process_name} started and passed status check, pid: {pid}"); @@ -139,7 +151,7 @@ where print!("."); io::stdout().flush().unwrap(); } - thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS)); + thread::sleep(RETRY_INTERVAL); } Err(e) => { println!("error starting process {process_name:?}: {e:#}"); @@ -148,9 +160,10 @@ where } } println!(); - anyhow::bail!( - "{process_name} did not start+pass status checks within {RETRY_UNTIL_SECS} seconds" - ); + anyhow::bail!(format!( + "{} did not start+pass status checks within {:?} seconds", + process_name, retry_timeout + )); } /// Stops the process, using the pid file given. Returns Ok also if the process is already not running. 
@@ -206,7 +219,7 @@ pub fn stop_process( } pub fn wait_until_stopped(process_name: &str, pid: Pid) -> anyhow::Result<()> { - for retries in 0..RETRIES { + for retries in 0..STOP_RETRIES { match process_has_stopped(pid) { Ok(true) => { println!("\n{process_name} stopped"); @@ -222,7 +235,7 @@ pub fn wait_until_stopped(process_name: &str, pid: Pid) -> anyhow::Result<()> { print!("."); io::stdout().flush().unwrap(); } - thread::sleep(Duration::from_millis(RETRY_INTERVAL_MILLIS)); + thread::sleep(RETRY_INTERVAL); } Err(e) => { println!("{process_name} with pid {pid} failed to stop: {e:#}"); @@ -231,7 +244,10 @@ pub fn wait_until_stopped(process_name: &str, pid: Pid) -> anyhow::Result<()> { } } println!(); - anyhow::bail!("{process_name} with pid {pid} did not stop in {RETRY_UNTIL_SECS} seconds"); + anyhow::bail!(format!( + "{} with pid {} did not stop in {:?} seconds", + process_name, pid, STOP_RETRY_TIMEOUT + )); } fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command { diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 18e395e2b5a5..3f656932d5d3 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -36,6 +36,7 @@ use std::collections::{BTreeSet, HashMap}; use std::path::PathBuf; use std::process::exit; use std::str::FromStr; +use std::time::Duration; use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR; use url::Host; use utils::{ @@ -87,7 +88,8 @@ fn main() -> Result<()> { handle_init(sub_args).map(Some) } else { // all other commands need an existing config - let mut env = LocalEnv::load_config().context("Error loading config")?; + let mut env = + LocalEnv::load_config(&local_env::base_path()).context("Error loading config")?; let original_env = env.clone(); let rt = tokio::runtime::Builder::new_current_thread() @@ -98,7 +100,7 @@ fn main() -> Result<()> { let subcommand_result = match sub_name { "tenant" => rt.block_on(handle_tenant(sub_args, &mut env)), "timeline" => rt.block_on(handle_timeline(sub_args, &mut env)), - "start" => rt.block_on(handle_start_all(&env)), + "start" => rt.block_on(handle_start_all(&env, get_start_timeout(sub_args))), "stop" => rt.block_on(handle_stop_all(sub_args, &env)), "pageserver" => rt.block_on(handle_pageserver(sub_args, &env)), "storage_controller" => rt.block_on(handle_storage_controller(sub_args, &env)), @@ -364,7 +366,8 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result { LocalEnv::init(init_conf, force) .context("materialize initial neon_local environment on disk")?; - Ok(LocalEnv::load_config().expect("freshly written config should be loadable")) + Ok(LocalEnv::load_config(&local_env::base_path()) + .expect("freshly written config should be loadable")) } /// The default pageserver is the one where CLI tenant/timeline operations are sent by default. @@ -1046,10 +1049,20 @@ fn get_pageserver(env: &local_env::LocalEnv, args: &ArgMatches) -> Result &Duration { + let humantime_duration = args + .get_one::("start-timeout") + .expect("invalid value for start-timeout"); + humantime_duration.as_ref() +} + async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { match sub_match.subcommand() { Some(("start", subcommand_args)) => { - if let Err(e) = get_pageserver(env, subcommand_args)?.start().await { + if let Err(e) = get_pageserver(env, subcommand_args)? 
+ .start(get_start_timeout(subcommand_args)) + .await + { eprintln!("pageserver start failed: {e}"); exit(1); } @@ -1075,7 +1088,7 @@ async fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> exit(1); } - if let Err(e) = pageserver.start().await { + if let Err(e) = pageserver.start(get_start_timeout(sub_match)).await { eprintln!("pageserver start failed: {e}"); exit(1); } @@ -1103,8 +1116,8 @@ async fn handle_storage_controller( ) -> Result<()> { let svc = StorageController::from_env(env); match sub_match.subcommand() { - Some(("start", _start_match)) => { - if let Err(e) = svc.start().await { + Some(("start", start_match)) => { + if let Err(e) = svc.start(get_start_timeout(start_match)).await { eprintln!("start failed: {e}"); exit(1); } @@ -1163,7 +1176,10 @@ async fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> "start" => { let extra_opts = safekeeper_extra_opts(sub_args); - if let Err(e) = safekeeper.start(extra_opts).await { + if let Err(e) = safekeeper + .start(extra_opts, get_start_timeout(sub_args)) + .await + { eprintln!("safekeeper start failed: {}", e); exit(1); } @@ -1189,7 +1205,10 @@ async fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> } let extra_opts = safekeeper_extra_opts(sub_args); - if let Err(e) = safekeeper.start(extra_opts).await { + if let Err(e) = safekeeper + .start(extra_opts, get_start_timeout(sub_args)) + .await + { eprintln!("safekeeper start failed: {}", e); exit(1); } @@ -1202,15 +1221,18 @@ async fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Ok(()) } -async fn handle_start_all(env: &local_env::LocalEnv) -> anyhow::Result<()> { +async fn handle_start_all( + env: &local_env::LocalEnv, + retry_timeout: &Duration, +) -> anyhow::Result<()> { // Endpoints are not started automatically - broker::start_broker_process(env).await?; + broker::start_broker_process(env, retry_timeout).await?; // Only start the storage controller if the pageserver is configured to need it if env.control_plane_api.is_some() { let storage_controller = StorageController::from_env(env); - if let Err(e) = storage_controller.start().await { + if let Err(e) = storage_controller.start(retry_timeout).await { eprintln!("storage_controller start failed: {:#}", e); try_stop_all(env, true).await; exit(1); @@ -1219,7 +1241,7 @@ async fn handle_start_all(env: &local_env::LocalEnv) -> anyhow::Result<()> { for ps_conf in &env.pageservers { let pageserver = PageServerNode::from_env(env, ps_conf); - if let Err(e) = pageserver.start().await { + if let Err(e) = pageserver.start(retry_timeout).await { eprintln!("pageserver {} start failed: {:#}", ps_conf.id, e); try_stop_all(env, true).await; exit(1); @@ -1228,7 +1250,7 @@ async fn handle_start_all(env: &local_env::LocalEnv) -> anyhow::Result<()> { for node in env.safekeepers.iter() { let safekeeper = SafekeeperNode::from_env(env, node); - if let Err(e) = safekeeper.start(vec![]).await { + if let Err(e) = safekeeper.start(vec![], retry_timeout).await { eprintln!("safekeeper {} start failed: {:#}", safekeeper.id, e); try_stop_all(env, false).await; exit(1); @@ -1288,6 +1310,15 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) { } fn cli() -> Command { + let timeout_arg = Arg::new("start-timeout") + .long("start-timeout") + .short('t') + .global(true) + .help("timeout until we fail the command, e.g. 
30s") + .value_parser(value_parser!(humantime::Duration)) + .default_value("10s") + .required(false); + let branch_name_arg = Arg::new("branch-name") .long("branch-name") .help("Name of the branch to be created or used as an alias for other services") @@ -1507,6 +1538,7 @@ fn cli() -> Command { .subcommand(Command::new("status")) .subcommand(Command::new("start") .about("Start local pageserver") + .arg(timeout_arg.clone()) ) .subcommand(Command::new("stop") .about("Stop local pageserver") @@ -1514,13 +1546,15 @@ fn cli() -> Command { ) .subcommand(Command::new("restart") .about("Restart local pageserver") + .arg(timeout_arg.clone()) ) ) .subcommand( Command::new("storage_controller") .arg_required_else_help(true) .about("Manage storage_controller") - .subcommand(Command::new("start").about("Start storage controller")) + .subcommand(Command::new("start").about("Start storage controller") + .arg(timeout_arg.clone())) .subcommand(Command::new("stop").about("Stop storage controller") .arg(stop_mode_arg.clone())) ) @@ -1532,6 +1566,7 @@ fn cli() -> Command { .about("Start local safekeeper") .arg(safekeeper_id_arg.clone()) .arg(safekeeper_extra_opt_arg.clone()) + .arg(timeout_arg.clone()) ) .subcommand(Command::new("stop") .about("Stop local safekeeper") @@ -1543,6 +1578,7 @@ fn cli() -> Command { .arg(safekeeper_id_arg) .arg(stop_mode_arg.clone()) .arg(safekeeper_extra_opt_arg) + .arg(timeout_arg.clone()) ) ) .subcommand( @@ -1577,6 +1613,7 @@ fn cli() -> Command { .arg(remote_ext_config_args) .arg(create_test_user) .arg(allow_multiple.clone()) + .arg(timeout_arg.clone()) ) .subcommand(Command::new("reconfigure") .about("Reconfigure the endpoint") @@ -1628,6 +1665,7 @@ fn cli() -> Command { .subcommand( Command::new("start") .about("Start page server and safekeepers") + .arg(timeout_arg.clone()) ) .subcommand( Command::new("stop") diff --git a/control_plane/src/broker.rs b/control_plane/src/broker.rs index f40705863b16..c3cfc140da2f 100644 --- a/control_plane/src/broker.rs +++ b/control_plane/src/broker.rs @@ -5,13 +5,18 @@ //! ```text //! .neon/safekeepers/ //! ``` +use std::time::Duration; + use anyhow::Context; use camino::Utf8PathBuf; use crate::{background_process, local_env}; -pub async fn start_broker_process(env: &local_env::LocalEnv) -> anyhow::Result<()> { +pub async fn start_broker_process( + env: &local_env::LocalEnv, + retry_timeout: &Duration, +) -> anyhow::Result<()> { let broker = &env.broker; let listen_addr = &broker.listen_addr; @@ -27,6 +32,7 @@ pub async fn start_broker_process(env: &local_env::LocalEnv) -> anyhow::Result<( args, [], background_process::InitialPidFile::Create(storage_broker_pid_file_path(env)), + retry_timeout, || async { let url = broker.client_url(); let status_url = url.join("status").with_context(|| { diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index 0edcf1be4ee3..6634274d2a55 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -42,8 +42,8 @@ pub struct LocalEnv { // compute endpoints). // // This is not stored in the config file. Rather, this is the path where the - // config file itself is. It is read from the NEON_REPO_DIR env variable or - // '.neon' if not given. + // config file itself is. It is read from the NEON_REPO_DIR env variable which + // must be an absolute path. If the env var is not set, $PWD/.neon is used. pub base_data_dir: PathBuf, // Path to postgres distribution. 
It's expected that "bin", "include", @@ -431,9 +431,7 @@ impl LocalEnv { } /// Construct `Self` from on-disk state. - pub fn load_config() -> anyhow::Result { - let repopath = base_path(); - + pub fn load_config(repopath: &Path) -> anyhow::Result { if !repopath.exists() { bail!( "Neon config is not found in {}. You need to run 'neon_local init' first", @@ -461,7 +459,7 @@ impl LocalEnv { branch_name_mappings, } = on_disk_config; LocalEnv { - base_data_dir: repopath.clone(), + base_data_dir: repopath.to_owned(), pg_distrib_dir, neon_distrib_dir, default_tenant_id, @@ -482,7 +480,7 @@ impl LocalEnv { "we ensure this during deserialization" ); env.pageservers = { - let iter = std::fs::read_dir(&repopath).context("open dir")?; + let iter = std::fs::read_dir(repopath).context("open dir")?; let mut pageservers = Vec::new(); for res in iter { let dentry = res?; @@ -719,10 +717,25 @@ impl LocalEnv { } pub fn base_path() -> PathBuf { - match std::env::var_os("NEON_REPO_DIR") { - Some(val) => PathBuf::from(val), - None => PathBuf::from(".neon"), - } + let path = match std::env::var_os("NEON_REPO_DIR") { + Some(val) => { + let path = PathBuf::from(val); + if !path.is_absolute() { + // repeat the env var in the error because our default is always absolute + panic!("NEON_REPO_DIR must be an absolute path, got {path:?}"); + } + path + } + None => { + let pwd = std::env::current_dir() + // technically this can fail but it's quite unlikeley + .expect("determine current directory"); + let pwd_abs = pwd.canonicalize().expect("canonicalize current directory"); + pwd_abs.join(".neon") + } + }; + assert!(path.is_absolute()); + path } /// Generate a public/private key pair for JWT authentication diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index 5a8476369769..da4b98784915 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -158,8 +158,8 @@ impl PageServerNode { .expect("non-Unicode path") } - pub async fn start(&self) -> anyhow::Result<()> { - self.start_node().await + pub async fn start(&self, retry_timeout: &Duration) -> anyhow::Result<()> { + self.start_node(retry_timeout).await } fn pageserver_init(&self, conf: NeonLocalInitPageserverConf) -> anyhow::Result<()> { @@ -214,14 +214,15 @@ impl PageServerNode { Ok(()) } - async fn start_node(&self) -> anyhow::Result<()> { + async fn start_node(&self, retry_timeout: &Duration) -> anyhow::Result<()> { // TODO: using a thread here because start_process() is not async but we need to call check_status() let datadir = self.repo_path(); print!( - "Starting pageserver node {} at '{}' in {:?}", + "Starting pageserver node {} at '{}' in {:?}, retrying for {:?}", self.conf.id, self.pg_connection_config.raw_address(), - datadir + datadir, + retry_timeout ); io::stdout().flush().context("flush stdout")?; @@ -239,6 +240,7 @@ impl PageServerNode { args, self.pageserver_env_variables()?, background_process::InitialPidFile::Expect(self.pid_file()), + retry_timeout, || async { let st = self.check_status().await; match st { @@ -383,6 +385,10 @@ impl PageServerNode { .map(|x| x.parse::()) .transpose() .context("Failed to parse 'switch_aux_file_policy'")?, + lsn_lease_length: settings.remove("lsn_lease_length").map(|x| x.to_string()), + lsn_lease_length_for_ts: settings + .remove("lsn_lease_length_for_ts") + .map(|x| x.to_string()), }; if !settings.is_empty() { bail!("Unrecognized tenant settings: {settings:?}") @@ -506,6 +512,10 @@ impl PageServerNode { .map(|x| x.parse::()) .transpose() .context("Failed to parse 
'switch_aux_file_policy'")?, + lsn_lease_length: settings.remove("lsn_lease_length").map(|x| x.to_string()), + lsn_lease_length_for_ts: settings + .remove("lsn_lease_length_for_ts") + .map(|x| x.to_string()), } }; diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index d62a2e80b529..a0a73f56091d 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -7,6 +7,7 @@ //! ``` use std::io::Write; use std::path::PathBuf; +use std::time::Duration; use std::{io, result}; use anyhow::Context; @@ -14,6 +15,7 @@ use camino::Utf8PathBuf; use postgres_connection::PgConnectionConfig; use reqwest::{IntoUrl, Method}; use thiserror::Error; +use utils::auth::{Claims, Scope}; use utils::{http::error::HttpErrorBody, id::NodeId}; use crate::{ @@ -110,11 +112,16 @@ impl SafekeeperNode { .expect("non-Unicode path") } - pub async fn start(&self, extra_opts: Vec) -> anyhow::Result<()> { + pub async fn start( + &self, + extra_opts: Vec, + retry_timeout: &Duration, + ) -> anyhow::Result<()> { print!( - "Starting safekeeper at '{}' in '{}'", + "Starting safekeeper at '{}' in '{}', retrying for {:?}", self.pg_connection_config.raw_address(), - self.datadir_path().display() + self.datadir_path().display(), + retry_timeout, ); io::stdout().flush().unwrap(); @@ -197,8 +204,9 @@ impl SafekeeperNode { &datadir, &self.env.safekeeper_bin(), &args, - [], + self.safekeeper_env_variables()?, background_process::InitialPidFile::Expect(self.pid_file()), + retry_timeout, || async { match self.check_status().await { Ok(()) => Ok(true), @@ -210,6 +218,18 @@ impl SafekeeperNode { .await } + fn safekeeper_env_variables(&self) -> anyhow::Result> { + // Generate a token to connect from safekeeper to peers + if self.conf.auth_enabled { + let token = self + .env + .generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?; + Ok(vec![("SAFEKEEPER_AUTH_TOKEN".to_owned(), token)]) + } else { + Ok(Vec::new()) + } + } + /// /// Stop the server. /// diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index b6b7ea7762cb..1c56d5f80fe4 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -18,7 +18,7 @@ use pageserver_client::mgmt_api::ResponseErrorMessageExt; use postgres_backend::AuthType; use reqwest::Method; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use std::{fs, str::FromStr}; +use std::{fs, str::FromStr, time::Duration}; use tokio::process::Command; use tracing::instrument; use url::Url; @@ -46,6 +46,7 @@ const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16; pub struct AttachHookRequest { pub tenant_shard_id: TenantShardId, pub node_id: Option, + pub generation_override: Option, } #[derive(Serialize, Deserialize)] @@ -223,7 +224,7 @@ impl StorageController { Ok(database_url) } - pub async fn start(&self) -> anyhow::Result<()> { + pub async fn start(&self, retry_timeout: &Duration) -> anyhow::Result<()> { // Start a vanilla Postgres process used by the storage controller for persistence. 
let pg_data_path = Utf8PathBuf::from_path_buf(self.env.base_data_dir.clone()) .unwrap() @@ -271,6 +272,7 @@ impl StorageController { db_start_args, [], background_process::InitialPidFile::Create(self.postgres_pid_file()), + retry_timeout, || self.pg_isready(&pg_bin_dir), ) .await?; @@ -313,16 +315,19 @@ impl StorageController { args.push(format!("--split-threshold={split_threshold}")) } + args.push(format!( + "--neon-local-repo-dir={}", + self.env.base_data_dir.display() + )); + background_process::start_process( COMMAND, &self.env.base_data_dir, &self.env.storage_controller_bin(), args, - [( - "NEON_REPO_DIR".to_string(), - self.env.base_data_dir.to_string_lossy().to_string(), - )], + [], background_process::InitialPidFile::Create(self.pid_file()), + retry_timeout, || async { match self.ready().await { Ok(_) => Ok(true), @@ -440,6 +445,7 @@ impl StorageController { let request = AttachHookRequest { tenant_shard_id, node_id: Some(pageserver_id), + generation_override: None, }; let response = self diff --git a/docs/core_changes.md b/docs/core_changes.md index ea219adae97c..1388317728f8 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -11,15 +11,28 @@ page server. We currently use the same binary for both, with --wal-redo runtime the WAL redo mode. Some PostgreSQL changes are needed in the compute node, while others are just for the WAL redo process. -In addition to core PostgreSQL changes, there is a Neon extension in contrib/neon, to hook into the -smgr interface. Once all the core changes have been submitted to upstream or eliminated some other -way, the extension could live outside the postgres repository and build against vanilla PostgreSQL. +In addition to core PostgreSQL changes, there is a Neon extension in the pgxn/neon directory that +hooks into the smgr interface, and rmgr extension in pgxn/neon_rmgr. The extensions are loaded into +the Postgres processes with shared_preload_libraries. Most of the Neon-specific code is in the +extensions, and for any new features, that is preferred over modifying core PostgreSQL code. Below is a list of all the PostgreSQL source code changes, categorized into changes needed for compute, and changes needed for the WAL redo process: # Changes for Compute node +## Prefetching + +There are changes in many places to perform prefetching, for example for sequential scans. Neon +doesn't benefit from OS readahead, and the latency to pageservers is quite high compared to local +disk, so prefetching is critical for performance, also for sequential scans. + +### How to get rid of the patch + +Upcoming "streaming read" work in v17 might simplify this. And async I/O work in v18 will hopefully +do more. + + ## Add t_cid to heap WAL records ``` @@ -37,54 +50,11 @@ The problem is that the XLOG_HEAP_INSERT record does not include the command id Bite the bullet and submit the patch to PostgreSQL, to add the t_cid to the WAL records. It makes the WAL records larger, which could make this unpopular in the PostgreSQL community. However, it might simplify some logical decoding code; Andres Freund briefly mentioned in PGCon 2022 discussion on Heikki's Neon presentation that logical decoding currently needs to jump through some hoops to reconstruct the same information. +Update from Heikki (2024-04-17): I tried to write an upstream patch for that, to use the t_cid field for logical decoding, but it was not as straightforward as it first sounded. 
### Alternatives Perhaps we could write an extra WAL record with the t_cid information, when a page is evicted that contains rows that were touched a transaction that's still running. However, that seems very complicated. -## ginfast.c - -``` -diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c -index e0d9940946..2d964c02e9 100644 ---- a/src/backend/access/gin/ginfast.c -+++ b/src/backend/access/gin/ginfast.c -@@ -285,6 +285,17 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) - memset(&sublist, 0, sizeof(GinMetaPageData)); - makeSublist(index, collector->tuples, collector->ntuples, &sublist); - -+ if (metadata->head != InvalidBlockNumber) -+ { -+ /* -+ * ZENITH: Get buffer before XLogBeginInsert() to avoid recursive call -+ * of XLogBeginInsert(). Reading a new buffer might evict a dirty page from -+ * the buffer cache, and if that page happens to be an FSM or VM page, zenith_write() -+ * will try to WAL-log an image of the page. -+ */ -+ buffer = ReadBuffer(index, metadata->tail); -+ } -+ - if (needWal) - XLogBeginInsert(); - -@@ -316,7 +327,6 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) - data.prevTail = metadata->tail; - data.newRightlink = sublist.head; - -- buffer = ReadBuffer(index, metadata->tail); - LockBuffer(buffer, GIN_EXCLUSIVE); - page = BufferGetPage(buffer); -``` - -The problem is explained in the comment above - -### How to get rid of the patch - -Can we stop WAL-logging FSM or VM pages? Or delay the WAL logging until we're out of the critical -section or something. - -Maybe some bigger rewrite of FSM and VM would help to avoid WAL-logging FSM and VM page images? - - ## Mark index builds that use buffer manager without logging explicitly ``` @@ -95,6 +65,8 @@ Maybe some bigger rewrite of FSM and VM would help to avoid WAL-logging FSM and also some changes in src/backend/storage/smgr/smgr.c ``` +pgvector 0.6.0 also needs a similar change, which would be very nice to get rid of too. + When a GIN index is built, for example, it is built by inserting the entries into the index more or less normally, but without WAL-logging anything. After the index has been built, we iterate through all pages and write them to the WAL. That doesn't work for Neon, because if a page is not WAL-logged @@ -109,6 +81,10 @@ an operation: `smgr_start_unlogged_build`, `smgr_finish_unlogged_build_phase_1` I think it would make sense to be more explicit about that in PostgreSQL too. So extract these changes to a patch and post to pgsql-hackers. +Perhaps we could deduce that an unlogged index build has started when we see a page being evicted +with zero LSN. How to be sure it's an unlogged index build rather than a bug? Currently we have a +check for that and PANIC if we see page with zero LSN being evicted. And how do we detect when the +index build has finished? See https://github.com/neondatabase/neon/pull/7440 for an attempt at that. ## Track last-written page LSN @@ -140,57 +116,6 @@ The old method is still available, though. Wait until v15? -## Cache relation sizes - -The Neon extension contains a little cache for smgrnblocks() and smgrexists() calls, to avoid going -to the page server every time. It might be useful to cache those in PostgreSQL, maybe in the -relcache? 
(I think we do cache nblocks in relcache already, check why that's not good enough for -Neon) - - -## Use buffer manager when extending VM or FSM - -``` - src/backend/storage/freespace/freespace.c | 14 +- - src/backend/access/heap/visibilitymap.c | 15 +- - -diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c -index e198df65d8..addfe93eac 100644 ---- a/src/backend/access/heap/visibilitymap.c -+++ b/src/backend/access/heap/visibilitymap.c -@@ -652,10 +652,19 @@ vm_extend(Relation rel, BlockNumber vm_nblocks) - /* Now extend the file */ - while (vm_nblocks_now < vm_nblocks) - { -- PageSetChecksumInplace((Page) pg.data, vm_nblocks_now); -+ /* -+ * ZENITH: Initialize VM pages through buffer cache to prevent loading -+ * them from pageserver. -+ */ -+ Buffer buffer = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, P_NEW, -+ RBM_ZERO_AND_LOCK, NULL); -+ Page page = BufferGetPage(buffer); -+ -+ PageInit((Page) page, BLCKSZ, 0); -+ PageSetChecksumInplace(page, vm_nblocks_now); -+ MarkBufferDirty(buffer); -+ UnlockReleaseBuffer(buffer); - -- smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now, -- pg.data, false); - vm_nblocks_now++; - } -``` - -### Problem we're trying to solve - -??? - -### How to get rid of the patch - -Maybe this would be a reasonable change in PostgreSQL too? - - ## Allow startup without reading checkpoint record In Neon, the compute node is stateless. So when we are launching compute node, we need to provide @@ -231,7 +156,7 @@ index 0415df9ccb..9f9db3c8bc 100644 * crash we can lose (skip over) as many values as we pre-logged. */ -#define SEQ_LOG_VALS 32 -+/* Zenith XXX: to ensure sequence order of sequence in Zenith we need to WAL log each sequence update. */ ++/* Neon XXX: to ensure sequence order of sequence in Zenith we need to WAL log each sequence update. */ +/* #define SEQ_LOG_VALS 32 */ +#define SEQ_LOG_VALS 0 ``` @@ -250,66 +175,6 @@ would be weird if the sequence moved backwards though, think of PITR. Or add a GUC for the amount to prefix to PostgreSQL, and force it to 1 in Neon. -## Walproposer - -``` - src/Makefile | 1 + - src/backend/replication/libpqwalproposer/Makefile | 37 + - src/backend/replication/libpqwalproposer/libpqwalproposer.c | 416 ++++++++++++ - src/backend/postmaster/bgworker.c | 4 + - src/backend/postmaster/postmaster.c | 6 + - src/backend/replication/Makefile | 4 +- - src/backend/replication/walproposer.c | 2350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - src/backend/replication/walproposer_utils.c | 402 +++++++++++ - src/backend/replication/walreceiver.c | 7 + - src/backend/replication/walsender.c | 320 ++++++--- - src/backend/storage/ipc/ipci.c | 6 + - src/include/replication/walproposer.h | 565 ++++++++++++++++ -``` - -WAL proposer is communicating with safekeeper and ensures WAL durability by quorum writes. It is -currently implemented as patch to standard WAL sender. - -### How to get rid of the patch - -Refactor into an extension. Submit hooks or APIs into upstream if necessary. - -@MMeent did some work on this already: https://github.com/neondatabase/postgres/pull/96 - -## Ignore unexpected data beyond EOF in bufmgr.c - -``` -@@ -922,11 +928,14 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, - */ - bufBlock = isLocalBuf ? 
LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr); - if (!PageIsNew((Page) bufBlock)) -- ereport(ERROR, -+ { -+ // XXX-ZENITH -+ MemSet((char *) bufBlock, 0, BLCKSZ); -+ ereport(DEBUG1, - (errmsg("unexpected data beyond EOF in block %u of relation %s", - blockNum, relpath(smgr->smgr_rnode, forkNum)), - errhint("This has been seen to occur with buggy kernels; consider updating your system."))); -- -+ } - /* - * We *must* do smgrextend before succeeding, else the page will not - * be reserved by the kernel, and the next P_NEW call will decide to -``` - -PostgreSQL is a bit sloppy with extending relations. Usually, the relation is extended with zeros -first, then the page is filled, and finally the new page WAL-logged. But if multiple backends extend -a relation at the same time, the pages can be WAL-logged in different order. - -I'm not sure what scenario exactly required this change in Neon, though. - -### How to get rid of the patch - -Submit patches to pgsql-hackers, to tighten up the WAL-logging around relation extension. It's a bit -confusing even in PostgreSQL. Maybe WAL log the intention to extend first, then extend the relation, -and finally WAL-log that the extension succeeded. - ## Make smgr interface available to extensions ``` @@ -321,6 +186,8 @@ and finally WAL-log that the extension succeeded. Submit to upstream. This could be useful for the Disk Encryption patches too, or for compression. +We have submitted this to upstream, but it's moving at glacial a speed. +https://commitfest.postgresql.org/47/4428/ ## Added relpersistence argument to smgropen() @@ -444,6 +311,148 @@ Ignore it. This is only needed for disaster recovery, so once we've eliminated a patches, we can just keep it around as a patch or as separate branch in a repo. +## pg_waldump flags to ignore errors + +After creating a new project or branch in Neon, the first timeline can begin in the middle of a WAL segment. pg_waldump chokes on that, so we added some flags to make it possible to ignore errors. + +### How to get rid of the patch + +Like previous one, ignore it. + + + +## Backpressure if pageserver doesn't ingest WAL fast enough + +``` +@@ -3200,6 +3202,7 @@ ProcessInterrupts(void) + return; + InterruptPending = false; + ++retry: + if (ProcDiePending) + { + ProcDiePending = false; +@@ -3447,6 +3450,13 @@ ProcessInterrupts(void) + + if (ParallelApplyMessagePending) + HandleParallelApplyMessages(); ++ ++ /* Call registered callback if any */ ++ if (ProcessInterruptsCallback) ++ { ++ if (ProcessInterruptsCallback()) ++ goto retry; ++ } + } +``` + + +### How to get rid of the patch + +Submit a patch to upstream, for a hook in ProcessInterrupts. Could be useful for other extensions +too. + + +## SLRU on-demand download + +``` + src/backend/access/transam/slru.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------- + 1 file changed, 92 insertions(+), 13 deletions(-) +``` + +### Problem we're trying to solve + +Previously, SLRU files were included in the basebackup, but the total size of them can be large, +several GB, and downloading them all made the startup time too long. + +### Alternatives + +FUSE hook or LD_PRELOAD trick to intercept the reads on SLRU files + + +## WAL-log an all-zeros page as one large hole + +- In XLogRecordAssemble() + +### Problem we're trying to solve + +This change was made in v16. Starting with v16, when PostgreSQL extends a relation, it first extends +it with zeros, and it can extend the relation more than one block at a time. 
The all-zeros page is WAL-logged, but it's very wasteful to include 8 kB of zeros in the WAL for that. This hack was made so that we WAL logged a compact record with a whole-page "hole". However, PostgreSQL has assertions that prevent such WAL records from being replayed, so this breaks compatibility such that unmodified PostgreSQL cannot process Neon-generated WAL. + +### How to get rid of the patch + +Find another compact representation for a full-page image of an all-zeros page. A compressed image perhaps. + + +## Shut down walproposer after checkpointer + +``` ++ /* Neon: Also allow walproposer background worker to be treated like a WAL sender, so that it's shut down last */ ++ if ((bp->bkend_type == BACKEND_TYPE_NORMAL || bp->bkend_type == BACKEND_TYPE_BGWORKER) && +``` + +This change was needed so that postmaster shuts down the walproposer process only after the shutdown checkpoint record is written. Otherwise, the shutdown record will never make it to the safekeepers. + +### How to get rid of the patch + +Do a bigger refactoring of the postmaster state machine, such that a background worker can specify +the shutdown ordering by itself. The postmaster state machine has grown pretty complicated, and +would benefit from a refactoring for the sake of readability anyway. + + +## EXPLAIN changes for prefetch and LFC + +### How to get rid of the patch + +Konstantin submitted a patch to -hackers already: https://commitfest.postgresql.org/47/4643/. Get that into a committable state. + + +## On-demand download of extensions + +### How to get rid of the patch + +FUSE or LD_PRELOAD trickery to intercept reads? + + +## Publication superuser checks + +We have hacked CreatePublication so that neon_superuser can also create them. + +### How to get rid of the patch + +Create an upstream patch with more fine-grained privileges for publications CREATE/DROP that can be GRANTed to users. + + +## WAL log replication slots + +### How to get rid of the patch + +Utilize the upcoming v17 "slot sync worker", or a similar neon-specific background worker process, to periodically WAL-log the slots, or to export them somewhere else. + + +## WAL-log replication snapshots + +### How to get rid of the patch + +WAL-log them periodically, from a background worker. + + +## WAL-log relmapper files + +Similarly to replication snapshot files, the CID mapping files generated during VACUUM FULL of a catalog table are WAL-logged. + +### How to get rid of the patch + +WAL-log them periodically, from a background worker. + + +## XLogWaitForReplayOf() + +?? + + + + # Not currently committed but proposed ## Disable ring buffer buffer manager strategies @@ -472,23 +481,10 @@ hint bits are set. Wal logging hint bits updates requires FPI which significantl Add special WAL record for setting page hints. -## Prefetching - -### Why? - -As far as pages in Neon are loaded on demand, to reduce node startup time -and also speedup some massive queries we need some mechanism for bulk loading to -reduce page request round-trip overhead. - -Currently Postgres is supporting prefetching only for bitmap scan. -In Neon we should also use prefetch for sequential and index scans, because the OS is not doing it for us. -For sequential scan we could prefetch some number of following pages. For index scan we could prefetch pages -of heap relation addressed by TIDs. - ## Prewarming ### Why? -Short downtime (or, in other words, fast compute node restart time) is one of the key feature of Zenith. 
+Short downtime (or, in other words, fast compute node restart time) is one of the key feature of Neon. But overhead of request-response round-trip for loading pages on demand can make started node warm-up quite slow. We can capture state of compute node buffer cache and send bulk request for this pages at startup. diff --git a/docs/pageserver-pagecache.md b/docs/pageserver-pagecache.md index d9b120bbb907..d022742dff4f 100644 --- a/docs/pageserver-pagecache.md +++ b/docs/pageserver-pagecache.md @@ -5,4 +5,3 @@ TODO: - shared across tenants - store pages from layer files - store pages from "in-memory layer" -- store materialized pages diff --git a/docs/pageserver-services.md b/docs/pageserver-services.md index ba5d3c423e50..11d984eb0881 100644 --- a/docs/pageserver-services.md +++ b/docs/pageserver-services.md @@ -101,11 +101,12 @@ or ```toml [remote_storage] container_name = 'some-container-name' +storage_account = 'somestorageaccnt' container_region = 'us-east' prefix_in_container = '/test-prefix/' ``` -`AZURE_STORAGE_ACCOUNT` and `AZURE_STORAGE_ACCESS_KEY` env variables can be used to specify the azure credentials if needed. +The `AZURE_STORAGE_ACCESS_KEY` env variable can be used to specify the azure credentials if needed. ## Repository background tasks diff --git a/docs/settings.md b/docs/settings.md index 817f97d8ba7a..12a6a4c171e9 100644 --- a/docs/settings.md +++ b/docs/settings.md @@ -134,7 +134,7 @@ depends on that, so if you change it, bad things will happen. #### page_cache_size -Size of the page cache, to hold materialized page versions. Unit is +Size of the page cache. Unit is number of 8 kB blocks. The default is 8192, which means 64 MB. #### max_file_descriptors diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index 1278f17ad2ce..a0d10dc665dc 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -209,6 +209,7 @@ pub enum NodeSchedulingPolicy { Active, Filling, Pause, + PauseForRestart, Draining, } @@ -220,6 +221,7 @@ impl FromStr for NodeSchedulingPolicy { "active" => Ok(Self::Active), "filling" => Ok(Self::Filling), "pause" => Ok(Self::Pause), + "pause_for_restart" => Ok(Self::PauseForRestart), "draining" => Ok(Self::Draining), _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")), } @@ -233,6 +235,7 @@ impl From for String { Active => "active", Filling => "filling", Pause => "pause", + PauseForRestart => "pause_for_restart", Draining => "draining", } .to_string() diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 9311dab33cd0..3db75b7d0e39 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -177,6 +177,20 @@ serde_with::serde_conv!( |value: String| -> Result<_, humantime::TimestampError> { humantime::parse_rfc3339(&value) } ); +impl LsnLease { + /// The default length for an explicit LSN lease request (10 minutes). + pub const DEFAULT_LENGTH: Duration = Duration::from_secs(10 * 60); + + /// The default length for an implicit LSN lease granted during + /// `get_lsn_by_timestamp` request (1 minutes). + pub const DEFAULT_LENGTH_FOR_TS: Duration = Duration::from_secs(60); + + /// Checks whether the lease is expired. + pub fn is_expired(&self, now: &SystemTime) -> bool { + now > &self.valid_until + } +} + /// The only [`TenantState`] variants we could be `TenantState::Activating` from. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ActivatingFrom { @@ -279,22 +293,6 @@ pub struct TenantCreateRequest { pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it } -#[derive(Deserialize, Debug)] -#[serde(deny_unknown_fields)] -pub struct TenantLoadRequest { - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub generation: Option, -} - -impl std::ops::Deref for TenantCreateRequest { - type Target = TenantConfig; - - fn deref(&self) -> &Self::Target { - &self.config - } -} - /// An alternative representation of `pageserver::tenant::TenantConf` with /// simpler types. #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)] @@ -322,6 +320,8 @@ pub struct TenantConfig { pub timeline_get_throttle: Option, pub image_layer_creation_check_threshold: Option, pub switch_aux_file_policy: Option, + pub lsn_lease_length: Option, + pub lsn_lease_length_for_ts: Option, } /// The policy for the aux file storage. It can be switched through `switch_aux_file_policy` diff --git a/libs/remote_storage/Cargo.toml b/libs/remote_storage/Cargo.toml index 78da01c9a001..23d82b90bd0b 100644 --- a/libs/remote_storage/Cargo.toml +++ b/libs/remote_storage/Cargo.toml @@ -14,8 +14,9 @@ aws-config.workspace = true aws-sdk-s3.workspace = true aws-credential-types.workspace = true bytes.workspace = true -camino.workspace = true +camino = { workspace = true, features = ["serde1"] } humantime.workspace = true +humantime-serde.workspace = true hyper = { workspace = true, features = ["stream"] } futures.workspace = true rand.workspace = true diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index 2aa05a9d3011..dbd64fb5a631 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -54,7 +54,10 @@ impl AzureBlobStorage { azure_config.container_name ); - let account = env::var("AZURE_STORAGE_ACCOUNT").expect("missing AZURE_STORAGE_ACCOUNT"); + // Use the storage account from the config by default, fall back to env var if not present. + let account = azure_config.storage_account.clone().unwrap_or_else(|| { + env::var("AZURE_STORAGE_ACCOUNT").expect("missing AZURE_STORAGE_ACCOUNT") + }); // If the `AZURE_STORAGE_ACCESS_KEY` env var has an access key, use that, // otherwise try the token based credentials. diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index 8c984abed201..e39ac581c758 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -36,7 +36,6 @@ use futures::stream::Stream; use serde::{Deserialize, Serialize}; use tokio::sync::Semaphore; use tokio_util::sync::CancellationToken; -use toml_edit::Item; use tracing::info; pub use self::{ @@ -451,7 +450,7 @@ impl GenericRemoteStorage { pub fn from_config(storage_config: &RemoteStorageConfig) -> anyhow::Result { let timeout = storage_config.timeout; Ok(match &storage_config.storage { - RemoteStorageKind::LocalFs(path) => { + RemoteStorageKind::LocalFs { local_path: path } => { info!("Using fs root '{path}' as a remote storage"); Self::LocalFs(LocalFs::new(path.clone(), timeout)?) 
} @@ -466,7 +465,11 @@ impl GenericRemoteStorage { Self::AwsS3(Arc::new(S3Bucket::new(s3_config, timeout)?)) } RemoteStorageKind::AzureContainer(azure_config) => { - info!("Using azure container '{}' in region '{}' as a remote storage, prefix in container: '{:?}'", + let storage_account = azure_config + .storage_account + .as_deref() + .unwrap_or(""); + info!("Using azure container '{}' in account '{storage_account}' in region '{}' as a remote storage, prefix in container: '{:?}'", azure_config.container_name, azure_config.container_region, azure_config.prefix_in_container); Self::AzureBlob(Arc::new(AzureBlobStorage::new(azure_config, timeout)?)) } @@ -523,21 +526,28 @@ impl From<[(&str, &str); N]> for StorageMetadata { } /// External backup storage configuration, enough for creating a client for that storage. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] pub struct RemoteStorageConfig { /// The storage connection configuration. + #[serde(flatten)] pub storage: RemoteStorageKind, /// A common timeout enforced for all requests after concurrency limiter permit has been /// acquired. + #[serde(with = "humantime_serde", default = "default_timeout")] pub timeout: Duration, } +fn default_timeout() -> Duration { + RemoteStorageConfig::DEFAULT_TIMEOUT +} + /// A kind of a remote storage to connect to, with its connection configuration. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +#[serde(untagged)] pub enum RemoteStorageKind { /// Storage based on local file system. /// Specify a root folder to place all stored files into. - LocalFs(Utf8PathBuf), + LocalFs { local_path: Utf8PathBuf }, /// AWS S3 based storage, storing all files in the S3 bucket /// specified by the config AwsS3(S3Config), @@ -547,7 +557,7 @@ pub enum RemoteStorageKind { } /// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write). -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, serde::Deserialize)] pub struct S3Config { /// Name of the bucket to connect to. pub bucket_name: String, @@ -564,11 +574,24 @@ pub struct S3Config { pub endpoint: Option, /// AWS S3 has various limits on its API calls, we need not to exceed those. /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details. + #[serde(default = "default_remote_storage_s3_concurrency_limit")] pub concurrency_limit: NonZeroUsize, + #[serde(default = "default_max_keys_per_list_response")] pub max_keys_per_list_response: Option, + #[serde(deserialize_with = "deserialize_storage_class", default)] pub upload_storage_class: Option, } +fn default_remote_storage_s3_concurrency_limit() -> NonZeroUsize { + DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT + .try_into() + .unwrap() +} + +fn default_max_keys_per_list_response() -> Option { + DEFAULT_MAX_KEYS_PER_LIST_RESPONSE +} + impl Debug for S3Config { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("S3Config") @@ -585,26 +608,35 @@ impl Debug for S3Config { } /// Azure bucket coordinates and access credentials to manage the bucket contents (read and write). -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct AzureConfig { /// Name of the container to connect to. pub container_name: String, + /// Name of the storage account the container is inside of + pub storage_account: Option, /// The region where the bucket is located at. 
pub container_region: String, /// A "subfolder" in the container, to use the same container separately by multiple remote storage users at once. pub prefix_in_container: Option, /// Azure has various limits on its API calls, we need not to exceed those. /// See [`DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT`] for more details. + #[serde(default = "default_remote_storage_azure_concurrency_limit")] pub concurrency_limit: NonZeroUsize, + #[serde(default = "default_max_keys_per_list_response")] pub max_keys_per_list_response: Option, } +fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize { + NonZeroUsize::new(DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT).unwrap() +} + impl Debug for AzureConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("AzureConfig") .field("bucket_name", &self.container_name) + .field("storage_account", &self.storage_account) .field("bucket_region", &self.container_region) - .field("prefix_in_bucket", &self.prefix_in_container) + .field("prefix_in_container", &self.prefix_in_container) .field("concurrency_limit", &self.concurrency_limit) .field( "max_keys_per_list_response", @@ -614,161 +646,47 @@ impl Debug for AzureConfig { } } +fn deserialize_storage_class<'de, D: serde::Deserializer<'de>>( + deserializer: D, +) -> Result, D::Error> { + Option::::deserialize(deserializer).and_then(|s| { + if let Some(s) = s { + use serde::de::Error; + let storage_class = StorageClass::from_str(&s).expect("infallible"); + #[allow(deprecated)] + if matches!(storage_class, StorageClass::Unknown(_)) { + return Err(D::Error::custom(format!( + "Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}", + StorageClass::values() + ))); + } + Ok(Some(storage_class)) + } else { + Ok(None) + } + }) +} + impl RemoteStorageConfig { pub const DEFAULT_TIMEOUT: Duration = std::time::Duration::from_secs(120); pub fn from_toml(toml: &toml_edit::Item) -> anyhow::Result> { - let local_path = toml.get("local_path"); - let bucket_name = toml.get("bucket_name"); - let bucket_region = toml.get("bucket_region"); - let container_name = toml.get("container_name"); - let container_region = toml.get("container_region"); - - let use_azure = container_name.is_some() && container_region.is_some(); - - let default_concurrency_limit = if use_azure { - DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT - } else { - DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT + let document: toml_edit::Document = match toml { + toml_edit::Item::Table(toml) => toml.clone().into(), + toml_edit::Item::Value(toml_edit::Value::InlineTable(toml)) => { + toml.clone().into_table().into() + } + _ => bail!("toml not a table or inline table"), }; - let concurrency_limit = NonZeroUsize::new( - parse_optional_integer("concurrency_limit", toml)?.unwrap_or(default_concurrency_limit), - ) - .context("Failed to parse 'concurrency_limit' as a positive integer")?; - - let max_keys_per_list_response = - parse_optional_integer::("max_keys_per_list_response", toml) - .context("Failed to parse 'max_keys_per_list_response' as a positive integer")? 
- .or(DEFAULT_MAX_KEYS_PER_LIST_RESPONSE); - - let endpoint = toml - .get("endpoint") - .map(|endpoint| parse_toml_string("endpoint", endpoint)) - .transpose()?; - - let timeout = toml - .get("timeout") - .map(|timeout| { - timeout - .as_str() - .ok_or_else(|| anyhow::Error::msg("timeout was not a string")) - }) - .transpose() - .and_then(|timeout| { - timeout - .map(humantime::parse_duration) - .transpose() - .map_err(anyhow::Error::new) - }) - .context("parse timeout")? - .unwrap_or(Self::DEFAULT_TIMEOUT); - if timeout < Duration::from_secs(1) { - bail!("timeout was specified as {timeout:?} which is too low"); + if document.is_empty() { + return Ok(None); } - let storage = match ( - local_path, - bucket_name, - bucket_region, - container_name, - container_region, - ) { - // no 'local_path' nor 'bucket_name' options are provided, consider this remote storage disabled - (None, None, None, None, None) => return Ok(None), - (_, Some(_), None, ..) => { - bail!("'bucket_region' option is mandatory if 'bucket_name' is given ") - } - (_, None, Some(_), ..) => { - bail!("'bucket_name' option is mandatory if 'bucket_region' is given ") - } - (None, Some(bucket_name), Some(bucket_region), ..) => { - RemoteStorageKind::AwsS3(S3Config { - bucket_name: parse_toml_string("bucket_name", bucket_name)?, - bucket_region: parse_toml_string("bucket_region", bucket_region)?, - prefix_in_bucket: toml - .get("prefix_in_bucket") - .map(|prefix_in_bucket| { - parse_toml_string("prefix_in_bucket", prefix_in_bucket) - }) - .transpose()?, - endpoint, - concurrency_limit, - max_keys_per_list_response, - upload_storage_class: toml - .get("upload_storage_class") - .map(|prefix_in_bucket| -> anyhow::Result<_> { - let s = parse_toml_string("upload_storage_class", prefix_in_bucket)?; - let storage_class = StorageClass::from_str(&s).expect("infallible"); - #[allow(deprecated)] - if matches!(storage_class, StorageClass::Unknown(_)) { - bail!("Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}", StorageClass::values()); - } - Ok(storage_class) - }) - .transpose()?, - }) - } - (_, _, _, Some(_), None) => { - bail!("'container_name' option is mandatory if 'container_region' is given ") - } - (_, _, _, None, Some(_)) => { - bail!("'container_name' option is mandatory if 'container_region' is given ") - } - (None, None, None, Some(container_name), Some(container_region)) => { - RemoteStorageKind::AzureContainer(AzureConfig { - container_name: parse_toml_string("container_name", container_name)?, - container_region: parse_toml_string("container_region", container_region)?, - prefix_in_container: toml - .get("prefix_in_container") - .map(|prefix_in_container| { - parse_toml_string("prefix_in_container", prefix_in_container) - }) - .transpose()?, - concurrency_limit, - max_keys_per_list_response, - }) - } - (Some(local_path), None, None, None, None) => RemoteStorageKind::LocalFs( - Utf8PathBuf::from(parse_toml_string("local_path", local_path)?), - ), - (Some(_), Some(_), ..) 
=> { - bail!("'local_path' and 'bucket_name' are mutually exclusive") - } - (Some(_), _, _, Some(_), Some(_)) => { - bail!("local_path and 'container_name' are mutually exclusive") - } - }; - - Ok(Some(RemoteStorageConfig { storage, timeout })) + Ok(Some(toml_edit::de::from_document(document)?)) } } -// Helper functions to parse a toml Item -fn parse_optional_integer(name: &str, item: &toml_edit::Item) -> anyhow::Result> -where - I: TryFrom, - E: std::error::Error + Send + Sync + 'static, -{ - let toml_integer = match item.get(name) { - Some(item) => item - .as_integer() - .with_context(|| format!("configure option {name} is not an integer"))?, - None => return Ok(None), - }; - - I::try_from(toml_integer) - .map(Some) - .with_context(|| format!("configure option {name} is too large")) -} - -fn parse_toml_string(name: &str, item: &Item) -> anyhow::Result { - let s = item - .as_str() - .with_context(|| format!("configure option {name} is not a string"))?; - Ok(s.to_string()) -} - struct ConcurrencyLimiter { // Every request to S3 can be throttled or cancelled, if a certain number of requests per second is exceeded. // Same goes to IAM, which is queried before every S3 request, if enabled. IAM has even lower RPS threshold. @@ -815,6 +733,11 @@ impl ConcurrencyLimiter { mod tests { use super::*; + fn parse(input: &str) -> anyhow::Result> { + let toml = input.parse::().unwrap(); + RemoteStorageConfig::from_toml(toml.as_item()) + } + #[test] fn test_object_name() { let k = RemotePath::new(Utf8Path::new("a/b/c")).unwrap(); @@ -842,18 +765,71 @@ mod tests { let input = "local_path = '.' timeout = '5s'"; - let toml = input.parse::().unwrap(); - - let config = RemoteStorageConfig::from_toml(toml.as_item()) - .unwrap() - .expect("it exists"); + let config = parse(input).unwrap().expect("it exists"); assert_eq!( config, RemoteStorageConfig { - storage: RemoteStorageKind::LocalFs(Utf8PathBuf::from(".")), + storage: RemoteStorageKind::LocalFs { + local_path: Utf8PathBuf::from(".") + }, timeout: Duration::from_secs(5) } ); } + + #[test] + fn test_s3_parsing() { + let toml = "\ + bucket_name = 'foo-bar' + bucket_region = 'eu-central-1' + upload_storage_class = 'INTELLIGENT_TIERING' + timeout = '7s' + "; + + let config = parse(toml).unwrap().expect("it exists"); + + assert_eq!( + config, + RemoteStorageConfig { + storage: RemoteStorageKind::AwsS3(S3Config { + bucket_name: "foo-bar".into(), + bucket_region: "eu-central-1".into(), + prefix_in_bucket: None, + endpoint: None, + concurrency_limit: default_remote_storage_s3_concurrency_limit(), + max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, + upload_storage_class: Some(StorageClass::IntelligentTiering), + }), + timeout: Duration::from_secs(7) + } + ); + } + + #[test] + fn test_azure_parsing() { + let toml = "\ + container_name = 'foo-bar' + container_region = 'westeurope' + upload_storage_class = 'INTELLIGENT_TIERING' + timeout = '7s' + "; + + let config = parse(toml).unwrap().expect("it exists"); + + assert_eq!( + config, + RemoteStorageConfig { + storage: RemoteStorageKind::AzureContainer(AzureConfig { + container_name: "foo-bar".into(), + storage_account: None, + container_region: "westeurope".into(), + prefix_in_container: None, + concurrency_limit: default_remote_storage_azure_concurrency_limit(), + max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, + }), + timeout: Duration::from_secs(7) + } + ); + } } diff --git a/libs/remote_storage/tests/test_real_azure.rs b/libs/remote_storage/tests/test_real_azure.rs index 
cd0b2be4b5fe..23628dfebecc 100644 --- a/libs/remote_storage/tests/test_real_azure.rs +++ b/libs/remote_storage/tests/test_real_azure.rs @@ -212,6 +212,7 @@ fn create_azure_client( let remote_storage_config = RemoteStorageConfig { storage: RemoteStorageKind::AzureContainer(AzureConfig { container_name: remote_storage_azure_container, + storage_account: None, container_region: remote_storage_azure_region, prefix_in_container: Some(format!("test_{millis}_{random:08x}/")), concurrency_limit: NonZeroUsize::new(100).unwrap(), diff --git a/libs/vm_monitor/src/cgroup.rs b/libs/vm_monitor/src/cgroup.rs index 7160a42df256..32237650164a 100644 --- a/libs/vm_monitor/src/cgroup.rs +++ b/libs/vm_monitor/src/cgroup.rs @@ -25,6 +25,8 @@ pub struct Config { /// /// For simplicity, this value must be greater than or equal to `memory_history_len`. memory_history_log_interval: usize, + /// The max number of iterations to skip before logging the next iteration + memory_history_log_noskip_interval: Duration, } impl Default for Config { @@ -33,6 +35,7 @@ impl Default for Config { memory_poll_interval: Duration::from_millis(100), memory_history_len: 5, // use 500ms of history for decision-making memory_history_log_interval: 20, // but only log every ~2s (otherwise it's spammy) + memory_history_log_noskip_interval: Duration::from_secs(15), // but only if it's changed, or 60 seconds have passed } } } @@ -85,7 +88,12 @@ impl CgroupWatcher { // buffer for samples that will be logged. once full, it remains so. let history_log_len = self.config.memory_history_log_interval; + let max_skip = self.config.memory_history_log_noskip_interval; let mut history_log_buf = vec![MemoryStatus::zeroed(); history_log_len]; + let mut last_logged_memusage = MemoryStatus::zeroed(); + + // Ensure that we're tracking a value that's definitely in the past, as Instant::now is only guaranteed to be non-decreasing on Rust's T1-supported systems. + let mut can_skip_logs_until = Instant::now() - max_skip; for t in 0_u64.. { ticker.tick().await; @@ -115,12 +123,24 @@ impl CgroupWatcher { // equal to the logging interval, we can just log the entire buffer every time we set // the last entry, which also means that for this log line, we can ignore that it's a // ring buffer (because all the entries are in order of increasing time). - if i == history_log_len - 1 { + // + // We skip logging the data if data hasn't meaningfully changed in a while, unless + // we've already ignored previous iterations for the last max_skip period. + if i == history_log_len - 1 + && (now > can_skip_logs_until + || !history_log_buf + .iter() + .all(|usage| last_logged_memusage.status_is_close_or_similar(usage))) + { info!( history = ?MemoryStatus::debug_slice(&history_log_buf), summary = ?summary, "Recent cgroup memory statistics history" ); + + can_skip_logs_until = now + max_skip; + + last_logged_memusage = *history_log_buf.last().unwrap(); } updates @@ -232,6 +252,24 @@ impl MemoryStatus { DS(slice) } + + /// Check if the other memory status is a close or similar result. + /// Returns true if the larger value is not larger than the smaller value + /// by 1/8 of the smaller value, and within 128MiB. 
+ /// See tests::check_similarity_behaviour for examples of behaviour + fn status_is_close_or_similar(&self, other: &MemoryStatus) -> bool { + let margin; + let diff; + if self.non_reclaimable >= other.non_reclaimable { + margin = other.non_reclaimable / 8; + diff = self.non_reclaimable - other.non_reclaimable; + } else { + margin = self.non_reclaimable / 8; + diff = other.non_reclaimable - self.non_reclaimable; + } + + diff < margin && diff < 128 * 1024 * 1024 + } } #[cfg(test)] @@ -261,4 +299,65 @@ mod tests { assert_eq!(values(2, 4), [9, 0, 1, 2]); assert_eq!(values(2, 10), [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]); } + + #[test] + fn check_similarity_behaviour() { + // This all accesses private methods, so we can't actually run this + // as doctests, because doctests run as an external crate. + let mut small = super::MemoryStatus { + non_reclaimable: 1024, + }; + let mut large = super::MemoryStatus { + non_reclaimable: 1024 * 1024 * 1024 * 1024, + }; + + // objects are self-similar, no matter the size + assert!(small.status_is_close_or_similar(&small)); + assert!(large.status_is_close_or_similar(&large)); + + // inequality is symmetric + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + + small.non_reclaimable = 64; + large.non_reclaimable = (small.non_reclaimable / 8) * 9; + + // objects are self-similar, no matter the size + assert!(small.status_is_close_or_similar(&small)); + assert!(large.status_is_close_or_similar(&large)); + + // values are similar if the larger value is larger by less than + // 12.5%, i.e. 1/8 of the smaller value. + // In the example above, large is exactly 12.5% larger, so this doesn't + // match. + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + + large.non_reclaimable -= 1; + assert!(large.status_is_close_or_similar(&large)); + + assert!(small.status_is_close_or_similar(&large)); + assert!(large.status_is_close_or_similar(&small)); + + // The 1/8 rule only applies up to 128MiB of difference + small.non_reclaimable = 1024 * 1024 * 1024 * 1024; + large.non_reclaimable = small.non_reclaimable / 8 * 9; + assert!(small.status_is_close_or_similar(&small)); + assert!(large.status_is_close_or_similar(&large)); + + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + // the large value is put just above the threshold + large.non_reclaimable = small.non_reclaimable + 128 * 1024 * 1024; + assert!(large.status_is_close_or_similar(&large)); + + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + // now below + large.non_reclaimable -= 1; + assert!(large.status_is_close_or_similar(&large)); + + assert!(small.status_is_close_or_similar(&large)); + assert!(large.status_is_close_or_similar(&small)); + } } diff --git a/libs/vm_monitor/src/dispatcher.rs b/libs/vm_monitor/src/dispatcher.rs index c76baf04e7a2..6a965ace9b04 100644 --- a/libs/vm_monitor/src/dispatcher.rs +++ b/libs/vm_monitor/src/dispatcher.rs @@ -12,11 +12,11 @@ use futures::{ stream::{SplitSink, SplitStream}, SinkExt, StreamExt, }; -use tracing::info; +use tracing::{debug, info}; use crate::protocol::{ - OutboundMsg, ProtocolRange, ProtocolResponse, ProtocolVersion, PROTOCOL_MAX_VERSION, - PROTOCOL_MIN_VERSION, + OutboundMsg, OutboundMsgKind, ProtocolRange, ProtocolResponse, ProtocolVersion, + PROTOCOL_MAX_VERSION, PROTOCOL_MIN_VERSION, }; /// The central handler for all communications in the 
monitor. @@ -118,7 +118,12 @@ impl Dispatcher { /// serialize the wrong thing and send it, since `self.sink.send` will take /// any string. pub async fn send(&mut self, message: OutboundMsg) -> anyhow::Result<()> { - info!(?message, "sending message"); + if matches!(&message.inner, OutboundMsgKind::HealthCheck { .. }) { + debug!(?message, "sending message"); + } else { + info!(?message, "sending message"); + } + let json = serde_json::to_string(&message).context("failed to serialize message")?; self.sink .send(Message::Text(json)) diff --git a/libs/vm_monitor/src/runner.rs b/libs/vm_monitor/src/runner.rs index ca02637ecf9d..36f8573a38ff 100644 --- a/libs/vm_monitor/src/runner.rs +++ b/libs/vm_monitor/src/runner.rs @@ -12,7 +12,7 @@ use axum::extract::ws::{Message, WebSocket}; use futures::StreamExt; use tokio::sync::{broadcast, watch}; use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use crate::cgroup::{self, CgroupWatcher}; use crate::dispatcher::Dispatcher; @@ -474,26 +474,29 @@ impl Runner { // there is a message from the agent msg = self.dispatcher.source.next() => { if let Some(msg) = msg { - // Don't use 'message' as a key as the string also uses - // that for its key - info!(?msg, "received message"); - match msg { + match &msg { Ok(msg) => { let message: InboundMsg = match msg { Message::Text(text) => { - serde_json::from_str(&text).context("failed to deserialize text message")? + serde_json::from_str(text).context("failed to deserialize text message")? } other => { warn!( // Don't use 'message' as a key as the // string also uses that for its key msg = ?other, - "agent should only send text messages but received different type" + "problem processing incoming message: agent should only send text messages but received different type" ); continue }, }; + if matches!(&message.inner, InboundMsgKind::HealthCheck { .. }) { + debug!(?msg, "received message"); + } else { + info!(?msg, "received message"); + } + let out = match self.process_message(message.clone()).await { Ok(Some(out)) => out, Ok(None) => continue, @@ -517,7 +520,11 @@ impl Runner { .await .context("failed to send message")?; } - Err(e) => warn!("{e}"), + Err(e) => warn!( + error = format!("{e}"), + msg = ?msg, + "received error message" + ), } } else { anyhow::bail!("dispatcher connection closed") diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index b4a0d1ac0235..feb136384325 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -39,8 +39,8 @@ use crate::tenant::{ use crate::{disk_usage_eviction_task::DiskUsageEvictionTaskConfig, virtual_file::io_engine}; use crate::{tenant::config::TenantConf, virtual_file}; use crate::{ - IGNORED_TENANT_FILE_NAME, TENANT_CONFIG_NAME, TENANT_HEATMAP_BASENAME, - TENANT_LOCATION_CONFIG_NAME, TIMELINE_DELETE_MARK_SUFFIX, + TENANT_CONFIG_NAME, TENANT_HEATMAP_BASENAME, TENANT_LOCATION_CONFIG_NAME, + TIMELINE_DELETE_MARK_SUFFIX, }; use self::defaults::DEFAULT_CONCURRENT_TENANT_WARMUP; @@ -811,11 +811,6 @@ impl PageServerConf { self.tenants_path().join(tenant_shard_id.to_string()) } - pub fn tenant_ignore_mark_file_path(&self, tenant_shard_id: &TenantShardId) -> Utf8PathBuf { - self.tenant_path(tenant_shard_id) - .join(IGNORED_TENANT_FILE_NAME) - } - /// Points to a place in pageserver's local directory, /// where certain tenant's tenantconf file should be located. 
/// @@ -1468,7 +1463,7 @@ broker_endpoint = '{broker_endpoint}' assert_eq!( parsed_remote_storage_config, RemoteStorageConfig { - storage: RemoteStorageKind::LocalFs(local_storage_path.clone()), + storage: RemoteStorageKind::LocalFs { local_path: local_storage_path.clone() }, timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, }, "Remote storage config should correctly parse the local FS config and fill other storage defaults" diff --git a/pageserver/src/deletion_queue.rs b/pageserver/src/deletion_queue.rs index 3960fc1b999d..e779729f8d48 100644 --- a/pageserver/src/deletion_queue.rs +++ b/pageserver/src/deletion_queue.rs @@ -850,7 +850,9 @@ mod test { std::fs::create_dir_all(remote_fs_dir)?; let remote_fs_dir = harness.conf.workdir.join("remote_fs").canonicalize_utf8()?; let storage_config = RemoteStorageConfig { - storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()), + storage: RemoteStorageKind::LocalFs { + local_path: remote_fs_dir.clone(), + }, timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, }; let storage = GenericRemoteStorage::from_config(&storage_config).unwrap(); diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index 71b486a4d3ef..1bc8fe906645 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -78,29 +78,14 @@ paths: delete: description: | - Attempts to delete specified tenant. 500, 503 and 409 errors should be retried until 404 is retrieved. - 404 means that deletion successfully finished" + Attempts to delete specified tenant. 500, 503 and 409 errors should be retried. Deleting + a non-existent tenant is considered successful (returns 200). responses: "200": description: Tenant was successfully deleted, or was already not found. - "404": - description: Tenant not found. This is a success result, equivalent to 200. - content: - application/json: - schema: - $ref: "#/components/schemas/NotFoundError" - "409": - description: Deletion is already in progress, continue polling - content: - application/json: - schema: - $ref: "#/components/schemas/ConflictError" - "412": - description: Deletion may not proceed, tenant is not in Active state - content: - application/json: - schema: - $ref: "#/components/schemas/PreconditionFailedError" + "503": + description: Service is unavailable, or tenant is already being modified (perhaps concurrently deleted) + /v1/tenant/{tenant_id}/time_travel_remote_storage: parameters: @@ -389,48 +374,6 @@ paths: application/json: schema: $ref: "#/components/schemas/ConflictError" - /v1/tenant/{tenant_id}/ignore: - parameters: - - name: tenant_id - in: path - required: true - schema: - type: string - post: - description: | - Remove tenant data (including all corresponding timelines) from pageserver's memory. - Files on local disk and remote storage are not affected. - - Future pageserver restarts won't load the data back until `load` is called on such tenant. - responses: - "200": - description: Tenant ignored - - - /v1/tenant/{tenant_id}/load: - parameters: - - name: tenant_id - in: path - required: true - schema: - type: string - post: - description: | - Schedules an operation that attempts to load a tenant from the local disk and - synchronise it with the remote storage (if enabled), repeating pageserver's restart logic for tenant load. - If the tenant was ignored before, removes the ignore mark and continues with load scheduling. - - Errors if the tenant is absent on disk, already present in memory or fails to schedule its load. 
- Scheduling a load does not mean that the tenant would load successfully, check tenant status to ensure load correctness. - requestBody: - required: false - content: - application/json: - schema: - $ref: "#/components/schemas/TenantLoadRequest" - responses: - "202": - description: Tenant scheduled to load successfully /v1/tenant/{tenant_id}/{timeline_id}/preserve_initdb_archive: parameters: diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 657708c0d613..b5713a8cb441 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -36,7 +36,7 @@ use pageserver_api::models::TopTenantShardsRequest; use pageserver_api::models::TopTenantShardsResponse; use pageserver_api::models::{ DownloadRemoteLayersTaskSpawnRequest, LocationConfigMode, TenantAttachRequest, - TenantLoadRequest, TenantLocationConfigRequest, + TenantLocationConfigRequest, }; use pageserver_api::shard::ShardCount; use pageserver_api::shard::TenantShardId; @@ -205,7 +205,6 @@ impl From for ApiError { NotFound(tenant_id) => { ApiError::NotFound(anyhow::anyhow!("NotFound: tenant {tenant_id}").into()) } - e @ AlreadyExists(_, _) => ApiError::Conflict(format!("{e}")), InProgress => { ApiError::ResourceUnavailable("Tenant is being modified concurrently".into()) } @@ -335,13 +334,10 @@ impl From for ApiError { use crate::tenant::delete::DeleteTenantError::*; match value { Get(g) => ApiError::from(g), - e @ AlreadyInProgress => ApiError::Conflict(e.to_string()), Timeline(t) => ApiError::from(t), - NotAttached => ApiError::NotFound(anyhow::anyhow!("Tenant is not attached").into()), SlotError(e) => e.into(), SlotUpsertError(e) => e.into(), Other(o) => ApiError::InternalServerError(o), - e @ InvalidState(_) => ApiError::PreconditionFailed(e.to_string().into_boxed_str()), Cancelled => ApiError::ShuttingDown, } } @@ -891,8 +887,6 @@ async fn tenant_detach_handler( ) -> Result, ApiError> { let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; check_permission(&request, Some(tenant_id))?; - let detach_ignored: Option = parse_query_param(&request, "detach_ignored")?; - // This is a legacy API (`/location_conf` is the replacement). It only supports unsharded tenants let tenant_shard_id = TenantShardId::unsharded(tenant_id); @@ -900,12 +894,7 @@ async fn tenant_detach_handler( let conf = state.conf; state .tenant_manager - .detach_tenant( - conf, - tenant_shard_id, - detach_ignored.unwrap_or(false), - &state.deletion_queue_client, - ) + .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client) .instrument(info_span!("tenant_detach", %tenant_id, shard_id=%tenant_shard_id.shard_slug())) .await?; @@ -932,54 +921,6 @@ async fn tenant_reset_handler( json_response(StatusCode::OK, ()) } -async fn tenant_load_handler( - mut request: Request, - _cancel: CancellationToken, -) -> Result, ApiError> { - let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; - check_permission(&request, Some(tenant_id))?; - - let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn); - - let maybe_body: Option = json_request_or_empty_body(&mut request).await?; - - let state = get_state(&request); - - // The /load request is only usable when control_plane_api is not set. Once it is set, callers - // should always use /attach instead. 
- let generation = get_request_generation(state, maybe_body.as_ref().and_then(|r| r.generation))?; - - mgr::load_tenant( - state.conf, - tenant_id, - generation, - state.broker_client.clone(), - state.remote_storage.clone(), - state.deletion_queue_client.clone(), - &ctx, - ) - .instrument(info_span!("load", %tenant_id)) - .await?; - - json_response(StatusCode::ACCEPTED, ()) -} - -async fn tenant_ignore_handler( - request: Request, - _cancel: CancellationToken, -) -> Result, ApiError> { - let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?; - check_permission(&request, Some(tenant_id))?; - - let state = get_state(&request); - let conf = state.conf; - mgr::ignore_tenant(conf, tenant_id) - .instrument(info_span!("ignore_tenant", %tenant_id)) - .await?; - - json_response(StatusCode::OK, ()) -} - async fn tenant_list_handler( request: Request, _cancel: CancellationToken, @@ -1071,23 +1012,16 @@ async fn tenant_delete_handler( let state = get_state(&request); - let status = state + state .tenant_manager - .delete_tenant(tenant_shard_id, ACTIVE_TENANT_TIMEOUT) + .delete_tenant(tenant_shard_id) .instrument(info_span!("tenant_delete_handler", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug() )) .await?; - // Callers use 404 as success for deletions, for historical reasons. - if status == StatusCode::NOT_FOUND { - return Err(ApiError::NotFound( - anyhow::anyhow!("Deletion complete").into(), - )); - } - - json_response(status, ()) + json_response(StatusCode::OK, ()) } /// HTTP endpoint to query the current tenant_size of a tenant. @@ -1507,7 +1441,7 @@ async fn put_tenant_location_config_handler( if let LocationConfigMode::Detached = request_data.config.mode { if let Err(e) = state .tenant_manager - .detach_tenant(conf, tenant_shard_id, true, &state.deletion_queue_client) + .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client) .instrument(info_span!("tenant_detach", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug() @@ -1730,7 +1664,7 @@ async fn lsn_lease_handler( active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) .await?; let result = timeline - .make_lsn_lease(lsn, &ctx) + .make_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx) .map_err(|e| ApiError::InternalServerError(e.context("lsn lease http handler")))?; json_response(StatusCode::OK, result) @@ -2764,12 +2698,6 @@ pub fn make_router( .post("/v1/tenant/:tenant_shard_id/reset", |r| { api_handler(r, tenant_reset_handler) }) - .post("/v1/tenant/:tenant_id/load", |r| { - api_handler(r, tenant_load_handler) - }) - .post("/v1/tenant/:tenant_id/ignore", |r| { - api_handler(r, tenant_ignore_handler) - }) .post( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive", |r| api_handler(r, timeline_preserve_initdb_handler), diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index c69fb8c83b8e..9e64eafffcab 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -136,13 +136,6 @@ pub(crate) const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit"; pub(crate) const TIMELINE_DELETE_MARK_SUFFIX: &str = "___delete"; -/// A marker file to prevent pageserver from loading a certain tenant on restart. -/// Different from [`TIMELINE_UNINIT_MARK_SUFFIX`] due to semantics of the corresponding -/// `ignore` management API command, that expects the ignored tenant to be properly loaded -/// into pageserver's memory before being ignored. -/// Full path: `tenants//___ignored_tenant`. 
-pub const IGNORED_TENANT_FILE_NAME: &str = "___ignored_tenant"; - pub fn is_temporary(path: &Utf8Path) -> bool { match path.file_name() { Some(name) => name.ends_with(TEMP_FILE_SUFFIX), diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index e8a1e063c506..5c8f350f7b2a 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -145,14 +145,6 @@ impl ReconstructTimeMetrics { } } -pub(crate) static MATERIALIZED_PAGE_CACHE_HIT_DIRECT: Lazy = Lazy::new(|| { - register_int_counter!( - "pageserver_materialized_cache_hits_direct_total", - "Number of cache hits from materialized page cache without redo", - ) - .expect("failed to define a metric") -}); - pub(crate) struct ReconstructDataTimeMetrics { singular: Histogram, vectored: Histogram, @@ -182,14 +174,6 @@ pub(crate) static GET_RECONSTRUCT_DATA_TIME: Lazy = } }); -pub(crate) static MATERIALIZED_PAGE_CACHE_HIT: Lazy = Lazy::new(|| { - register_int_counter!( - "pageserver_materialized_cache_hits_total", - "Number of cache hits from materialized page cache", - ) - .expect("failed to define a metric") -}); - pub(crate) struct GetVectoredLatency { map: EnumMap>, } @@ -298,12 +282,8 @@ pub(crate) static SCAN_LATENCY: Lazy = Lazy::new(|| { }); pub(crate) struct PageCacheMetricsForTaskKind { - pub read_accesses_materialized_page: IntCounter, pub read_accesses_immutable: IntCounter, - pub read_hits_immutable: IntCounter, - pub read_hits_materialized_page_exact: IntCounter, - pub read_hits_materialized_page_older_lsn: IntCounter, } pub(crate) struct PageCacheMetrics { @@ -336,16 +316,6 @@ pub(crate) static PAGE_CACHE: Lazy = Lazy::new(|| PageCacheMet let content_kind = ::from_usize(content_kind); let content_kind: &'static str = content_kind.into(); PageCacheMetricsForTaskKind { - read_accesses_materialized_page: { - PAGE_CACHE_READ_ACCESSES - .get_metric_with_label_values(&[ - task_kind, - "materialized_page", - content_kind, - ]) - .unwrap() - }, - read_accesses_immutable: { PAGE_CACHE_READ_ACCESSES .get_metric_with_label_values(&[task_kind, "immutable", content_kind]) @@ -357,28 +327,6 @@ pub(crate) static PAGE_CACHE: Lazy = Lazy::new(|| PageCacheMet .get_metric_with_label_values(&[task_kind, "immutable", content_kind, "-"]) .unwrap() }, - - read_hits_materialized_page_exact: { - PAGE_CACHE_READ_HITS - .get_metric_with_label_values(&[ - task_kind, - "materialized_page", - content_kind, - "exact", - ]) - .unwrap() - }, - - read_hits_materialized_page_older_lsn: { - PAGE_CACHE_READ_HITS - .get_metric_with_label_values(&[ - task_kind, - "materialized_page", - content_kind, - "older_lsn", - ]) - .unwrap() - }, } })) })), @@ -394,7 +342,6 @@ pub(crate) struct PageCacheSizeMetrics { pub max_bytes: UIntGauge, pub current_bytes_immutable: UIntGauge, - pub current_bytes_materialized_page: UIntGauge, } static PAGE_CACHE_SIZE_CURRENT_BYTES: Lazy = Lazy::new(|| { @@ -420,11 +367,6 @@ pub(crate) static PAGE_CACHE_SIZE: Lazy = .get_metric_with_label_values(&["immutable"]) .unwrap() }, - current_bytes_materialized_page: { - PAGE_CACHE_SIZE_CURRENT_BYTES - .get_metric_with_label_values(&["materialized_page"]) - .unwrap() - }, }); pub(crate) mod page_cache_eviction_metrics { @@ -1405,17 +1347,23 @@ static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| { .map(|ms| (ms as f64) / 1000.0) }); -pub(crate) struct BasebackupQueryTime(HistogramVec); +pub(crate) struct BasebackupQueryTime { + ok: Histogram, + error: Histogram, +} + pub(crate) static BASEBACKUP_QUERY_TIME: Lazy = Lazy::new(|| { - BasebackupQueryTime({ - 
register_histogram_vec!( - "pageserver_basebackup_query_seconds", - "Histogram of basebackup queries durations, by result type", - &["result"], - COMPUTE_STARTUP_BUCKETS.to_vec(), - ) - .expect("failed to define a metric") - }) + let vec = register_histogram_vec!( + "pageserver_basebackup_query_seconds", + "Histogram of basebackup queries durations, by result type", + &["result"], + COMPUTE_STARTUP_BUCKETS.to_vec(), + ) + .expect("failed to define a metric"); + BasebackupQueryTime { + ok: vec.get_metric_with_label_values(&["ok"]).unwrap(), + error: vec.get_metric_with_label_values(&["error"]).unwrap(), + } }); pub(crate) struct BasebackupQueryTimeOngoingRecording<'a, 'c> { @@ -1470,12 +1418,11 @@ impl<'a, 'c> BasebackupQueryTimeOngoingRecording<'a, 'c> { elapsed } }; - let label_value = if res.is_ok() { "ok" } else { "error" }; - let metric = self - .parent - .0 - .get_metric_with_label_values(&[label_value]) - .unwrap(); + let metric = if res.is_ok() { + &self.parent.ok + } else { + &self.parent.error + }; metric.observe(ex_throttled.as_secs_f64()); } } @@ -2918,13 +2865,11 @@ pub fn preinitialize_metrics() { // FIXME(4813): make it so that we have no top level metrics as this fn will easily fall out of // order: // - global metrics reside in a Lazy - // - access via crate::metrics::PS_METRICS.materialized_page_cache_hit.inc() + // - access via crate::metrics::PS_METRICS.some_metric.inc() // - could move the statics into TimelineMetrics::new()? // counters [ - &MATERIALIZED_PAGE_CACHE_HIT, - &MATERIALIZED_PAGE_CACHE_HIT_DIRECT, &UNEXPECTED_ONDEMAND_DOWNLOADS, &WALRECEIVER_STARTED_CONNECTIONS, &WALRECEIVER_BROKER_UPDATES, @@ -2986,4 +2931,5 @@ pub fn preinitialize_metrics() { // Custom Lazy::force(&RECONSTRUCT_TIME); Lazy::force(&tenant_throttling::TIMELINE_GET); + Lazy::force(&BASEBACKUP_QUERY_TIME); } diff --git a/pageserver/src/page_cache.rs b/pageserver/src/page_cache.rs index 529fb9bb07f9..f386c825b848 100644 --- a/pageserver/src/page_cache.rs +++ b/pageserver/src/page_cache.rs @@ -17,7 +17,6 @@ //! //! Two types of pages are supported: //! -//! * **Materialized pages**, filled & used by page reconstruction //! * **Immutable File pages**, filled & used by [`crate::tenant::block_io`] and [`crate::tenant::ephemeral_file`]. //! //! Note that [`crate::tenant::ephemeral_file::EphemeralFile`] is generally mutable, but, it's append-only. @@ -28,9 +27,6 @@ //! Page cache maps from a cache key to a buffer slot. //! The cache key uniquely identifies the piece of data that is being cached. //! -//! The cache key for **materialized pages** is [`TenantShardId`], [`TimelineId`], [`Key`], and [`Lsn`]. -//! Use [`PageCache::memorize_materialized_page`] and [`PageCache::lookup_materialized_page`] for fill & access. -//! //! The cache key for **immutable file** pages is [`FileId`] and a block number. //! Users of page cache that wish to page-cache an arbitrary (immutable!) on-disk file do the following: //! * Have a mechanism to deterministically associate the on-disk file with a [`FileId`]. 
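With the materialized-page half of the cache removed, the only mapping left in `page_cache.rs` is from `(FileId, blkno)` to a buffer slot. A minimal sketch of that keying scheme, using a plain `HashMap` and simple type aliases as stand-ins for the real slot machinery (usage counts, pinning, and eviction are omitted):

```
use std::collections::HashMap;

// Stand-ins for the page cache's key pieces; the real FileId is an opaque
// newtype and the value is a slot index into the buffer pool.
type FileId = u64;
type SlotIndex = usize;

fn main() {
    // After this change, the only lookup structure left is (FileId, blkno) -> slot.
    let mut immutable_page_map: HashMap<(FileId, u32), SlotIndex> = HashMap::new();

    // Filling a page records which slot now holds (file, block).
    immutable_page_map.insert((42, 0), 7);

    // A read either finds the cached slot or tells the caller to read the
    // block from disk and insert it.
    match immutable_page_map.get(&(42, 0)) {
        Some(slot_idx) => println!("cache hit, slot {slot_idx}"),
        None => println!("cache miss, fill a victim slot and insert the mapping"),
    }
}
```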
@@ -82,13 +78,10 @@ use std::{ use anyhow::Context; use once_cell::sync::OnceCell; -use pageserver_api::shard::TenantShardId; -use utils::{id::TimelineId, lsn::Lsn}; use crate::{ context::RequestContext, metrics::{page_cache_eviction_metrics, PageCacheSizeMetrics}, - repository::Key, }; static PAGE_CACHE: OnceCell = OnceCell::new(); @@ -139,33 +132,7 @@ pub fn next_file_id() -> FileId { #[derive(Debug, PartialEq, Eq, Clone)] #[allow(clippy::enum_variant_names)] enum CacheKey { - MaterializedPage { - hash_key: MaterializedPageHashKey, - lsn: Lsn, - }, - ImmutableFilePage { - file_id: FileId, - blkno: u32, - }, -} - -#[derive(Debug, PartialEq, Eq, Hash, Clone)] -struct MaterializedPageHashKey { - /// Why is this TenantShardId rather than TenantId? - /// - /// Usually, the materialized value of a page@lsn is identical on any shard in the same tenant. However, this - /// this not the case for certain internally-generated pages (e.g. relation sizes). In future, we may make this - /// key smaller by omitting the shard, if we ensure that reads to such pages always skip the cache, or are - /// special-cased in some other way. - tenant_shard_id: TenantShardId, - timeline_id: TimelineId, - key: Key, -} - -#[derive(Clone)] -struct Version { - lsn: Lsn, - slot_idx: usize, + ImmutableFilePage { file_id: FileId, blkno: u32 }, } struct Slot { @@ -236,17 +203,6 @@ impl SlotInner { } pub struct PageCache { - /// This contains the mapping from the cache key to buffer slot that currently - /// contains the page, if any. - /// - /// TODO: This is protected by a single lock. If that becomes a bottleneck, - /// this HashMap can be replaced with a more concurrent version, there are - /// plenty of such crates around. - /// - /// If you add support for caching different kinds of objects, each object kind - /// can have a separate mapping map, next to this field. - materialized_page_map: std::sync::RwLock>>, - immutable_page_map: std::sync::RwLock>, /// The actual buffers with their metadata. @@ -371,175 +327,14 @@ pub enum ReadBufResult<'a> { } impl PageCache { - // - // Section 1.1: Public interface functions for looking up and memorizing materialized page - // versions in the page cache - // - - /// Look up a materialized page version. - /// - /// The 'lsn' is an upper bound, this will return the latest version of - /// the given block, but not newer than 'lsn'. Returns the actual LSN of the - /// returned page. 
- pub async fn lookup_materialized_page( - &self, - tenant_shard_id: TenantShardId, - timeline_id: TimelineId, - key: &Key, - lsn: Lsn, - ctx: &RequestContext, - ) -> Option<(Lsn, PageReadGuard)> { - let Ok(permit) = self.try_get_pinned_slot_permit().await else { - return None; - }; - - crate::metrics::PAGE_CACHE - .for_ctx(ctx) - .read_accesses_materialized_page - .inc(); - - let mut cache_key = CacheKey::MaterializedPage { - hash_key: MaterializedPageHashKey { - tenant_shard_id, - timeline_id, - key: *key, - }, - lsn, - }; - - if let Some(guard) = self - .try_lock_for_read(&mut cache_key, &mut Some(permit)) - .await - { - if let CacheKey::MaterializedPage { - hash_key: _, - lsn: available_lsn, - } = cache_key - { - if available_lsn == lsn { - crate::metrics::PAGE_CACHE - .for_ctx(ctx) - .read_hits_materialized_page_exact - .inc(); - } else { - crate::metrics::PAGE_CACHE - .for_ctx(ctx) - .read_hits_materialized_page_older_lsn - .inc(); - } - Some((available_lsn, guard)) - } else { - panic!("unexpected key type in slot"); - } - } else { - None - } - } - - /// - /// Store an image of the given page in the cache. - /// - pub async fn memorize_materialized_page( - &self, - tenant_shard_id: TenantShardId, - timeline_id: TimelineId, - key: Key, - lsn: Lsn, - img: &[u8], - ) -> anyhow::Result<()> { - let cache_key = CacheKey::MaterializedPage { - hash_key: MaterializedPageHashKey { - tenant_shard_id, - timeline_id, - key, - }, - lsn, - }; - - let mut permit = Some(self.try_get_pinned_slot_permit().await?); - loop { - // First check if the key already exists in the cache. - if let Some(slot_idx) = self.search_mapping_exact(&cache_key) { - // The page was found in the mapping. Lock the slot, and re-check - // that it's still what we expected (because we don't released the mapping - // lock already, another thread could have evicted the page) - let slot = &self.slots[slot_idx]; - let inner = slot.inner.write().await; - if inner.key.as_ref() == Some(&cache_key) { - slot.inc_usage_count(); - debug_assert!( - { - let guard = inner.permit.lock().unwrap(); - guard.upgrade().is_none() - }, - "we hold a write lock, so, no one else should have a permit" - ); - debug_assert_eq!(inner.buf.len(), img.len()); - // We already had it in cache. Another thread must've put it there - // concurrently. Check that it had the same contents that we - // replayed. - assert!(inner.buf == img); - return Ok(()); - } - } - debug_assert!(permit.is_some()); - - // Not found. Find a victim buffer - let (slot_idx, mut inner) = self - .find_victim(permit.as_ref().unwrap()) - .await - .context("Failed to find evict victim")?; - - // Insert mapping for this. At this point, we may find that another - // thread did the same thing concurrently. In that case, we evicted - // our victim buffer unnecessarily. Put it into the free list and - // continue with the slot that the other thread chose. - if let Some(_existing_slot_idx) = self.try_insert_mapping(&cache_key, slot_idx) { - // TODO: put to free list - - // We now just loop back to start from beginning. This is not - // optimal, we'll perform the lookup in the mapping again, which - // is not really necessary because we already got - // 'existing_slot_idx'. But this shouldn't happen often enough - // to matter much. - continue; - } - - // Make the slot ready - let slot = &self.slots[slot_idx]; - inner.key = Some(cache_key.clone()); - slot.set_usage_count(1); - // Create a write guard for the slot so we go through the expected motions. 
- debug_assert!( - { - let guard = inner.permit.lock().unwrap(); - guard.upgrade().is_none() - }, - "we hold a write lock, so, no one else should have a permit" - ); - let mut write_guard = PageWriteGuard { - state: PageWriteGuardState::Invalid { - _permit: permit.take().unwrap(), - inner, - }, - }; - write_guard.copy_from_slice(img); - let _ = write_guard.mark_valid(); - return Ok(()); - } - } - - // Section 1.2: Public interface functions for working with immutable file pages. - pub async fn read_immutable_buf( &self, file_id: FileId, blkno: u32, ctx: &RequestContext, ) -> anyhow::Result { - let mut cache_key = CacheKey::ImmutableFilePage { file_id, blkno }; - - self.lock_for_read(&mut cache_key, ctx).await + self.lock_for_read(&(CacheKey::ImmutableFilePage { file_id, blkno }), ctx) + .await } // @@ -573,19 +368,11 @@ impl PageCache { /// Look up a page in the cache. /// - /// If the search criteria is not exact, *cache_key is updated with the key - /// for exact key of the returned page. (For materialized pages, that means - /// that the LSN in 'cache_key' is updated with the LSN of the returned page - /// version.) - /// - /// If no page is found, returns None and *cache_key is left unmodified. - /// async fn try_lock_for_read( &self, - cache_key: &mut CacheKey, + cache_key: &CacheKey, permit: &mut Option, ) -> Option { - let cache_key_orig = cache_key.clone(); if let Some(slot_idx) = self.search_mapping(cache_key) { // The page was found in the mapping. Lock the slot, and re-check // that it's still what we expected (because we released the mapping @@ -598,9 +385,6 @@ impl PageCache { _permit: inner.coalesce_readers_permit(permit.take().unwrap()), slot_guard: inner, }); - } else { - // search_mapping might have modified the search key; restore it. - *cache_key = cache_key_orig; } } None @@ -637,15 +421,12 @@ impl PageCache { /// async fn lock_for_read( &self, - cache_key: &mut CacheKey, + cache_key: &CacheKey, ctx: &RequestContext, ) -> anyhow::Result { let mut permit = Some(self.try_get_pinned_slot_permit().await?); let (read_access, hit) = match cache_key { - CacheKey::MaterializedPage { .. } => { - unreachable!("Materialized pages use lookup_materialized_page") - } CacheKey::ImmutableFilePage { .. } => ( &crate::metrics::PAGE_CACHE .for_ctx(ctx) @@ -717,52 +498,15 @@ impl PageCache { /// Search for a page in the cache using the given search key. /// - /// Returns the slot index, if any. If the search criteria is not exact, - /// *cache_key is updated with the actual key of the found page. + /// Returns the slot index, if any. /// /// NOTE: We don't hold any lock on the mapping on return, so the slot might /// get recycled for an unrelated page immediately after this function /// returns. The caller is responsible for re-checking that the slot still /// contains the page with the same key before using it. 
/// - fn search_mapping(&self, cache_key: &mut CacheKey) -> Option { + fn search_mapping(&self, cache_key: &CacheKey) -> Option { match cache_key { - CacheKey::MaterializedPage { hash_key, lsn } => { - let map = self.materialized_page_map.read().unwrap(); - let versions = map.get(hash_key)?; - - let version_idx = match versions.binary_search_by_key(lsn, |v| v.lsn) { - Ok(version_idx) => version_idx, - Err(0) => return None, - Err(version_idx) => version_idx - 1, - }; - let version = &versions[version_idx]; - *lsn = version.lsn; - Some(version.slot_idx) - } - CacheKey::ImmutableFilePage { file_id, blkno } => { - let map = self.immutable_page_map.read().unwrap(); - Some(*map.get(&(*file_id, *blkno))?) - } - } - } - - /// Search for a page in the cache using the given search key. - /// - /// Like 'search_mapping, but performs an "exact" search. Used for - /// allocating a new buffer. - fn search_mapping_exact(&self, key: &CacheKey) -> Option { - match key { - CacheKey::MaterializedPage { hash_key, lsn } => { - let map = self.materialized_page_map.read().unwrap(); - let versions = map.get(hash_key)?; - - if let Ok(version_idx) = versions.binary_search_by_key(lsn, |v| v.lsn) { - Some(versions[version_idx].slot_idx) - } else { - None - } - } CacheKey::ImmutableFilePage { file_id, blkno } => { let map = self.immutable_page_map.read().unwrap(); Some(*map.get(&(*file_id, *blkno))?) @@ -775,27 +519,6 @@ impl PageCache { /// fn remove_mapping(&self, old_key: &CacheKey) { match old_key { - CacheKey::MaterializedPage { - hash_key: old_hash_key, - lsn: old_lsn, - } => { - let mut map = self.materialized_page_map.write().unwrap(); - if let Entry::Occupied(mut old_entry) = map.entry(old_hash_key.clone()) { - let versions = old_entry.get_mut(); - - if let Ok(version_idx) = versions.binary_search_by_key(old_lsn, |v| v.lsn) { - versions.remove(version_idx); - self.size_metrics - .current_bytes_materialized_page - .sub_page_sz(1); - if versions.is_empty() { - old_entry.remove_entry(); - } - } - } else { - panic!("could not find old key in mapping") - } - } CacheKey::ImmutableFilePage { file_id, blkno } => { let mut map = self.immutable_page_map.write().unwrap(); map.remove(&(*file_id, *blkno)) @@ -812,30 +535,6 @@ impl PageCache { /// of the existing mapping and leaves it untouched. 
fn try_insert_mapping(&self, new_key: &CacheKey, slot_idx: usize) -> Option { match new_key { - CacheKey::MaterializedPage { - hash_key: new_key, - lsn: new_lsn, - } => { - let mut map = self.materialized_page_map.write().unwrap(); - let versions = map.entry(new_key.clone()).or_default(); - match versions.binary_search_by_key(new_lsn, |v| v.lsn) { - Ok(version_idx) => Some(versions[version_idx].slot_idx), - Err(version_idx) => { - versions.insert( - version_idx, - Version { - lsn: *new_lsn, - slot_idx, - }, - ); - self.size_metrics - .current_bytes_materialized_page - .add_page_sz(1); - None - } - } - } - CacheKey::ImmutableFilePage { file_id, blkno } => { let mut map = self.immutable_page_map.write().unwrap(); match map.entry((*file_id, *blkno)) { @@ -949,7 +648,6 @@ impl PageCache { let size_metrics = &crate::metrics::PAGE_CACHE_SIZE; size_metrics.max_bytes.set_page_sz(num_pages); size_metrics.current_bytes_immutable.set_page_sz(0); - size_metrics.current_bytes_materialized_page.set_page_sz(0); let slots = page_buffer .chunks_exact_mut(PAGE_SZ) @@ -968,7 +666,6 @@ impl PageCache { .collect(); Self { - materialized_page_map: Default::default(), immutable_page_map: Default::default(), slots, next_evict_slot: AtomicUsize::new(0), diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index ae389826d5fc..ebc23e89458e 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -935,7 +935,7 @@ impl PageServerHandler { let timeline = self .get_active_tenant_timeline(tenant_shard_id.tenant_id, timeline_id, shard_selector) .await?; - let lease = timeline.make_lsn_lease(lsn, ctx)?; + let lease = timeline.make_lsn_lease(lsn, timeline.get_lsn_lease_length(), ctx)?; let valid_until = lease .valid_until .duration_since(SystemTime::UNIX_EPOCH) diff --git a/pageserver/src/repository.rs b/pageserver/src/repository.rs index 7b30c3ecf75a..5a334d029035 100644 --- a/pageserver/src/repository.rs +++ b/pageserver/src/repository.rs @@ -240,6 +240,7 @@ pub struct GcResult { pub layers_needed_by_cutoff: u64, pub layers_needed_by_pitr: u64, pub layers_needed_by_branches: u64, + pub layers_needed_by_leases: u64, pub layers_not_updated: u64, pub layers_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files. 
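The new `layers_needed_by_leases` counter is summed across timelines the same way as the other `GcResult` fields. A cut-down sketch of that accumulation pattern (only a few fields shown; the struct here is illustrative, not the full `GcResult`):

```rust
use std::ops::AddAssign;

#[derive(Default, Debug)]
struct GcResult {
    layers_needed_by_branches: u64,
    layers_needed_by_leases: u64, // new: layers kept alive by LSN leases
    layers_removed: u64,
}

impl AddAssign for GcResult {
    fn add_assign(&mut self, other: Self) {
        self.layers_needed_by_branches += other.layers_needed_by_branches;
        self.layers_needed_by_leases += other.layers_needed_by_leases;
        self.layers_removed += other.layers_removed;
    }
}

fn main() {
    // Per-timeline results folded into a tenant-wide total, as a gc iteration does.
    let per_timeline = vec![
        GcResult { layers_needed_by_leases: 7, layers_removed: 1, ..Default::default() },
        GcResult { layers_needed_by_branches: 2, ..Default::default() },
    ];
    let mut total = GcResult::default();
    for r in per_timeline {
        total += r;
    }
    assert_eq!(total.layers_needed_by_leases, 7);
    assert_eq!(total.layers_removed, 1);
}
```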
@@ -269,6 +270,7 @@ impl AddAssign for GcResult { self.layers_needed_by_pitr += other.layers_needed_by_pitr; self.layers_needed_by_cutoff += other.layers_needed_by_cutoff; self.layers_needed_by_branches += other.layers_needed_by_branches; + self.layers_needed_by_leases += other.layers_needed_by_leases; self.layers_not_updated += other.layers_not_updated; self.layers_removed += other.layers_removed; diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 801321e36df8..ace95af10ac3 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -31,6 +31,7 @@ use remote_storage::DownloadError; use remote_storage::GenericRemoteStorage; use remote_storage::TimeoutOrCancel; use std::fmt; +use std::time::SystemTime; use storage_broker::BrokerClientChannel; use tokio::io::BufReader; use tokio::sync::watch; @@ -65,9 +66,9 @@ use self::timeline::uninit::TimelineCreateGuard; use self::timeline::uninit::TimelineExclusionError; use self::timeline::uninit::UninitializedTimeline; use self::timeline::EvictionTaskTenantState; +use self::timeline::GcCutoffs; use self::timeline::TimelineResources; use self::timeline::WaitLsnError; -use self::timeline::{GcCutoffs, GcInfo}; use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; use crate::deletion_queue::DeletionQueueClient; @@ -2428,6 +2429,13 @@ impl Tenant { } } + pub fn get_lsn_lease_length(&self) -> Duration { + let tenant_conf = self.tenant_conf.load().tenant_conf.clone(); + tenant_conf + .lsn_lease_length + .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length) + } + pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) { // Use read-copy-update in order to avoid overwriting the location config // state if this races with [`Tenant::set_new_location_config`]. 
Note that @@ -3010,12 +3018,13 @@ impl Tenant { { let mut target = timeline.gc_info.write().unwrap(); + let now = SystemTime::now(); + target.leases.retain(|_, lease| !lease.is_expired(&now)); + match gc_cutoffs.remove(&timeline.timeline_id) { Some(cutoffs) => { - *target = GcInfo { - retain_lsns: branchpoints, - cutoffs, - }; + target.retain_lsns = branchpoints; + target.cutoffs = cutoffs; } None => { // reasons for this being unavailable: @@ -3833,6 +3842,8 @@ pub(crate) mod harness { tenant_conf.image_layer_creation_check_threshold, ), switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy), + lsn_lease_length: Some(tenant_conf.lsn_lease_length), + lsn_lease_length_for_ts: Some(tenant_conf.lsn_lease_length_for_ts), } } } @@ -3895,7 +3906,9 @@ pub(crate) mod harness { let remote_fs_dir = conf.workdir.join("localfs"); std::fs::create_dir_all(&remote_fs_dir).unwrap(); let config = RemoteStorageConfig { - storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()), + storage: RemoteStorageKind::LocalFs { + local_path: remote_fs_dir.clone(), + }, timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, }; let remote_storage = GenericRemoteStorage::from_config(&config).unwrap(); @@ -6939,4 +6952,93 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_lsn_lease() -> anyhow::Result<()> { + let (tenant, ctx) = TenantHarness::create("test_lsn_lease")?.load().await; + let key = Key::from_hex("010000000033333333444444445500000000").unwrap(); + + let end_lsn = Lsn(0x100); + let image_layers = (0x20..=0x90) + .step_by(0x10) + .map(|n| { + ( + Lsn(n), + vec![(key, test_img(&format!("data key at {:x}", n)))], + ) + }) + .collect(); + + let timeline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + Vec::new(), + image_layers, + end_lsn, + ) + .await?; + + let leased_lsns = [0x30, 0x50, 0x70]; + let mut leases = Vec::new(); + let _: anyhow::Result<_> = leased_lsns.iter().try_for_each(|n| { + leases.push(timeline.make_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)?); + Ok(()) + }); + + // Renewing with shorter lease should not change the lease. + let updated_lease_0 = + timeline.make_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)?; + assert_eq!(updated_lease_0.valid_until, leases[0].valid_until); + + // Renewing with a long lease should renew lease with later expiration time. + let updated_lease_1 = timeline.make_lsn_lease( + Lsn(leased_lsns[1]), + timeline.get_lsn_lease_length() * 2, + &ctx, + )?; + + assert!(updated_lease_1.valid_until > leases[1].valid_until); + + // Force set disk consistent lsn so we can get the cutoff at `end_lsn`. + info!( + "latest_gc_cutoff_lsn: {}", + *timeline.get_latest_gc_cutoff_lsn() + ); + timeline.force_set_disk_consistent_lsn(end_lsn); + + let res = tenant + .gc_iteration( + Some(TIMELINE_ID), + 0, + Duration::ZERO, + &CancellationToken::new(), + &ctx, + ) + .await?; + + // Keeping everything <= Lsn(0x80) b/c leases: + // 0/10: initdb layer + // (0/20..=0/70).step_by(0x10): image layers added when creating the timeline. + assert_eq!(res.layers_needed_by_leases, 7); + // Keeping 0/90 b/c it is the latest layer. + assert_eq!(res.layers_not_updated, 1); + // Removed 0/80. + assert_eq!(res.layers_removed, 1); + + // Make lease on a already GC-ed LSN. 
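The renewal rule this test exercises — an existing lease is only extended, never shortened — comes down to keeping the maximum of the stored and requested expiry, and only rejecting brand-new leases below the GC cutoff. A small sketch of that bookkeeping, independent of the pageserver types (the `Lsn`/`LsnLease` definitions here are stand-ins):

```rust
use std::collections::BTreeMap;
use std::time::{Duration, SystemTime};

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Lsn(u64);

#[derive(Clone)]
struct LsnLease {
    valid_until: SystemTime,
}

/// Grant or renew a lease at `lsn`. Renewing with a shorter length leaves the
/// existing expiry untouched; a longer one pushes it out.
fn make_lsn_lease(
    leases: &mut BTreeMap<Lsn, LsnLease>,
    lsn: Lsn,
    length: Duration,
    latest_gc_cutoff: Lsn,
) -> Result<LsnLease, String> {
    let valid_until = SystemTime::now() + length;
    if let Some(existing) = leases.get_mut(&lsn) {
        if valid_until > existing.valid_until {
            existing.valid_until = valid_until;
        }
        return Ok(existing.clone());
    }
    // New leases are rejected below the GC cutoff: those pages may already be gone.
    if lsn < latest_gc_cutoff {
        return Err(format!("lsn {} already garbage collected", lsn.0));
    }
    let lease = LsnLease { valid_until };
    leases.insert(lsn, lease.clone());
    Ok(lease)
}

fn main() {
    let mut leases = BTreeMap::new();
    let cutoff = Lsn(0x40);
    let first = make_lsn_lease(&mut leases, Lsn(0x50), Duration::from_secs(600), cutoff).unwrap();
    // A zero-length renewal does not shorten the lease.
    let renewed = make_lsn_lease(&mut leases, Lsn(0x50), Duration::ZERO, cutoff).unwrap();
    assert_eq!(renewed.valid_until, first.valid_until);
    // Requests below the cutoff without an existing lease are refused.
    assert!(make_lsn_lease(&mut leases, Lsn(0x30), Duration::from_secs(600), cutoff).is_err());
}
```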
+ // 0/80 does not have a valid lease + is below latest_gc_cutoff + assert!(Lsn(0x80) < *timeline.get_latest_gc_cutoff_lsn()); + let res = timeline.make_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx); + assert!(res.is_err()); + + // Should still be able to renew a currently valid lease + // Assumption: original lease to is still valid for 0/50. + let _ = + timeline.make_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)?; + + Ok(()) + } } diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index 342d70595468..1b9be1264207 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -13,6 +13,7 @@ use pageserver_api::models::AuxFilePolicy; use pageserver_api::models::CompactionAlgorithm; use pageserver_api::models::CompactionAlgorithmSettings; use pageserver_api::models::EvictionPolicy; +use pageserver_api::models::LsnLease; use pageserver_api::models::{self, ThrottleConfig}; use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}; use serde::de::IntoDeserializer; @@ -377,6 +378,16 @@ pub struct TenantConf { /// There is a `last_aux_file_policy` flag which gets persisted in `index_part.json` once the first aux /// file is written. pub switch_aux_file_policy: AuxFilePolicy, + + /// The length for an explicit LSN lease request. + /// Layers needed to reconstruct pages at LSN will not be GC-ed during this interval. + #[serde(with = "humantime_serde")] + pub lsn_lease_length: Duration, + + /// The length for an implicit LSN lease granted as part of `get_lsn_by_timestamp` request. + /// Layers needed to reconstruct pages at LSN will not be GC-ed during this interval. + #[serde(with = "humantime_serde")] + pub lsn_lease_length_for_ts: Duration, } /// Same as TenantConf, but this struct preserves the information about @@ -476,6 +487,16 @@ pub struct TenantConfOpt { #[serde(skip_serializing_if = "Option::is_none")] #[serde(default)] pub switch_aux_file_policy: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + #[serde(default)] + pub lsn_lease_length: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + #[serde(default)] + pub lsn_lease_length_for_ts: Option, } impl TenantConfOpt { @@ -538,6 +559,12 @@ impl TenantConfOpt { switch_aux_file_policy: self .switch_aux_file_policy .unwrap_or(global_conf.switch_aux_file_policy), + lsn_lease_length: self + .lsn_lease_length + .unwrap_or(global_conf.lsn_lease_length), + lsn_lease_length_for_ts: self + .lsn_lease_length_for_ts + .unwrap_or(global_conf.lsn_lease_length_for_ts), } } } @@ -582,6 +609,8 @@ impl Default for TenantConf { timeline_get_throttle: crate::tenant::throttle::Config::disabled(), image_layer_creation_check_threshold: DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD, switch_aux_file_policy: AuxFilePolicy::default_tenant_config(), + lsn_lease_length: LsnLease::DEFAULT_LENGTH, + lsn_lease_length_for_ts: LsnLease::DEFAULT_LENGTH_FOR_TS, } } } @@ -657,6 +686,8 @@ impl From for models::TenantConfig { timeline_get_throttle: value.timeline_get_throttle.map(ThrottleConfig::from), image_layer_creation_check_threshold: value.image_layer_creation_check_threshold, switch_aux_file_policy: value.switch_aux_file_policy, + lsn_lease_length: value.lsn_lease_length.map(humantime), + lsn_lease_length_for_ts: value.lsn_lease_length_for_ts.map(humantime), } } } diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 
8b36aa15e585..d9da3157b7fb 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -6,25 +6,23 @@ use pageserver_api::{models::TenantState, shard::TenantShardId}; use remote_storage::{GenericRemoteStorage, RemotePath, TimeoutOrCancel}; use tokio::sync::OwnedMutexGuard; use tokio_util::sync::CancellationToken; -use tracing::{error, instrument, Instrument}; +use tracing::{error, Instrument}; use utils::{backoff, completion, crashsafe, fs_ext, id::TimelineId, pausable_failpoint}; use crate::{ config::PageServerConf, context::RequestContext, - task_mgr::{self, TaskKind}, + task_mgr::{self}, tenant::{ mgr::{TenantSlot, TenantsMapRemoveResult}, remote_timeline_client::remote_heatmap_path, - timeline::ShutdownMode, }, }; use super::{ mgr::{GetTenantError, TenantSlotError, TenantSlotUpsertError, TenantsMap}, remote_timeline_client::{FAILED_REMOTE_OP_RETRIES, FAILED_UPLOAD_WARN_THRESHOLD}, - span, timeline::delete::DeleteTimelineFlow, tree_sort_timelines, DeleteTimelineError, Tenant, TenantPreload, }; @@ -34,15 +32,6 @@ pub(crate) enum DeleteTenantError { #[error("GetTenant {0}")] Get(#[from] GetTenantError), - #[error("Tenant not attached")] - NotAttached, - - #[error("Invalid state {0}. Expected Active or Broken")] - InvalidState(TenantState), - - #[error("Tenant deletion is already in progress")] - AlreadyInProgress, - #[error("Tenant map slot error {0}")] SlotError(#[from] TenantSlotError), @@ -74,56 +63,6 @@ fn remote_tenant_delete_mark_path( Ok(tenant_remote_path.join(Utf8Path::new("timelines/deleted"))) } -async fn create_remote_delete_mark( - conf: &PageServerConf, - remote_storage: &GenericRemoteStorage, - tenant_shard_id: &TenantShardId, - cancel: &CancellationToken, -) -> Result<(), DeleteTenantError> { - let remote_mark_path = remote_tenant_delete_mark_path(conf, tenant_shard_id)?; - - let data: &[u8] = &[]; - backoff::retry( - || async { - let data = bytes::Bytes::from_static(data); - let stream = futures::stream::once(futures::future::ready(Ok(data))); - remote_storage - .upload(stream, 0, &remote_mark_path, None, cancel) - .await - }, - TimeoutOrCancel::caused_by_cancel, - FAILED_UPLOAD_WARN_THRESHOLD, - FAILED_REMOTE_OP_RETRIES, - "mark_upload", - cancel, - ) - .await - .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel)) - .and_then(|x| x) - .context("mark_upload")?; - - Ok(()) -} - -async fn create_local_delete_mark( - conf: &PageServerConf, - tenant_shard_id: &TenantShardId, -) -> Result<(), DeleteTenantError> { - let marker_path = conf.tenant_deleted_mark_file_path(tenant_shard_id); - - // Note: we're ok to replace existing file. - let _ = std::fs::OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&marker_path) - .with_context(|| format!("could not create delete marker file {marker_path:?}"))?; - - crashsafe::fsync_file_and_parent(&marker_path).context("sync_mark")?; - - Ok(()) -} - async fn schedule_ordered_timeline_deletions( tenant: &Arc, ) -> Result>, TimelineId)>, DeleteTenantError> { @@ -262,21 +201,6 @@ async fn cleanup_remaining_fs_traces( Ok(()) } -/// Orchestrates tenant shut down of all tasks, removes its in-memory structures, -/// and deletes its data from both disk and s3. -/// The sequence of steps: -/// 1. Upload remote deletion mark. -/// 2. Create local mark file. -/// 3. Shutdown tasks -/// 4. Run ordered timeline deletions -/// 5. Wait for timeline deletion operations that were scheduled before tenant deletion was requested -/// 6. Remove remote mark -/// 7. 
Cleanup remaining fs traces, tenant dir, config, timelines dir, local delete mark -/// It is resumable from any step in case a crash/restart occurs. -/// There are two entrypoints to the process: -/// 1. [`DeleteTenantFlow::run`] this is the main one called by a management api handler. -/// 2. [`DeleteTenantFlow::resume_from_attach`] is called when deletion is resumed tenant is found to be deleted during attach process. -/// Note the only other place that messes around timeline delete mark is the `Tenant::spawn_load` function. #[derive(Default)] pub enum DeleteTenantFlow { #[default] @@ -286,91 +210,6 @@ pub enum DeleteTenantFlow { } impl DeleteTenantFlow { - // These steps are run in the context of management api request handler. - // Long running steps are continued to run in the background. - // NB: If this fails half-way through, and is retried, the retry will go through - // all the same steps again. Make sure the code here is idempotent, and don't - // error out if some of the shutdown tasks have already been completed! - // NOTE: static needed for background part. - // We assume that calling code sets up the span with tenant_id. - #[instrument(skip_all)] - pub(crate) async fn run( - conf: &'static PageServerConf, - remote_storage: GenericRemoteStorage, - tenants: &'static std::sync::RwLock, - tenant: Arc, - cancel: &CancellationToken, - ) -> Result<(), DeleteTenantError> { - span::debug_assert_current_span_has_tenant_id(); - - pausable_failpoint!("tenant-delete-before-run"); - - let mut guard = Self::prepare(&tenant).await?; - - if let Err(e) = Self::run_inner(&mut guard, conf, &remote_storage, &tenant, cancel).await { - tenant.set_broken(format!("{e:#}")).await; - return Err(e); - } - - Self::schedule_background(guard, conf, remote_storage, tenants, tenant); - - Ok(()) - } - - // Helper function needed to be able to match once on returned error and transition tenant into broken state. - // This is needed because tenant.shutwodn is not idempotent. If tenant state is set to stopping another call to tenant.shutdown - // will result in an error, but here we need to be able to retry shutdown when tenant deletion is retried. - // So the solution is to set tenant state to broken. - async fn run_inner( - guard: &mut OwnedMutexGuard, - conf: &'static PageServerConf, - remote_storage: &GenericRemoteStorage, - tenant: &Tenant, - cancel: &CancellationToken, - ) -> Result<(), DeleteTenantError> { - guard.mark_in_progress()?; - - fail::fail_point!("tenant-delete-before-create-remote-mark", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-create-remote-mark" - ))? - }); - - create_remote_delete_mark(conf, remote_storage, &tenant.tenant_shard_id, cancel) - .await - .context("remote_mark")?; - - fail::fail_point!("tenant-delete-before-create-local-mark", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-create-local-mark" - ))? - }); - - create_local_delete_mark(conf, &tenant.tenant_shard_id) - .await - .context("local delete mark")?; - - fail::fail_point!("tenant-delete-before-background", |_| { - Err(anyhow::anyhow!( - "failpoint: tenant-delete-before-background" - ))? - }); - - Ok(()) - } - - fn mark_in_progress(&mut self) -> anyhow::Result<()> { - match self { - Self::Finished => anyhow::bail!("Bug. Is in finished state"), - Self::InProgress { .. 
} => { /* We're in a retry */ } - Self::NotStarted => { /* Fresh start */ } - } - - *self = Self::InProgress; - - Ok(()) - } - pub(crate) async fn should_resume_deletion( conf: &'static PageServerConf, remote_mark_exists: bool, @@ -428,79 +267,6 @@ impl DeleteTenantFlow { .await } - /// Check whether background deletion of this tenant is currently in progress - pub(crate) fn is_in_progress(tenant: &Tenant) -> bool { - tenant.delete_progress.try_lock().is_err() - } - - async fn prepare( - tenant: &Arc, - ) -> Result, DeleteTenantError> { - // FIXME: unsure about active only. Our init jobs may not be cancellable properly, - // so at least for now allow deletions only for active tenants. TODO recheck - // Broken and Stopping is needed for retries. - if !matches!( - tenant.current_state(), - TenantState::Active | TenantState::Broken { .. } - ) { - return Err(DeleteTenantError::InvalidState(tenant.current_state())); - } - - let guard = Arc::clone(&tenant.delete_progress) - .try_lock_owned() - .map_err(|_| DeleteTenantError::AlreadyInProgress)?; - - fail::fail_point!("tenant-delete-before-shutdown", |_| { - Err(anyhow::anyhow!("failpoint: tenant-delete-before-shutdown"))? - }); - - // make pageserver shutdown not to wait for our completion - let (_, progress) = completion::channel(); - - // It would be good to only set stopping here and continue shutdown in the background, but shutdown is not idempotent. - // i e it is an error to do: - // tenant.set_stopping - // tenant.shutdown - // Its also bad that we're holding tenants.read here. - // TODO relax set_stopping to be idempotent? - if tenant.shutdown(progress, ShutdownMode::Hard).await.is_err() { - return Err(DeleteTenantError::Other(anyhow::anyhow!( - "tenant shutdown is already in progress" - ))); - } - - Ok(guard) - } - - fn schedule_background( - guard: OwnedMutexGuard, - conf: &'static PageServerConf, - remote_storage: GenericRemoteStorage, - tenants: &'static std::sync::RwLock, - tenant: Arc, - ) { - let tenant_shard_id = tenant.tenant_shard_id; - - task_mgr::spawn( - task_mgr::BACKGROUND_RUNTIME.handle(), - TaskKind::TimelineDeletionWorker, - Some(tenant_shard_id), - None, - "tenant_delete", - false, - async move { - if let Err(err) = - Self::background(guard, conf, remote_storage, tenants, &tenant).await - { - error!("Error: {err:#}"); - tenant.set_broken(format!("{err:#}")).await; - }; - Ok(()) - } - .instrument(tracing::info_span!(parent: None, "delete_tenant", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())), - ); - } - async fn background( mut guard: OwnedMutexGuard, conf: &PageServerConf, @@ -580,8 +346,6 @@ impl DeleteTenantFlow { .context("cleanup_remaining_fs_traces")?; { - pausable_failpoint!("tenant-delete-before-map-remove"); - // This block is simply removing the TenantSlot for this tenant. It requires a loop because // we might conflict with a TenantSlot::InProgress marker and need to wait for it. 
// diff --git a/pageserver/src/tenant/disk_btree.rs b/pageserver/src/tenant/disk_btree.rs index 6d85d1e60ed0..119df3e6c408 100644 --- a/pageserver/src/tenant/disk_btree.rs +++ b/pageserver/src/tenant/disk_btree.rs @@ -22,7 +22,7 @@ use async_stream::try_stream; use byteorder::{ReadBytesExt, BE}; use bytes::{BufMut, Bytes, BytesMut}; use either::Either; -use futures::Stream; +use futures::{Stream, StreamExt}; use hex; use std::{ cmp::Ordering, @@ -259,6 +259,16 @@ where Ok(result) } + pub fn iter<'a>( + &'a self, + start_key: &'a [u8; L], + ctx: &'a RequestContext, + ) -> DiskBtreeIterator<'a> { + DiskBtreeIterator { + stream: Box::pin(self.get_stream_from(start_key, ctx)), + } + } + /// Return a stream which yields all key, value pairs from the index /// starting from the first key greater or equal to `start_key`. /// @@ -496,6 +506,19 @@ where } } +pub struct DiskBtreeIterator<'a> { + #[allow(clippy::type_complexity)] + stream: std::pin::Pin< + Box, u64), DiskBtreeError>> + 'a>, + >, +} + +impl<'a> DiskBtreeIterator<'a> { + pub async fn next(&mut self) -> Option, u64), DiskBtreeError>> { + self.stream.next().await + } +} + /// /// Public builder object, for creating a new tree. /// @@ -1088,6 +1111,17 @@ pub(crate) mod tests { == all_data.get(&u128::MAX).cloned() ); + // Test iterator and get_stream API + let mut iter = reader.iter(&[0; 16], &ctx); + let mut cnt = 0; + while let Some(res) = iter.next().await { + let (key, val) = res?; + let key = u128::from_be_bytes(key.as_slice().try_into().unwrap()); + assert_eq!(val, *all_data.get(&key).unwrap()); + cnt += 1; + } + assert_eq!(cnt, all_data.len()); + Ok(()) } diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 4520bb929596..326086a3ccdf 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -3,7 +3,6 @@ use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf}; use futures::StreamExt; -use hyper::StatusCode; use itertools::Itertools; use pageserver_api::key::Key; use pageserver_api::models::LocationConfigMode; @@ -27,8 +26,7 @@ use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use tracing::*; -use remote_storage::GenericRemoteStorage; -use utils::{completion, crashsafe}; +use utils::{backoff, completion, crashsafe}; use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; @@ -42,12 +40,11 @@ use crate::task_mgr::{self, TaskKind}; use crate::tenant::config::{ AttachedLocationConfig, AttachmentMode, LocationConf, LocationMode, SecondaryLocationConfig, }; -use crate::tenant::delete::DeleteTenantFlow; use crate::tenant::span::debug_assert_current_span_has_tenant_id; use crate::tenant::storage_layer::inmemory_layer; use crate::tenant::timeline::ShutdownMode; use crate::tenant::{AttachedTenantConf, GcError, SpawnMode, Tenant, TenantState}; -use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME, TEMP_FILE_SUFFIX}; +use crate::{InitializationOrder, TEMP_FILE_SUFFIX}; use utils::crashsafe::path_with_suffix_extension; use utils::fs_ext::PathExt; @@ -422,12 +419,6 @@ fn load_tenant_config( } }; - let tenant_ignore_mark_file = tenant_dir_path.join(IGNORED_TENANT_FILE_NAME); - if tenant_ignore_mark_file.exists() { - info!("Found an ignore mark file {tenant_ignore_mark_file:?}, skipping the tenant"); - return Ok(None); - } - Ok(Some(( tenant_shard_id, Tenant::load_tenant_config(conf, &tenant_shard_id), @@ -713,12 +704,6 @@ fn tenant_spawn( "Cannot load tenant from empty directory {tenant_path:?}" ); - let tenant_ignore_mark = 
conf.tenant_ignore_mark_file_path(&tenant_shard_id); - anyhow::ensure!( - !conf.tenant_ignore_mark_file_path(&tenant_shard_id).exists(), - "Cannot load tenant, ignore mark found at {tenant_ignore_mark:?}" - ); - let remote_storage = resources.remote_storage.clone(); let tenant = match Tenant::spawn( conf, @@ -1067,7 +1052,7 @@ impl TenantManager { // not do significant I/O, and shutdowns should be prompt via cancellation tokens. let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any) .map_err(|e| match e { - TenantSlotError::AlreadyExists(_, _) | TenantSlotError::NotFound(_) => { + TenantSlotError::NotFound(_) => { unreachable!("Called with mode Any") } TenantSlotError::InProgress => UpsertLocationError::InProgress, @@ -1367,56 +1352,10 @@ impl TenantManager { } } - pub(crate) async fn delete_tenant( + async fn delete_tenant_remote( &self, tenant_shard_id: TenantShardId, - activation_timeout: Duration, - ) -> Result { - super::span::debug_assert_current_span_has_tenant_id(); - // We acquire a SlotGuard during this function to protect against concurrent - // changes while the ::prepare phase of DeleteTenantFlow executes, but then - // have to return the Tenant to the map while the background deletion runs. - // - // TODO: refactor deletion to happen outside the lifetime of a Tenant. - // Currently, deletion requires a reference to the tenants map in order to - // keep the Tenant in the map until deletion is complete, and then remove - // it at the end. - // - // See https://github.com/neondatabase/neon/issues/5080 - - // Tenant deletion can happen two ways: - // - Legacy: called on an attached location. The attached Tenant object stays alive in Stopping - // state until deletion is complete. - // - New: called on a pageserver without an attached location. We proceed with deletion from - // remote storage. - // - // See https://github.com/neondatabase/neon/issues/5080 for more context on this transition. - - let slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; - match &slot_guard.old_value { - Some(TenantSlot::Attached(tenant)) => { - // Legacy deletion flow: the tenant remains attached, goes to Stopping state, and - // deletion will be resumed across restarts. 
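The `DiskBtreeIterator` added in the disk_btree.rs hunk above is essentially a pull-style `next().await` API wrapped around an internal `Stream`. A standalone sketch of that pattern, not the pageserver's actual reader (requires the `futures` and `tokio` crates; names are illustrative):

```rust
use std::pin::Pin;

use futures::stream::{self, Stream, StreamExt};

struct KvIterator<'a> {
    stream: Pin<Box<dyn Stream<Item = Result<(Vec<u8>, u64), std::io::Error>> + 'a>>,
}

impl<'a> KvIterator<'a> {
    fn new(inner: impl Stream<Item = Result<(Vec<u8>, u64), std::io::Error>> + 'a) -> Self {
        Self { stream: Box::pin(inner) }
    }

    /// Yields the next key/value pair, or `None` once the stream is exhausted.
    async fn next(&mut self) -> Option<Result<(Vec<u8>, u64), std::io::Error>> {
        self.stream.next().await
    }
}

#[tokio::main]
async fn main() {
    // Stand-in for a `get_stream_from(start_key, ctx)`-style source.
    let items = stream::iter((0u64..3).map(|n| Ok((n.to_be_bytes().to_vec(), n * 10))));
    let mut iter = KvIterator::new(items);
    let mut count = 0;
    while let Some(res) = iter.next().await {
        let (key, val) = res.unwrap();
        assert_eq!(key.len(), 8);
        assert_eq!(val % 10, 0);
        count += 1;
    }
    assert_eq!(count, 3);
}
```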
- let tenant = tenant.clone(); - return self - .delete_tenant_attached(slot_guard, tenant, activation_timeout) - .await; - } - Some(TenantSlot::Secondary(secondary_tenant)) => { - secondary_tenant.shutdown().await; - let local_tenant_directory = self.conf.tenant_path(&tenant_shard_id); - let tmp_dir = safe_rename_tenant_dir(&local_tenant_directory) - .await - .with_context(|| { - format!("local tenant directory {local_tenant_directory:?} rename") - })?; - spawn_background_purge(tmp_dir); - } - Some(TenantSlot::InProgress(_)) => unreachable!(), - None => {} - }; - - // Fall through: local state for this tenant is no longer present, proceed with remote delete + ) -> Result<(), DeleteTenantError> { let remote_path = remote_tenant_path(&tenant_shard_id); let keys = match self .resources @@ -1433,7 +1372,7 @@ impl TenantManager { Err(remote_storage::DownloadError::Cancelled) => { return Err(DeleteTenantError::Cancelled) } - Err(remote_storage::DownloadError::NotFound) => return Ok(StatusCode::NOT_FOUND), + Err(remote_storage::DownloadError::NotFound) => return Ok(()), Err(other) => return Err(DeleteTenantError::Other(anyhow::anyhow!(other))), }; @@ -1447,60 +1386,83 @@ impl TenantManager { .await?; } - // Callers use 404 as success for deletions, for historical reasons. - Ok(StatusCode::NOT_FOUND) + Ok(()) } - async fn delete_tenant_attached( + /// If a tenant is attached, detach it. Then remove its data from remote storage. + /// + /// A tenant is considered deleted once it is gone from remote storage. It is the caller's + /// responsibility to avoid trying to attach the tenant again or use it any way once deletion + /// has started: this operation is not atomic, and must be retried until it succeeds. + pub(crate) async fn delete_tenant( &self, - slot_guard: SlotGuard, - tenant: Arc, - activation_timeout: Duration, - ) -> Result { - match tenant.current_state() { - TenantState::Broken { .. } | TenantState::Stopping { .. } => { - // If deletion is already in progress, return success (the semantics of this - // function are to rerturn success afterr deletion is spawned in background). - // Otherwise fall through and let [`DeleteTenantFlow`] handle this state. - if DeleteTenantFlow::is_in_progress(&tenant) { - // The `delete_progress` lock is held: deletion is already happening - // in the bacckground - slot_guard.revert(); - return Ok(StatusCode::ACCEPTED); + tenant_shard_id: TenantShardId, + ) -> Result<(), DeleteTenantError> { + super::span::debug_assert_current_span_has_tenant_id(); + + async fn delete_local( + conf: &PageServerConf, + tenant_shard_id: &TenantShardId, + ) -> anyhow::Result<()> { + let local_tenant_directory = conf.tenant_path(tenant_shard_id); + let tmp_dir = safe_rename_tenant_dir(&local_tenant_directory) + .await + .with_context(|| { + format!("local tenant directory {local_tenant_directory:?} rename") + })?; + spawn_background_purge(tmp_dir); + Ok(()) + } + + let slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; + match &slot_guard.old_value { + Some(TenantSlot::Attached(tenant)) => { + // Legacy deletion flow: the tenant remains attached, goes to Stopping state, and + // deletion will be resumed across restarts. 
+ let tenant = tenant.clone(); + let (_guard, progress) = utils::completion::channel(); + match tenant.shutdown(progress, ShutdownMode::Hard).await { + Ok(()) => {} + Err(barrier) => { + info!("Shutdown already in progress, waiting for it to complete"); + barrier.wait().await; + } } + delete_local(self.conf, &tenant_shard_id).await?; } - _ => { - tenant - .wait_to_become_active(activation_timeout) - .await - .map_err(|e| match e { - GetActiveTenantError::WillNotBecomeActive(_) - | GetActiveTenantError::Broken(_) => { - DeleteTenantError::InvalidState(tenant.current_state()) - } - GetActiveTenantError::Cancelled => DeleteTenantError::Cancelled, - GetActiveTenantError::NotFound(_) => DeleteTenantError::NotAttached, - GetActiveTenantError::WaitForActiveTimeout { - latest_state: _latest_state, - wait_time: _wait_time, - } => DeleteTenantError::InvalidState(tenant.current_state()), - })?; + Some(TenantSlot::Secondary(secondary_tenant)) => { + secondary_tenant.shutdown().await; + + delete_local(self.conf, &tenant_shard_id).await?; } - } + Some(TenantSlot::InProgress(_)) => unreachable!(), + None => {} + }; - let result = DeleteTenantFlow::run( - self.conf, - self.resources.remote_storage.clone(), - &TENANTS, - tenant, + // Fall through: local state for this tenant is no longer present, proceed with remote delete. + // - We use a retry wrapper here so that common transient S3 errors (e.g. 503, 429) do not result + // in 500 responses to delete requests. + // - We keep the `SlotGuard` during this I/O, so that if a concurrent delete request comes in, it will + // 503/retry, rather than kicking off a wasteful concurrent deletion. + match backoff::retry( + || async move { self.delete_tenant_remote(tenant_shard_id).await }, + |e| match e { + DeleteTenantError::Cancelled => true, + DeleteTenantError::SlotError(_) => { + unreachable!("Remote deletion doesn't touch slots") + } + _ => false, + }, + 1, + 3, + &format!("delete_tenant[tenant_shard_id={tenant_shard_id}]"), &self.cancel, ) - .await; - - // The Tenant goes back into the map in Stopping state, it will eventually be removed by DeleteTenantFLow - slot_guard.revert(); - let () = result?; - Ok(StatusCode::ACCEPTED) + .await + { + Some(r) => r, + None => Err(DeleteTenantError::Cancelled), + } } #[instrument(skip_all, fields(tenant_id=%tenant.get_tenant_shard_id().tenant_id, shard_id=%tenant.get_tenant_shard_id().shard_slug(), new_shard_count=%new_shard_count.literal()))] @@ -1901,17 +1863,10 @@ impl TenantManager { &self, conf: &'static PageServerConf, tenant_shard_id: TenantShardId, - detach_ignored: bool, deletion_queue_client: &DeletionQueueClient, ) -> Result<(), TenantStateError> { let tmp_path = self - .detach_tenant0( - conf, - &TENANTS, - tenant_shard_id, - detach_ignored, - deletion_queue_client, - ) + .detach_tenant0(conf, &TENANTS, tenant_shard_id, deletion_queue_client) .await?; spawn_background_purge(tmp_path); @@ -1923,7 +1878,6 @@ impl TenantManager { conf: &'static PageServerConf, tenants: &std::sync::RwLock, tenant_shard_id: TenantShardId, - detach_ignored: bool, deletion_queue_client: &DeletionQueueClient, ) -> Result { let tenant_dir_rename_operation = |tenant_id_to_clean: TenantShardId| async move { @@ -1946,26 +1900,6 @@ impl TenantManager { // before this tenant is potentially re-attached elsewhere. deletion_queue_client.flush_advisory(); - // Ignored tenants are not present in memory and will bail the removal from memory operation. 
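The new `delete_tenant` wraps the remote deletion in a retry loop so that transient storage errors (e.g. 503/429) do not surface as 500s, while cancellation still aborts immediately. A generic sketch of that retry-with-backoff shape in plain tokio, not the pageserver's `backoff` helper (names and error variants are illustrative):

```rust
use std::time::Duration;

#[derive(Debug)]
enum DeleteError {
    Cancelled,
    Transient(String),
}

/// Retry `op` with exponential backoff, giving up immediately on errors the
/// caller treats as permanent (here: cancellation).
async fn retry_remote_delete<F, Fut>(mut op: F, max_attempts: u32) -> Result<(), DeleteError>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), DeleteError>>,
{
    let mut delay = Duration::from_millis(100);
    for attempt in 1..=max_attempts {
        match op().await {
            Ok(()) => return Ok(()),
            Err(DeleteError::Cancelled) => return Err(DeleteError::Cancelled),
            Err(DeleteError::Transient(msg)) if attempt < max_attempts => {
                eprintln!("delete attempt {attempt} failed: {msg}; retrying in {delay:?}");
                tokio::time::sleep(delay).await;
                delay *= 2;
            }
            Err(e) => return Err(e),
        }
    }
    Err(DeleteError::Transient("no attempts were made".into()))
}

#[tokio::main]
async fn main() {
    // Simulated remote delete that succeeds on the third attempt.
    let mut calls = 0u32;
    let result = retry_remote_delete(
        || {
            calls += 1;
            let this_call = calls;
            async move {
                if this_call < 3 {
                    Err(DeleteError::Transient("503 slow down".into()))
                } else {
                    Ok(())
                }
            }
        },
        5,
    )
    .await;
    assert!(result.is_ok());
}
```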
- // Before returning the error, check for ignored tenant removal case — we only need to clean its local files then. - if detach_ignored - && matches!( - removal_result, - Err(TenantStateError::SlotError(TenantSlotError::NotFound(_))) - ) - { - let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id); - if tenant_ignore_mark.exists() { - info!("Detaching an ignored tenant"); - let tmp_path = tenant_dir_rename_operation(tenant_shard_id) - .await - .with_context(|| { - format!("Ignored tenant {tenant_shard_id} local directory rename") - })?; - return Ok(tmp_path); - } - } - removal_result } @@ -2222,97 +2156,6 @@ pub(crate) enum TenantStateError { Other(#[from] anyhow::Error), } -pub(crate) async fn load_tenant( - conf: &'static PageServerConf, - tenant_id: TenantId, - generation: Generation, - broker_client: storage_broker::BrokerClientChannel, - remote_storage: GenericRemoteStorage, - deletion_queue_client: DeletionQueueClient, - ctx: &RequestContext, -) -> Result<(), TenantMapInsertError> { - // This is a legacy API (replaced by `/location_conf`). It does not support sharding - let tenant_shard_id = TenantShardId::unsharded(tenant_id); - - let slot_guard = - tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustNotExist)?; - let tenant_path = conf.tenant_path(&tenant_shard_id); - - let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id); - if tenant_ignore_mark.exists() { - std::fs::remove_file(&tenant_ignore_mark).with_context(|| { - format!( - "Failed to remove tenant ignore mark {tenant_ignore_mark:?} during tenant loading" - ) - })?; - } - - let resources = TenantSharedResources { - broker_client, - remote_storage, - deletion_queue_client, - }; - - let mut location_conf = - Tenant::load_tenant_config(conf, &tenant_shard_id).map_err(TenantMapInsertError::Other)?; - location_conf.attach_in_generation(AttachmentMode::Single, generation); - - Tenant::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await?; - - let shard_identity = location_conf.shard; - let new_tenant = tenant_spawn( - conf, - tenant_shard_id, - &tenant_path, - resources, - AttachedTenantConf::try_from(location_conf)?, - shard_identity, - None, - &TENANTS, - SpawnMode::Eager, - ctx, - ) - .with_context(|| format!("Failed to schedule tenant processing in path {tenant_path:?}"))?; - - slot_guard.upsert(TenantSlot::Attached(new_tenant))?; - Ok(()) -} - -pub(crate) async fn ignore_tenant( - conf: &'static PageServerConf, - tenant_id: TenantId, -) -> Result<(), TenantStateError> { - ignore_tenant0(conf, &TENANTS, tenant_id).await -} - -#[instrument(skip_all, fields(shard_id))] -async fn ignore_tenant0( - conf: &'static PageServerConf, - tenants: &std::sync::RwLock, - tenant_id: TenantId, -) -> Result<(), TenantStateError> { - // This is a legacy API (replaced by `/location_conf`). 
It does not support sharding - let tenant_shard_id = TenantShardId::unsharded(tenant_id); - tracing::Span::current().record( - "shard_id", - tracing::field::display(tenant_shard_id.shard_slug()), - ); - - remove_tenant_from_memory(tenants, tenant_shard_id, async { - let ignore_mark_file = conf.tenant_ignore_mark_file_path(&tenant_shard_id); - fs::File::create(&ignore_mark_file) - .await - .context("Failed to create ignore mark file") - .and_then(|_| { - crashsafe::fsync_file_and_parent(&ignore_mark_file) - .context("Failed to fsync ignore mark file") - }) - .with_context(|| format!("Failed to crate ignore mark for tenant {tenant_shard_id}"))?; - Ok(()) - }) - .await -} - #[derive(Debug, thiserror::Error)] pub(crate) enum TenantMapListError { #[error("tenant map is still initiailizing")] @@ -2337,10 +2180,6 @@ pub(crate) enum TenantSlotError { #[error("Tenant {0} not found")] NotFound(TenantShardId), - /// When acquiring a slot with the expectation that the tenant does not already exist. - #[error("tenant {0} already exists, state: {1:?}")] - AlreadyExists(TenantShardId, TenantState), - // Tried to read a slot that is currently being mutated by another administrative // operation. #[error("tenant has a state change in progress, try again later")] @@ -2656,8 +2495,6 @@ enum TenantSlotAcquireMode { Any, /// Return an error if trying to acquire a slot and it doesn't already exist MustExist, - /// Return an error if trying to acquire a slot and it already exists - MustNotExist, } fn tenant_map_acquire_slot( @@ -2711,27 +2548,6 @@ fn tenant_map_acquire_slot_impl( tracing::debug!("Occupied, failing for InProgress"); Err(TenantSlotError::InProgress) } - (slot, MustNotExist) => match slot { - TenantSlot::Attached(tenant) => { - tracing::debug!("Attached && MustNotExist, return AlreadyExists"); - Err(TenantSlotError::AlreadyExists( - *tenant_shard_id, - tenant.current_state(), - )) - } - _ => { - // FIXME: the AlreadyExists error assumes that we have a Tenant - // to get the state from - tracing::debug!("Occupied & MustNotExist, return AlreadyExists"); - Err(TenantSlotError::AlreadyExists( - *tenant_shard_id, - TenantState::Broken { - reason: "Present but not attached".to_string(), - backtrace: "".to_string(), - }, - )) - } - }, _ => { // Happy case: the slot was not in any state that violated our mode let (completion, barrier) = utils::completion::channel(); diff --git a/pageserver/src/tenant/size.rs b/pageserver/src/tenant/size.rs index cdd5b0cbe73f..b2338b620ebf 100644 --- a/pageserver/src/tenant/size.rs +++ b/pageserver/src/tenant/size.rs @@ -60,10 +60,6 @@ pub(crate) enum CalculateSyntheticSizeError { #[error(transparent)] Fatal(anyhow::Error), - /// The LSN we are trying to calculate a size at no longer exists at the point we query it - #[error("Could not find size at {lsn} in timeline {timeline_id}")] - LsnNotFound { timeline_id: TimelineId, lsn: Lsn }, - /// Tenant shut down while calculating size #[error("Cancelled")] Cancelled, @@ -375,9 +371,8 @@ pub(super) async fn gather_inputs( /// Augment 'segments' with logical sizes /// -/// this will probably conflict with on-demand downloaded layers, or at least force them all -/// to be downloaded -/// +/// This will leave segments' sizes as None if the Timeline associated with the segment is deleted concurrently +/// (i.e. we cannot read its logical size at a particular LSN). 
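Per the doc comment just above, a size that cannot be computed (for example because the timeline was deleted mid-calculation) now simply stays `None` instead of failing the whole synthetic-size pass; the function itself follows below. A tiny sketch of that lookup pattern with illustrative types:

```rust
use std::collections::HashMap;

type TimelineId = u32;
type Lsn = u64;

#[derive(Debug)]
struct SegmentMeta {
    timeline_id: TimelineId,
    lsn: Lsn,
    size: Option<u64>,
}

/// Fill in sizes where we have them; segments whose timeline vanished keep
/// `size: None` rather than aborting the calculation.
fn fill_sizes(segments: &mut [SegmentMeta], sizes: &HashMap<(TimelineId, Lsn), Option<u64>>) {
    for seg in segments.iter_mut() {
        if let Some(Some(size)) = sizes.get(&(seg.timeline_id, seg.lsn)) {
            seg.size = Some(*size);
        }
        // else: leave as None; the caller treats a missing size as "unknown"
    }
}

fn main() {
    let mut segments = vec![
        SegmentMeta { timeline_id: 1, lsn: 0x10, size: None },
        SegmentMeta { timeline_id: 2, lsn: 0x20, size: None }, // timeline deleted concurrently
    ];
    let mut sizes = HashMap::new();
    sizes.insert((1, 0x10), Some(4096));
    sizes.insert((2, 0x20), None);
    fill_sizes(&mut segments, &sizes);
    assert_eq!(segments[0].size, Some(4096));
    assert_eq!(segments[1].size, None);
}
```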
async fn fill_logical_sizes( timelines: &[Arc], segments: &mut [SegmentMeta], @@ -498,8 +493,6 @@ async fn fill_logical_sizes( if let Some(Some(size)) = sizes_needed.get(&(timeline_id, lsn)) { seg.segment.size = Some(*size); - } else { - return Err(CalculateSyntheticSizeError::LsnNotFound { timeline_id, lsn }); } } Ok(()) diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs index a6dfa84f3598..d679b78f328c 100644 --- a/pageserver/src/tenant/tasks.rs +++ b/pageserver/src/tenant/tasks.rs @@ -346,6 +346,7 @@ async fn gc_loop(tenant: Arc, cancel: CancellationToken) { // cutoff specified as time. let ctx = RequestContext::todo_child(TaskKind::GarbageCollector, DownloadBehavior::Download); + let mut first = true; loop { tokio::select! { @@ -362,6 +363,14 @@ async fn gc_loop(tenant: Arc, cancel: CancellationToken) { if first { first = false; + + if delay_by_lease_length(tenant.get_lsn_lease_length(), &cancel) + .await + .is_err() + { + break; + } + if random_init_delay(period, &cancel).await.is_err() { break; } @@ -531,6 +540,21 @@ pub(crate) async fn random_init_delay( } } +/// Delays GC by defaul lease length at restart. +/// +/// We do this as the leases mapping are not persisted to disk. By delaying GC by default +/// length, we gurantees that all the leases we granted before the restart will expire +/// when we run GC for the first time after the restart. +pub(crate) async fn delay_by_lease_length( + length: Duration, + cancel: &CancellationToken, +) -> Result<(), Cancelled> { + match tokio::time::timeout(length, cancel.cancelled()).await { + Ok(_) => Err(Cancelled), + Err(_) => Ok(()), + } +} + /// Attention: the `task` and `period` beocme labels of a pageserver-wide prometheus metric. pub(crate) fn warn_when_period_overrun( elapsed: Duration, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 08bec329e112..5398ad399c84 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -47,7 +47,6 @@ use utils::{ vec_map::VecMap, }; -use std::ops::{Deref, Range}; use std::pin::pin; use std::sync::atomic::Ordering as AtomicOrdering; use std::sync::{Arc, Mutex, RwLock, Weak}; @@ -61,6 +60,10 @@ use std::{ cmp::{max, min, Ordering}, ops::ControlFlow, }; +use std::{ + collections::btree_map::Entry, + ops::{Deref, Range}, +}; use crate::metrics::GetKind; use crate::pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS; @@ -98,9 +101,7 @@ use crate::{ use crate::config::PageServerConf; use crate::keyspace::{KeyPartitioning, KeySpace}; -use crate::metrics::{ - TimelineMetrics, MATERIALIZED_PAGE_CACHE_HIT, MATERIALIZED_PAGE_CACHE_HIT_DIRECT, -}; +use crate::metrics::TimelineMetrics; use crate::pgdatadir_mapping::CalculateLogicalSizeError; use crate::tenant::config::TenantConfOpt; use pageserver_api::reltag::RelTag; @@ -117,7 +118,6 @@ use utils::{ simple_rcu::{Rcu, RcuReadGuard}, }; -use crate::page_cache; use crate::repository::GcResult; use crate::repository::{Key, Value}; use crate::task_mgr; @@ -131,7 +131,7 @@ use self::layer_manager::LayerManager; use self::logical_size::LogicalSize; use self::walreceiver::{WalReceiver, WalReceiverConf}; -use super::{config::TenantConf, storage_layer::VectoredValueReconstructState}; +use super::config::TenantConf; use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf}; use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe}; use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer}; @@ -454,6 +454,9 @@ 
pub(crate) struct GcInfo { /// The cutoff coordinates, which are combined by selecting the minimum. pub(crate) cutoffs: GcCutoffs, + + /// Leases granted to particular LSNs. + pub(crate) leases: BTreeMap, } impl GcInfo { @@ -881,32 +884,11 @@ impl Timeline { self.timeline_get_throttle.throttle(ctx, 1).await; - // Check the page cache. We will get back the most recent page with lsn <= `lsn`. - // The cached image can be returned directly if there is no WAL between the cached image - // and requested LSN. The cached image can also be used to reduce the amount of WAL needed - // for redo. - let cached_page_img = match self.lookup_cached_page(&key, lsn, ctx).await { - Some((cached_lsn, cached_img)) => { - match cached_lsn.cmp(&lsn) { - Ordering::Less => {} // there might be WAL between cached_lsn and lsn, we need to check - Ordering::Equal => { - MATERIALIZED_PAGE_CACHE_HIT_DIRECT.inc(); - return Ok(cached_img); // exact LSN match, return the image - } - Ordering::Greater => { - unreachable!("the returned lsn should never be after the requested lsn") - } - } - Some((cached_lsn, cached_img)) - } - None => None, - }; - match self.conf.get_impl { GetImpl::Legacy => { let reconstruct_state = ValueReconstructState { records: Vec::new(), - img: cached_page_img, + img: None, }; self.get_impl(key, lsn, reconstruct_state, ctx).await @@ -920,13 +902,6 @@ impl Timeline { // entry returned above. let mut reconstruct_state = ValuesReconstructState::new(); - // Only add the cached image to the reconstruct state when it exists. - if cached_page_img.is_some() { - let mut key_state = VectoredValueReconstructState::default(); - key_state.img = cached_page_img; - reconstruct_state.keys.insert(key, Ok(key_state)); - } - let vectored_res = self .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx) .await; @@ -1555,17 +1530,46 @@ impl Timeline { Ok(()) } - /// Obtains a temporary lease blocking garbage collection for the given LSN + /// Obtains a temporary lease blocking garbage collection for the given LSN. + /// + /// This function will error if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is also + /// no existing lease to renew. If there is an existing lease in the map, the lease will be renewed only if + /// the request extends the lease. The returned lease is therefore the maximum between the existing lease and + /// the requesting lease. pub(crate) fn make_lsn_lease( &self, - _lsn: Lsn, + lsn: Lsn, + length: Duration, _ctx: &RequestContext, ) -> anyhow::Result { - const LEASE_LENGTH: Duration = Duration::from_secs(5 * 60); - let lease = LsnLease { - valid_until: SystemTime::now() + LEASE_LENGTH, + let lease = { + let mut gc_info = self.gc_info.write().unwrap(); + + let valid_until = SystemTime::now() + length; + + let entry = gc_info.leases.entry(lsn); + + let lease = { + if let Entry::Occupied(mut occupied) = entry { + let existing_lease = occupied.get_mut(); + if valid_until > existing_lease.valid_until { + existing_lease.valid_until = valid_until; + } + existing_lease.clone() + } else { + // Reject already GC-ed LSN (lsn < latest_gc_cutoff) + let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn(); + if lsn < *latest_gc_cutoff_lsn { + bail!("tried to request a page version that was garbage collected. 
requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn); + } + + entry.or_insert(LsnLease { valid_until }).clone() + } + }; + + lease }; - // TODO: dummy implementation + Ok(lease) } @@ -2082,6 +2086,24 @@ const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10; // Private functions impl Timeline { + pub(crate) fn get_lsn_lease_length(&self) -> Duration { + let tenant_conf = self.tenant_conf.load(); + tenant_conf + .tenant_conf + .lsn_lease_length + .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length) + } + + // TODO(yuchen): remove unused flag after implementing https://github.com/neondatabase/neon/issues/8072 + #[allow(unused)] + pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration { + let tenant_conf = self.tenant_conf.load(); + tenant_conf + .tenant_conf + .lsn_lease_length_for_ts + .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts) + } + pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy { let tenant_conf = self.tenant_conf.load(); tenant_conf @@ -3187,7 +3209,6 @@ impl Timeline { ValueReconstructResult::Continue => { // If we reached an earlier cached page image, we're done. if cont_lsn == cached_lsn + 1 { - MATERIALIZED_PAGE_CACHE_HIT.inc_by(1); return Ok(traversal_path); } if let Some(prev) = prev_lsn { @@ -3561,26 +3582,6 @@ impl Timeline { }) } - /// # Cancel-safety - /// - /// This method is cancellation-safe. - async fn lookup_cached_page( - &self, - key: &Key, - lsn: Lsn, - ctx: &RequestContext, - ) -> Option<(Lsn, Bytes)> { - let cache = page_cache::get(); - - // FIXME: It's pointless to check the cache for things that are not 8kB pages. - // We should look at the key to determine if it's a cacheable object - let (lsn, read_guard) = cache - .lookup_materialized_page(self.tenant_shard_id, self.timeline_id, key, lsn, ctx) - .await?; - let img = Bytes::from(read_guard.to_vec()); - Some((lsn, img)) - } - async fn get_ready_ancestor_timeline( &self, ancestor: &Arc, @@ -4907,13 +4908,25 @@ impl Timeline { return Err(GcError::TimelineCancelled); } - let (horizon_cutoff, pitr_cutoff, retain_lsns) = { + let (horizon_cutoff, pitr_cutoff, retain_lsns, max_lsn_with_valid_lease) = { let gc_info = self.gc_info.read().unwrap(); let horizon_cutoff = min(gc_info.cutoffs.horizon, self.get_disk_consistent_lsn()); let pitr_cutoff = gc_info.cutoffs.pitr; let retain_lsns = gc_info.retain_lsns.clone(); - (horizon_cutoff, pitr_cutoff, retain_lsns) + + // Gets the maximum LSN that holds the valid lease. + // + // Caveat: `refresh_gc_info` is in charged of updating the lease map. + // Here, we do not check for stale leases again. + let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn); + + ( + horizon_cutoff, + pitr_cutoff, + retain_lsns, + max_lsn_with_valid_lease, + ) }; let mut new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff); @@ -4944,7 +4957,13 @@ impl Timeline { .set(Lsn::INVALID.0 as i64); let res = self - .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff) + .gc_timeline( + horizon_cutoff, + pitr_cutoff, + retain_lsns, + max_lsn_with_valid_lease, + new_gc_cutoff, + ) .instrument( info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff), ) @@ -4961,6 +4980,7 @@ impl Timeline { horizon_cutoff: Lsn, pitr_cutoff: Lsn, retain_lsns: Vec, + max_lsn_with_valid_lease: Option, new_gc_cutoff: Lsn, ) -> Result { // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc @@ -5009,7 +5029,8 @@ impl Timeline { // 1. it is older than cutoff LSN; // 2. 
it is older than PITR interval; // 3. it doesn't need to be retained for 'retain_lsns'; - // 4. newer on-disk image layers cover the layer's whole key range + // 4. it does not need to be kept for LSNs holding valid leases. + // 5. newer on-disk image layers cover the layer's whole key range // // TODO holding a write lock is too agressive and avoidable let mut guard = self.layers.write().await; @@ -5060,7 +5081,21 @@ impl Timeline { } } - // 4. Is there a later on-disk layer for this relation? + // 4. Is there a valid lease that requires us to keep this layer? + if let Some(lsn) = &max_lsn_with_valid_lease { + // keep if layer start <= any of the lease + if &l.get_lsn_range().start <= lsn { + debug!( + "keeping {} because there is a valid lease preventing GC at {}", + l.layer_name(), + lsn, + ); + result.layers_needed_by_leases += 1; + continue 'outer; + } + } + + // 5. Is there a later on-disk layer for this relation? // // The end-LSN is exclusive, while disk_consistent_lsn is // inclusive. For example, if disk_consistent_lsn is 100, it is @@ -5193,8 +5228,6 @@ impl Timeline { trace!("found {} WAL records that will init the page for {} at {}, performing WAL redo", data.records.len(), key, request_lsn); }; - let last_rec_lsn = data.records.last().unwrap().0; - let img = match self .walredo_mgr .as_ref() @@ -5208,23 +5241,6 @@ impl Timeline { Err(e) => return Err(PageReconstructError::WalRedo(e)), }; - if img.len() == page_cache::PAGE_SZ { - let cache = page_cache::get(); - if let Err(e) = cache - .memorize_materialized_page( - self.tenant_shard_id, - self.timeline_id, - key, - last_rec_lsn, - &img, - ) - .await - .context("Materialized page memoization failed") - { - return Err(PageReconstructError::from(e)); - } - } - Ok(img) } } @@ -5438,6 +5454,11 @@ impl Timeline { self.last_record_lsn.advance(new_lsn); } + #[cfg(test)] + pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) { + self.disk_consistent_lsn.store(new_value); + } + /// Force create an image layer and place it into the layer map. /// /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`] diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index 5eae2d8204f0..a665cafafe71 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -381,6 +381,15 @@ pageserver_connect(shardno_t shard_no, int elevel) us_since_last_attempt = (int64) (now - shard->last_reconnect_time); shard->last_reconnect_time = now; + /* + * Make sure we don't do exponential backoff with a constant multiplier + * of 0 us, as that doesn't really do much for timeouts... + * + * cf. https://github.com/neondatabase/neon/issues/7897 + */ + if (shard->delay_us == 0) + shard->delay_us = MIN_RECONNECT_INTERVAL_USEC; + /* * If we did other tasks between reconnect attempts, then we won't * need to wait as long as a full delay. 
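The libpagestore fix above guards against an exponential backoff whose base delay is 0 µs, which would otherwise never grow no matter how often it is multiplied. A compact illustration of the same clamp in Rust (the constants are assumptions for the sketch, not the extension's actual values):

```rust
use std::time::Duration;

const MIN_RECONNECT_INTERVAL: Duration = Duration::from_millis(1);
const MAX_RECONNECT_INTERVAL: Duration = Duration::from_secs(1);

/// Next reconnect delay: doubling a zero delay stays zero forever, so clamp to
/// a minimum before applying the multiplier, as the C change does.
fn next_delay(current: Duration) -> Duration {
    let base = if current.is_zero() {
        MIN_RECONNECT_INTERVAL
    } else {
        current
    };
    (base * 2).min(MAX_RECONNECT_INTERVAL)
}

fn main() {
    let mut delay = Duration::ZERO;
    let mut schedule = Vec::new();
    for _ in 0..5 {
        delay = next_delay(delay);
        schedule.push(delay);
    }
    // Without the clamp every entry would be zero; with it the delay actually backs off.
    assert!(schedule.iter().all(|d| !d.is_zero()));
    assert!(schedule.windows(2).all(|w| w[1] >= w[0]));
}
```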
diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index 316e23a72eba..da1a6f76f0a2 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -100,17 +100,12 @@ static void StartProposerReplication(WalProposer *wp, StartReplicationCmd *cmd); static void WalSndLoop(WalProposer *wp); static void XLogBroadcastWalProposer(WalProposer *wp); -static void XLogWalPropWrite(WalProposer *wp, char *buf, Size nbytes, XLogRecPtr recptr); -static void XLogWalPropClose(XLogRecPtr recptr); - static void add_nwr_event_set(Safekeeper *sk, uint32 events); static void update_nwr_event_set(Safekeeper *sk, uint32 events); static void rm_safekeeper_event_set(Safekeeper *to_remove, bool is_sk); static void CheckGracefulShutdown(WalProposer *wp); -static XLogRecPtr GetLogRepRestartLSN(WalProposer *wp); - static void init_walprop_config(bool syncSafekeepers) { @@ -1236,8 +1231,6 @@ StartProposerReplication(WalProposer *wp, StartReplicationCmd *cmd) static void WalSndLoop(WalProposer *wp) { - XLogRecPtr flushPtr; - /* Clear any already-pending wakeups */ ResetLatch(MyLatch); @@ -1333,8 +1326,9 @@ XLogBroadcastWalProposer(WalProposer *wp) } /* - Used to download WAL before basebackup for logical walsenders from sk, no longer - needed because walsender always uses neon_walreader. + Used to download WAL before basebackup for walproposer/logical walsenders. No + longer used, replaced by neon_walreader; but callback still exists because + simulation tests use it. */ static bool WalProposerRecovery(WalProposer *wp, Safekeeper *sk) @@ -1342,136 +1336,6 @@ WalProposerRecovery(WalProposer *wp, Safekeeper *sk) return true; } -/* - * These variables are used similarly to openLogFile/SegNo, - * but for walproposer to write the XLOG during recovery. walpropFileTLI is the TimeLineID - * corresponding the filename of walpropFile. - */ -static int walpropFile = -1; -static TimeLineID walpropFileTLI = 0; -static XLogSegNo walpropSegNo = 0; - -/* - * Write XLOG data to disk. - */ -static void -XLogWalPropWrite(WalProposer *wp, char *buf, Size nbytes, XLogRecPtr recptr) -{ - int startoff; - int byteswritten; - - /* - * Apart from walproposer, basebackup LSN page is also written out by - * postgres itself which writes WAL only in pages, and in basebackup it is - * inherently dummy (only safekeepers have historic WAL). Update WAL - * buffers here to avoid dummy page overwriting correct one we download - * here. Ugly, but alternatives are about the same ugly. We won't need - * that if we switch to on-demand WAL download from safekeepers, without - * writing to disk. - * - * https://github.com/neondatabase/neon/issues/5749 - */ - if (!wp->config->syncSafekeepers) - XLogUpdateWalBuffers(buf, recptr, nbytes); - - while (nbytes > 0) - { - int segbytes; - - /* Close the current segment if it's completed */ - if (walpropFile >= 0 && !XLByteInSeg(recptr, walpropSegNo, wal_segment_size)) - XLogWalPropClose(recptr); - - if (walpropFile < 0) - { -#if PG_VERSION_NUM >= 150000 - /* FIXME Is it ok to use hardcoded value here? 
*/ - TimeLineID tli = 1; -#else - bool use_existent = true; -#endif - /* Create/use new log file */ - XLByteToSeg(recptr, walpropSegNo, wal_segment_size); -#if PG_VERSION_NUM >= 150000 - walpropFile = XLogFileInit(walpropSegNo, tli); - walpropFileTLI = tli; -#else - walpropFile = XLogFileInit(walpropSegNo, &use_existent, false); - walpropFileTLI = ThisTimeLineID; -#endif - } - - /* Calculate the start offset of the received logs */ - startoff = XLogSegmentOffset(recptr, wal_segment_size); - - if (startoff + nbytes > wal_segment_size) - segbytes = wal_segment_size - startoff; - else - segbytes = nbytes; - - /* OK to write the logs */ - errno = 0; - - byteswritten = pg_pwrite(walpropFile, buf, segbytes, (off_t) startoff); - if (byteswritten <= 0) - { - char xlogfname[MAXFNAMELEN]; - int save_errno; - - /* if write didn't set errno, assume no disk space */ - if (errno == 0) - errno = ENOSPC; - - save_errno = errno; - XLogFileName(xlogfname, walpropFileTLI, walpropSegNo, wal_segment_size); - errno = save_errno; - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not write to log segment %s " - "at offset %u, length %lu: %m", - xlogfname, startoff, (unsigned long) segbytes))); - } - - /* Update state for write */ - recptr += byteswritten; - - nbytes -= byteswritten; - buf += byteswritten; - } - - /* - * Close the current segment if it's fully written up in the last cycle of - * the loop. - */ - if (walpropFile >= 0 && !XLByteInSeg(recptr, walpropSegNo, wal_segment_size)) - { - XLogWalPropClose(recptr); - } -} - -/* - * Close the current segment. - */ -static void -XLogWalPropClose(XLogRecPtr recptr) -{ - Assert(walpropFile >= 0 && !XLByteInSeg(recptr, walpropSegNo, wal_segment_size)); - - if (close(walpropFile) != 0) - { - char xlogfname[MAXFNAMELEN]; - - XLogFileName(xlogfname, walpropFileTLI, walpropSegNo, wal_segment_size); - - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not close log segment %s: %m", - xlogfname))); - } - - walpropFile = -1; -} - static void walprop_pg_wal_reader_allocate(Safekeeper *sk) { @@ -1987,58 +1851,6 @@ walprop_pg_log_internal(WalProposer *wp, int level, const char *line) elog(FATAL, "unexpected log_internal message at level %d: %s", level, line); } -static XLogRecPtr -GetLogRepRestartLSN(WalProposer *wp) -{ - FILE *f; - XLogRecPtr lrRestartLsn = InvalidXLogRecPtr; - - /* We don't need to do anything in syncSafekeepers mode. */ - if (wp->config->syncSafekeepers) - return InvalidXLogRecPtr; - - /* - * If there are active logical replication subscription we need to provide - * enough WAL for their WAL senders based on th position of their - * replication slots. - */ - f = fopen("restart.lsn", "rb"); - if (f != NULL) - { - size_t rc = fread(&lrRestartLsn, sizeof(lrRestartLsn), 1, f); - - fclose(f); - if (rc == 1 && lrRestartLsn != InvalidXLogRecPtr) - { - uint64 download_range_mb; - - wpg_log(LOG, "logical replication restart LSN %X/%X", LSN_FORMAT_ARGS(lrRestartLsn)); - - /* - * If we need to download more than a max_slot_wal_keep_size, - * don't do it to avoid risk of exploding pg_wal. Logical - * replication won't work until recreated, but at least compute - * would start; this also follows max_slot_wal_keep_size - * semantics. 
- */ - download_range_mb = (wp->propEpochStartLsn - lrRestartLsn) / MB; - if (max_slot_wal_keep_size_mb > 0 && download_range_mb >= max_slot_wal_keep_size_mb) - { - wpg_log(WARNING, "not downloading WAL for logical replication since %X/%X as max_slot_wal_keep_size=%dMB", - LSN_FORMAT_ARGS(lrRestartLsn), max_slot_wal_keep_size_mb); - return InvalidXLogRecPtr; - } - - /* - * start from the beginning of the segment to fetch page headers - * verifed by XLogReader - */ - lrRestartLsn = lrRestartLsn - XLogSegmentOffset(lrRestartLsn, wal_segment_size); - } - } - return lrRestartLsn; -} - void SetNeonCurrentClusterSize(uint64 size) { diff --git a/poetry.lock b/poetry.lock index 25c0c7398d42..7740388fb8be 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2806,13 +2806,13 @@ files = [ [[package]] name = "urllib3" -version = "1.26.18" +version = "1.26.19" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, + {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, ] [package.extras] diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index 1355b7e1d86b..e72bf199e362 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -543,7 +543,9 @@ mod tests { rx: impl Stream, ) -> Vec<(u64, usize, i64)> { let remote_storage_config = RemoteStorageConfig { - storage: RemoteStorageKind::LocalFs(tmpdir.to_path_buf()), + storage: RemoteStorageKind::LocalFs { + local_path: tmpdir.to_path_buf(), + }, timeout: std::time::Duration::from_secs(120), }; let storage = GenericRemoteStorage::from_config(&remote_storage_config).unwrap(); diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs index 95b46ae002fe..072f51958f48 100644 --- a/proxy/src/proxy.rs +++ b/proxy/src/proxy.rs @@ -91,7 +91,7 @@ pub async fn task_main( let endpoint_rate_limiter2 = endpoint_rate_limiter.clone(); connections.spawn(async move { - let (socket, peer_addr) = match read_proxy_protocol(socket).await{ + let (socket, peer_addr) = match read_proxy_protocol(socket).await { Ok((socket, Some(addr))) => (socket, addr.ip()), Err(e) => { error!("per-client task finished with an error: {e:#}"); @@ -101,36 +101,38 @@ pub async fn task_main( error!("missing required client IP"); return; } - Ok((socket, None)) => (socket, peer_addr.ip()) + Ok((socket, None)) => (socket, peer_addr.ip()), }; match socket.inner.set_nodelay(true) { - Ok(()) => {}, + Ok(()) => {} Err(e) => { error!("per-client task finished with an error: failed to set socket option: {e:#}"); return; - }, + } }; let mut ctx = RequestMonitoring::new( - session_id, - peer_addr, - crate::metrics::Protocol::Tcp, - &config.region, - ); + session_id, + peer_addr, + crate::metrics::Protocol::Tcp, + &config.region, + ); let span = ctx.span.clone(); - let res = handle_client( - config, - &mut ctx, - cancellation_handler, - socket, - ClientMode::Tcp, - endpoint_rate_limiter2, - conn_gauge, - ) - 
.instrument(span.clone()) - .await; + let startup = Box::pin( + handle_client( + config, + &mut ctx, + cancellation_handler, + socket, + ClientMode::Tcp, + endpoint_rate_limiter2, + conn_gauge, + ) + .instrument(span.clone()), + ); + let res = startup.await; match res { Err(e) => { diff --git a/proxy/src/proxy/copy_bidirectional.rs b/proxy/src/proxy/copy_bidirectional.rs index 4b09ebd8dc2c..aaf3688f21a0 100644 --- a/proxy/src/proxy/copy_bidirectional.rs +++ b/proxy/src/proxy/copy_bidirectional.rs @@ -98,7 +98,7 @@ pub(super) struct CopyBuffer { amt: u64, buf: Box<[u8]>, } -const DEFAULT_BUF_SIZE: usize = 8 * 1024; +const DEFAULT_BUF_SIZE: usize = 1024; impl CopyBuffer { pub(super) fn new() -> Self { diff --git a/proxy/src/serverless.rs b/proxy/src/serverless.rs index 24ee749e6e92..efa999ed7d79 100644 --- a/proxy/src/serverless.rs +++ b/proxy/src/serverless.rs @@ -27,14 +27,14 @@ use rand::SeedableRng; pub use reqwest_middleware::{ClientWithMiddleware, Error}; pub use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; use tokio::time::timeout; -use tokio_rustls::TlsAcceptor; +use tokio_rustls::{server::TlsStream, TlsAcceptor}; use tokio_util::task::TaskTracker; use crate::cancellation::CancellationHandlerMain; use crate::config::ProxyConfig; use crate::context::RequestMonitoring; use crate::metrics::Metrics; -use crate::protocol2::read_proxy_protocol; +use crate::protocol2::{read_proxy_protocol, ChainRW}; use crate::proxy::run_until_cancelled; use crate::rate_limiter::EndpointRateLimiter; use crate::serverless::backend::PoolingBackend; @@ -102,8 +102,6 @@ pub async fn task_main( let connections = tokio_util::task::task_tracker::TaskTracker::new(); connections.close(); // allows `connections.wait to complete` - let server = Builder::new(TokioExecutor::new()); - while let Some(res) = run_until_cancelled(ws_listener.accept(), &cancellation_token).await { let (conn, peer_addr) = res.context("could not accept TCP stream")?; if let Err(e) = conn.set_nodelay(true) { @@ -127,24 +125,50 @@ pub async fn task_main( } let conn_token = cancellation_token.child_token(); - let conn = connection_handler( - config, - backend.clone(), - connections.clone(), - cancellation_handler.clone(), - endpoint_rate_limiter.clone(), - conn_token.clone(), - server.clone(), - tls_acceptor.clone(), - conn, - peer_addr, - ) - .instrument(http_conn_span); + let tls_acceptor = tls_acceptor.clone(); + let backend = backend.clone(); + let connections2 = connections.clone(); + let cancellation_handler = cancellation_handler.clone(); + let endpoint_rate_limiter = endpoint_rate_limiter.clone(); + connections.spawn( + async move { + let conn_token2 = conn_token.clone(); + let _cancel_guard = config.http_config.cancel_set.insert(conn_id, conn_token2); - connections.spawn(async move { - let _cancel_guard = config.http_config.cancel_set.insert(conn_id, conn_token); - conn.await - }); + let session_id = uuid::Uuid::new_v4(); + + let _gauge = Metrics::get() + .proxy + .client_connections + .guard(crate::metrics::Protocol::Http); + + let startup_result = Box::pin(connection_startup( + config, + tls_acceptor, + session_id, + conn, + peer_addr, + )) + .await; + let Some((conn, peer_addr)) = startup_result else { + return; + }; + + Box::pin(connection_handler( + config, + backend, + connections2, + cancellation_handler, + endpoint_rate_limiter, + conn_token, + conn, + peer_addr, + session_id, + )) + .await; + } + .instrument(http_conn_span), + ); } connections.wait().await; @@ -152,40 +176,22 @@ pub async fn task_main( 
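// Editor's note: illustrative sketch, not part of the patch. This change (and the
// matching ones in proxy.rs, sql_over_http.rs and websocket.rs) wraps large handler
// futures in Box::pin before awaiting them. Assuming the intent is to keep the
// enclosing task's future small by heap-allocating the big inner state machine,
// the pattern reduces to the following (`large_handler` / `spawn_boxed` are
// hypothetical names):

async fn large_handler() {
    // imagine a deeply nested async state machine here
}

fn spawn_boxed(tracker: &tokio_util::task::TaskTracker) {
    tracker.spawn(async move {
        // Box::pin moves the inner future to the heap, so the spawned task's own
        // future stays small no matter how large `large_handler`'s future is.
        Box::pin(large_handler()).await;
    });
}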
Ok(()) } -/// Handles the TCP lifecycle. -/// +/// Handles the TCP startup lifecycle. /// 1. Parses PROXY protocol V2 /// 2. Handles TLS handshake -/// 3. Handles HTTP connection -/// 1. With graceful shutdowns -/// 2. With graceful request cancellation with connection failure -/// 3. With websocket upgrade support. -#[allow(clippy::too_many_arguments)] -async fn connection_handler( - config: &'static ProxyConfig, - backend: Arc, - connections: TaskTracker, - cancellation_handler: Arc, - endpoint_rate_limiter: Arc, - cancellation_token: CancellationToken, - server: Builder, +async fn connection_startup( + config: &ProxyConfig, tls_acceptor: TlsAcceptor, + session_id: uuid::Uuid, conn: TcpStream, peer_addr: SocketAddr, -) { - let session_id = uuid::Uuid::new_v4(); - - let _gauge = Metrics::get() - .proxy - .client_connections - .guard(crate::metrics::Protocol::Http); - +) -> Option<(TlsStream>, IpAddr)> { // handle PROXY protocol let (conn, peer) = match read_proxy_protocol(conn).await { Ok(c) => c, Err(e) => { tracing::error!(?session_id, %peer_addr, "failed to accept TCP connection: invalid PROXY protocol V2 header: {e:#}"); - return; + return None; } }; @@ -208,7 +214,7 @@ async fn connection_handler( Metrics::get().proxy.tls_handshake_failures.inc(); } warn!(?session_id, %peer_addr, "failed to accept TLS connection: {e:?}"); - return; + return None; } // The handshake timed out Err(e) => { @@ -216,16 +222,36 @@ async fn connection_handler( Metrics::get().proxy.tls_handshake_failures.inc(); } warn!(?session_id, %peer_addr, "failed to accept TLS connection: {e:?}"); - return; + return None; } }; + Some((conn, peer_addr)) +} + +/// Handles HTTP connection +/// 1. With graceful shutdowns +/// 2. With graceful request cancellation with connection failure +/// 3. With websocket upgrade support. +#[allow(clippy::too_many_arguments)] +async fn connection_handler( + config: &'static ProxyConfig, + backend: Arc, + connections: TaskTracker, + cancellation_handler: Arc, + endpoint_rate_limiter: Arc, + cancellation_token: CancellationToken, + conn: TlsStream>, + peer_addr: IpAddr, + session_id: uuid::Uuid, +) { let session_id = AtomicTake::new(session_id); // Cancel all current inflight HTTP requests if the HTTP connection is closed. let http_cancellation_token = CancellationToken::new(); let _cancel_connection = http_cancellation_token.clone().drop_guard(); + let server = Builder::new(TokioExecutor::new()); let conn = server.serve_connection_with_upgrades( hyper_util::rt::TokioIo::new(conn), hyper1::service::service_fn(move |req: hyper1::Request| { diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index a40c66a80d3f..86e64c0a386d 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -104,7 +104,7 @@ impl PoolingBackend { ) -> Result, HttpConnError> { let maybe_client = if !force_new { info!("pool: looking for an existing connection"); - self.pool.get(ctx, &conn_info).await? + self.pool.get(ctx, &conn_info)? 
} else { info!("pool: pool is disabled"); None diff --git a/proxy/src/serverless/conn_pool.rs b/proxy/src/serverless/conn_pool.rs index 5fa253acf86d..170bda062e51 100644 --- a/proxy/src/serverless/conn_pool.rs +++ b/proxy/src/serverless/conn_pool.rs @@ -375,7 +375,7 @@ impl GlobalConnPool { } } - pub async fn get( + pub fn get( self: &Arc, ctx: &mut RequestMonitoring, conn_info: &ConnInfo, diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index 9d6a475aebf3..7a99aeb75938 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -533,27 +533,31 @@ async fn handle_inner( return Err(SqlOverHttpError::RequestTooLarge); } - let fetch_and_process_request = async { - let body = request.into_body().collect().await?.to_bytes(); - info!(length = body.len(), "request payload read"); - let payload: Payload = serde_json::from_slice(&body)?; - Ok::(payload) // Adjust error type accordingly - } - .map_err(SqlOverHttpError::from); - - let authenticate_and_connect = async { - let keys = backend - .authenticate(ctx, &config.authentication_config, &conn_info) - .await?; - let client = backend - .connect_to_compute(ctx, conn_info, keys, !allow_pool) - .await?; - // not strictly necessary to mark success here, - // but it's just insurance for if we forget it somewhere else - ctx.latency_timer.success(); - Ok::<_, HttpConnError>(client) - } - .map_err(SqlOverHttpError::from); + let fetch_and_process_request = Box::pin( + async { + let body = request.into_body().collect().await?.to_bytes(); + info!(length = body.len(), "request payload read"); + let payload: Payload = serde_json::from_slice(&body)?; + Ok::(payload) // Adjust error type accordingly + } + .map_err(SqlOverHttpError::from), + ); + + let authenticate_and_connect = Box::pin( + async { + let keys = backend + .authenticate(ctx, &config.authentication_config, &conn_info) + .await?; + let client = backend + .connect_to_compute(ctx, conn_info, keys, !allow_pool) + .await?; + // not strictly necessary to mark success here, + // but it's just insurance for if we forget it somewhere else + ctx.latency_timer.success(); + Ok::<_, HttpConnError>(client) + } + .map_err(SqlOverHttpError::from), + ); let (payload, mut client) = match run_until_cancelled( // Run both operations in parallel diff --git a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 7d3153a3c1e7..0e9772733da4 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -141,7 +141,7 @@ pub async fn serve_websocket( .client_connections .guard(crate::metrics::Protocol::Ws); - let res = handle_client( + let res = Box::pin(handle_client( config, &mut ctx, cancellation_handler, @@ -149,7 +149,7 @@ pub async fn serve_websocket( ClientMode::Websockets { hostname }, endpoint_rate_limiter, conn_gauge, - ) + )) .await; match res { diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index c8b732fee143..a650d5e20753 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -46,6 +46,7 @@ tokio = { workspace = true, features = ["fs"] } tokio-util = { workspace = true } tokio-io-timeout.workspace = true tokio-postgres.workspace = true +tokio-tar.workspace = true toml_edit.workspace = true tracing.workspace = true url.workspace = true diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 7476654426a8..86238c729271 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -13,7 +13,9 @@ use 
tokio::runtime::Handle; use tokio::signal::unix::{signal, SignalKind}; use tokio::task::JoinError; use toml_edit::Document; +use utils::logging::SecretString; +use std::env::{var, VarError}; use std::fs::{self, File}; use std::io::{ErrorKind, Write}; use std::str::FromStr; @@ -287,6 +289,22 @@ async fn main() -> anyhow::Result<()> { } }; + // Load JWT auth token to connect to other safekeepers for pull_timeline. + let sk_auth_token = match var("SAFEKEEPER_AUTH_TOKEN") { + Ok(v) => { + info!("loaded JWT token for authentication with safekeepers"); + Some(SecretString::from(v)) + } + Err(VarError::NotPresent) => { + info!("no JWT token for authentication with safekeepers detected"); + None + } + Err(_) => { + warn!("JWT token for authentication with safekeepers is not unicode"); + None + } + }; + let conf = SafeKeeperConf { workdir, my_id: id, @@ -307,6 +325,7 @@ async fn main() -> anyhow::Result<()> { pg_auth, pg_tenant_only_auth, http_auth, + sk_auth_token, current_thread_runtime: args.current_thread_runtime, walsenders_keep_horizon: args.walsenders_keep_horizon, partial_backup_enabled: args.partial_backup_enabled, diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index e9bb5202dad8..8e9031fae4fe 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -12,18 +12,19 @@ use std::ops::Deref; use std::path::Path; use std::time::Instant; +use crate::control_file_upgrade::downgrade_v9_to_v8; use crate::metrics::PERSIST_CONTROL_FILE_SECONDS; -use crate::state::TimelinePersistentState; +use crate::state::{EvictionState, TimelinePersistentState}; use crate::{control_file_upgrade::upgrade_control_file, timeline::get_timeline_dir}; use utils::{bin_ser::LeSer, id::TenantTimelineId}; use crate::SafeKeeperConf; pub const SK_MAGIC: u32 = 0xcafeceefu32; -pub const SK_FORMAT_VERSION: u32 = 8; +pub const SK_FORMAT_VERSION: u32 = 9; // contains persistent metadata for safekeeper -const CONTROL_FILE_NAME: &str = "safekeeper.control"; +pub const CONTROL_FILE_NAME: &str = "safekeeper.control"; // needed to atomically update the state using `rename` const CONTROL_FILE_NAME_PARTIAL: &str = "safekeeper.control.partial"; pub const CHECKSUM_SIZE: usize = std::mem::size_of::(); @@ -178,8 +179,18 @@ impl Storage for FileStorage { })?; let mut buf: Vec = Vec::new(); WriteBytesExt::write_u32::(&mut buf, SK_MAGIC)?; - WriteBytesExt::write_u32::(&mut buf, SK_FORMAT_VERSION)?; - s.ser_into(&mut buf)?; + + if s.eviction_state == EvictionState::Present { + // temp hack for forward compatibility + const PREV_FORMAT_VERSION: u32 = 8; + let prev = downgrade_v9_to_v8(s); + WriteBytesExt::write_u32::(&mut buf, PREV_FORMAT_VERSION)?; + prev.ser_into(&mut buf)?; + } else { + // otherwise, we write the current format version + WriteBytesExt::write_u32::(&mut buf, SK_FORMAT_VERSION)?; + s.ser_into(&mut buf)?; + } // calculate checksum before resize let checksum = crc32c::crc32c(&buf); diff --git a/safekeeper/src/control_file_upgrade.rs b/safekeeper/src/control_file_upgrade.rs index 8f4dfe9b4329..a4b4670e423b 100644 --- a/safekeeper/src/control_file_upgrade.rs +++ b/safekeeper/src/control_file_upgrade.rs @@ -1,7 +1,7 @@ //! 
Code to deal with safekeeper control file upgrades use crate::{ safekeeper::{AcceptorState, PgUuid, ServerInfo, Term, TermHistory, TermLsn}, - state::{PersistedPeers, TimelinePersistentState}, + state::{EvictionState, PersistedPeers, TimelinePersistentState}, wal_backup_partial, }; use anyhow::{bail, Result}; @@ -183,6 +183,55 @@ pub struct SafeKeeperStateV7 { pub peers: PersistedPeers, } +/// Persistent information stored on safekeeper node about timeline. +/// On disk data is prefixed by magic and format version and followed by checksum. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SafeKeeperStateV8 { + #[serde(with = "hex")] + pub tenant_id: TenantId, + #[serde(with = "hex")] + pub timeline_id: TimelineId, + /// persistent acceptor state + pub acceptor_state: AcceptorState, + /// information about server + pub server: ServerInfo, + /// Unique id of the last *elected* proposer we dealt with. Not needed + /// for correctness, exists for monitoring purposes. + #[serde(with = "hex")] + pub proposer_uuid: PgUuid, + /// Since which LSN this timeline generally starts. Safekeeper might have + /// joined later. + pub timeline_start_lsn: Lsn, + /// Since which LSN safekeeper has (had) WAL for this timeline. + /// All WAL segments next to one containing local_start_lsn are + /// filled with data from the beginning. + pub local_start_lsn: Lsn, + /// Part of WAL acknowledged by quorum *and available locally*. Always points + /// to record boundary. + pub commit_lsn: Lsn, + /// LSN that points to the end of the last backed up segment. Useful to + /// persist to avoid finding out offloading progress on boot. + pub backup_lsn: Lsn, + /// Minimal LSN which may be needed for recovery of some safekeeper (end_lsn + /// of last record streamed to everyone). Persisting it helps skipping + /// recovery in walproposer, generally we compute it from peers. In + /// walproposer proto called 'truncate_lsn'. Updates are currently drived + /// only by walproposer. + pub peer_horizon_lsn: Lsn, + /// LSN of the oldest known checkpoint made by pageserver and successfully + /// pushed to s3. We don't remove WAL beyond it. Persisted only for + /// informational purposes, we receive it from pageserver (or broker). + pub remote_consistent_lsn: Lsn, + /// Peers and their state as we remember it. Knowing peers themselves is + /// fundamental; but state is saved here only for informational purposes and + /// obviously can be stale. (Currently not saved at all, but let's provision + /// place to have less file version upgrades). + pub peers: PersistedPeers, + /// Holds names of partial segments uploaded to remote storage. Used to + /// clean up old objects without leaving garbage in remote storage. 
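// Editor's note: illustrative sketch, not part of the patch. SafeKeeperStateV8 is
// the downgrade target used by control_file.rs above: when eviction_state is
// Present, persist() serializes this struct and stamps it with format version 8
// for forward compatibility. Assuming the on-disk layout implied by the
// surrounding code (magic, format version, serialized state, trailing crc32c,
// all little-endian), encoding either version comes down to:

fn encode_control_file(magic: u32, version: u32, state_bytes: &[u8]) -> Vec<u8> {
    use byteorder::{LittleEndian, WriteBytesExt};
    let mut buf: Vec<u8> = Vec::new();
    // header: magic + format version
    buf.write_u32::<LittleEndian>(magic).unwrap();
    buf.write_u32::<LittleEndian>(version).unwrap();
    // body: the state serialized with the struct matching `version` (v8 or v9)
    buf.extend_from_slice(state_bytes);
    // trailer: checksum over header + body
    let checksum = crc32c::crc32c(&buf);
    buf.extend_from_slice(&checksum.to_le_bytes());
    buf
}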
+ pub partial_backup: wal_backup_partial::State, +} + pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result { // migrate to storing full term history if version == 1 { @@ -213,6 +262,7 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result Result Result Result Result Result SafeKeeperStateV8 { + assert!(state.eviction_state == EvictionState::Present); + SafeKeeperStateV8 { + tenant_id: state.tenant_id, + timeline_id: state.timeline_id, + acceptor_state: state.acceptor_state.clone(), + server: state.server.clone(), + proposer_uuid: state.proposer_uuid, + timeline_start_lsn: state.timeline_start_lsn, + local_start_lsn: state.local_start_lsn, + commit_lsn: state.commit_lsn, + backup_lsn: state.backup_lsn, + peer_horizon_lsn: state.peer_horizon_lsn, + remote_consistent_lsn: state.remote_consistent_lsn, + peers: state.peers.clone(), + partial_backup: state.partial_backup.clone(), + } +} + #[cfg(test)] mod tests { use std::str::FromStr; diff --git a/safekeeper/src/http/client.rs b/safekeeper/src/http/client.rs new file mode 100644 index 000000000000..0bb31c200d5a --- /dev/null +++ b/safekeeper/src/http/client.rs @@ -0,0 +1,139 @@ +//! Safekeeper http client. +//! +//! Partially copied from pageserver client; some parts might be better to be +//! united. +//! +//! It would be also good to move it out to separate crate, but this needs +//! duplication of internal-but-reported structs like WalSenderState, ServerInfo +//! etc. + +use reqwest::{IntoUrl, Method, StatusCode}; +use utils::{ + http::error::HttpErrorBody, + id::{TenantId, TimelineId}, + logging::SecretString, +}; + +use super::routes::TimelineStatus; + +#[derive(Debug, Clone)] +pub struct Client { + mgmt_api_endpoint: String, + authorization_header: Option, + client: reqwest::Client, +} + +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// Failed to receive body (reqwest error). + #[error("receive body: {0}")] + ReceiveBody(reqwest::Error), + + /// Status is not ok, but failed to parse body as `HttpErrorBody`. + #[error("receive error body: {0}")] + ReceiveErrorBody(String), + + /// Status is not ok; parsed error in body as `HttpErrorBody`. + #[error("safekeeper API: {1}")] + ApiError(StatusCode, String), +} + +pub type Result = std::result::Result; + +pub trait ResponseErrorMessageExt: Sized { + fn error_from_body(self) -> impl std::future::Future> + Send; +} + +/// If status is not ok, try to extract error message from the body. 
+impl ResponseErrorMessageExt for reqwest::Response { + async fn error_from_body(self) -> Result { + let status = self.status(); + if !(status.is_client_error() || status.is_server_error()) { + return Ok(self); + } + + let url = self.url().to_owned(); + Err(match self.json::().await { + Ok(HttpErrorBody { msg }) => Error::ApiError(status, msg), + Err(_) => { + Error::ReceiveErrorBody(format!("http error ({}) at {}.", status.as_u16(), url)) + } + }) + } +} + +impl Client { + pub fn new(mgmt_api_endpoint: String, jwt: Option) -> Self { + Self::from_client(reqwest::Client::new(), mgmt_api_endpoint, jwt) + } + + pub fn from_client( + client: reqwest::Client, + mgmt_api_endpoint: String, + jwt: Option, + ) -> Self { + Self { + mgmt_api_endpoint, + authorization_header: jwt + .map(|jwt| SecretString::from(format!("Bearer {}", jwt.get_contents()))), + client, + } + } + + pub async fn timeline_status( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + ) -> Result { + let uri = format!( + "{}/v1/tenant/{}/timeline/{}", + self.mgmt_api_endpoint, tenant_id, timeline_id + ); + let resp = self.get(&uri).await?; + resp.json().await.map_err(Error::ReceiveBody) + } + + pub async fn snapshot( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + ) -> Result { + let uri = format!( + "{}/v1/tenant/{}/timeline/{}/snapshot", + self.mgmt_api_endpoint, tenant_id, timeline_id + ); + self.get(&uri).await + } + + async fn get(&self, uri: U) -> Result { + self.request(Method::GET, uri, ()).await + } + + /// Send the request and check that the status code is good. + async fn request( + &self, + method: Method, + uri: U, + body: B, + ) -> Result { + let res = self.request_noerror(method, uri, body).await?; + let response = res.error_from_body().await?; + Ok(response) + } + + /// Just send the request. 
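// Editor's note: illustrative usage sketch, not part of the patch. This is how
// pull_timeline (below) is expected to drive the client: construct it with a
// peer's HTTP endpoint and the optional JWT, query the timeline status, then
// stream the snapshot archive. Local names are hypothetical; error handling is
// elided.

async fn example_donor_query(
    donor_endpoint: String,
    token: Option<utils::logging::SecretString>,
    tenant_id: utils::id::TenantId,
    timeline_id: utils::id::TimelineId,
) -> Result<()> {
    let client = Client::new(donor_endpoint, token);
    // JSON status of the donor's timeline (commit_lsn, flush_lsn, term, ...).
    let _status: TimelineStatus = client.timeline_status(tenant_id, timeline_id).await?;
    // Streaming response whose body is a tar archive of the timeline
    // (control file first, then WAL segments); see pull_timeline below.
    let snapshot_resp = client.snapshot(tenant_id, timeline_id).await?;
    let _tar_stream = snapshot_resp.bytes_stream();
    Ok(())
}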
+ async fn request_noerror( + &self, + method: Method, + uri: U, + body: B, + ) -> Result { + let req = self.client.request(method, uri); + let req = if let Some(value) = &self.authorization_header { + req.header(reqwest::header::AUTHORIZATION, value.get_contents()) + } else { + req + }; + req.json(&body).send().await.map_err(Error::ReceiveBody) + } +} diff --git a/safekeeper/src/http/mod.rs b/safekeeper/src/http/mod.rs index 2a9570595f8a..52fb13ff5b20 100644 --- a/safekeeper/src/http/mod.rs +++ b/safekeeper/src/http/mod.rs @@ -1,3 +1,4 @@ +pub mod client; pub mod routes; pub use routes::make_router; diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 1e29b21fac6d..3f2cd97ccd01 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -1,38 +1,25 @@ use hyper::{Body, Request, Response, StatusCode, Uri}; - use once_cell::sync::Lazy; -use postgres_ffi::WAL_SEGMENT_SIZE; -use safekeeper_api::models::{SkTimelineInfo, TimelineCopyRequest}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::io::Write as _; use std::str::FromStr; use std::sync::Arc; use storage_broker::proto::SafekeeperTimelineInfo; use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId; -use tokio::fs::File; -use tokio::io::AsyncReadExt; -use tokio_util::sync::CancellationToken; -use utils::failpoint_support::failpoints_handler; -use utils::http::request::parse_query_param; - -use std::io::Write as _; use tokio::sync::mpsc; +use tokio::task; use tokio_stream::wrappers::ReceiverStream; +use tokio_util::sync::CancellationToken; use tracing::{info_span, Instrument}; +use utils::failpoint_support::failpoints_handler; use utils::http::endpoint::{prometheus_metrics_handler, request_span, ChannelWriter}; +use utils::http::request::parse_query_param; -use crate::debug_dump::TimelineDigestRequest; -use crate::receive_wal::WalReceiverState; -use crate::safekeeper::Term; -use crate::safekeeper::{ServerInfo, TermLsn}; -use crate::send_wal::WalSenderState; -use crate::timeline::PeerInfo; -use crate::{copy_timeline, debug_dump, patch_control_file, pull_timeline}; - -use crate::timelines_global_map::TimelineDeleteForceResult; -use crate::GlobalTimelines; -use crate::SafeKeeperConf; +use postgres_ffi::WAL_SEGMENT_SIZE; +use safekeeper_api::models::TimelineCreateRequest; +use safekeeper_api::models::{SkTimelineInfo, TimelineCopyRequest}; use utils::{ auth::SwappableJwtAuth, http::{ @@ -46,7 +33,16 @@ use utils::{ lsn::Lsn, }; -use super::models::TimelineCreateRequest; +use crate::debug_dump::TimelineDigestRequest; +use crate::receive_wal::WalReceiverState; +use crate::safekeeper::Term; +use crate::safekeeper::{ServerInfo, TermLsn}; +use crate::send_wal::WalSenderState; +use crate::timeline::PeerInfo; +use crate::timelines_global_map::TimelineDeleteForceResult; +use crate::GlobalTimelines; +use crate::SafeKeeperConf; +use crate::{copy_timeline, debug_dump, patch_control_file, pull_timeline}; #[derive(Debug, Serialize)] struct SafekeeperStatus { @@ -199,13 +195,50 @@ async fn timeline_pull_handler(mut request: Request) -> Result) -> Result, ApiError> { + let ttid = TenantTimelineId::new( + parse_request_param(&request, "tenant_id")?, + parse_request_param(&request, "timeline_id")?, + ); + check_permission(&request, Some(ttid.tenant_id))?; + + let tli = GlobalTimelines::get(ttid).map_err(ApiError::from)?; + // Note: with evicted timelines it should work better then de-evict them and + // stream; probably start_snapshot would copy 
partial s3 file to dest path + // and stream control file, or return FullAccessTimeline if timeline is not + // evicted. + let tli = tli + .full_access_guard() + .await + .map_err(ApiError::InternalServerError)?; + + // To stream the body use wrap_stream which wants Stream of Result, + // so create the chan and write to it in another task. + let (tx, rx) = mpsc::channel(1); + + task::spawn(pull_timeline::stream_snapshot(tli, tx)); + + let rx_stream = ReceiverStream::new(rx); + let body = Body::wrap_stream(rx_stream); + + let response = Response::builder() + .status(200) + .header(hyper::header::CONTENT_TYPE, "application/octet-stream") + .body(body) + .unwrap(); + + Ok(response) +} + async fn timeline_copy_handler(mut request: Request) -> Result, ApiError> { check_permission(&request, None)?; @@ -260,41 +293,6 @@ async fn timeline_digest_handler(request: Request) -> Result) -> Result, ApiError> { - let ttid = TenantTimelineId::new( - parse_request_param(&request, "tenant_id")?, - parse_request_param(&request, "timeline_id")?, - ); - check_permission(&request, Some(ttid.tenant_id))?; - - let filename: String = parse_request_param(&request, "filename")?; - - let tli = GlobalTimelines::get(ttid).map_err(ApiError::from)?; - let tli = tli - .full_access_guard() - .await - .map_err(ApiError::InternalServerError)?; - - let filepath = tli.get_timeline_dir().join(filename); - let mut file = File::open(&filepath) - .await - .map_err(|e| ApiError::InternalServerError(e.into()))?; - - let mut content = Vec::new(); - // TODO: don't store files in memory - file.read_to_end(&mut content) - .await - .map_err(|e| ApiError::InternalServerError(e.into()))?; - - Response::builder() - .status(StatusCode::OK) - .header("Content-Type", "application/octet-stream") - .body(Body::from(content)) - .map_err(|e| ApiError::InternalServerError(e.into())) -} - /// Force persist control file. async fn timeline_checkpoint_handler(request: Request) -> Result, ApiError> { check_permission(&request, None)?; @@ -566,13 +564,13 @@ pub fn make_router(conf: SafeKeeperConf) -> RouterBuilder .delete("/v1/tenant/:tenant_id", |r| { request_span(r, tenant_delete_handler) }) + .get( + "/v1/tenant/:tenant_id/timeline/:timeline_id/snapshot", + |r| request_span(r, timeline_snapshot_handler), + ) .post("/v1/pull_timeline", |r| { request_span(r, timeline_pull_handler) }) - .get( - "/v1/tenant/:tenant_id/timeline/:timeline_id/file/:filename", - |r| request_span(r, timeline_files_handler), - ) .post( "/v1/tenant/:tenant_id/timeline/:source_timeline_id/copy", |r| request_span(r, timeline_copy_handler), diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index 1a56ff736c35..cbd67f0064c3 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -7,7 +7,7 @@ use tokio::runtime::Runtime; use std::time::Duration; use storage_broker::Uri; -use utils::{auth::SwappableJwtAuth, id::NodeId}; +use utils::{auth::SwappableJwtAuth, id::NodeId, logging::SecretString}; mod auth; pub mod broker; @@ -78,6 +78,8 @@ pub struct SafeKeeperConf { pub pg_auth: Option>, pub pg_tenant_only_auth: Option>, pub http_auth: Option>, + /// JWT token to connect to other safekeepers with. 
+ pub sk_auth_token: Option, pub current_thread_runtime: bool, pub walsenders_keep_horizon: bool, pub partial_backup_enabled: bool, @@ -114,6 +116,7 @@ impl SafeKeeperConf { pg_auth: None, pg_tenant_only_auth: None, http_auth: None, + sk_auth_token: None, heartbeat_timeout: Duration::new(5, 0), max_offloader_lag_bytes: defaults::DEFAULT_MAX_OFFLOADER_LAG_BYTES, current_thread_runtime: false, diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 7b41c98cb856..66c41f65ff2b 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -1,28 +1,244 @@ -use std::sync::Arc; - +use anyhow::{anyhow, bail, Context, Result}; +use bytes::Bytes; use camino::Utf8PathBuf; use camino_tempfile::Utf8TempDir; use chrono::{DateTime, Utc}; +use futures::{SinkExt, StreamExt, TryStreamExt}; +use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI}; use serde::{Deserialize, Serialize}; +use std::{ + cmp::min, + io::{self, ErrorKind}, + sync::Arc, +}; +use tokio::{ + fs::{File, OpenOptions}, + io::AsyncWrite, + sync::mpsc, + task, +}; +use tokio_tar::{Archive, Builder}; +use tokio_util::{ + io::{CopyToBytes, SinkWriter}, + sync::PollSender, +}; +use tracing::{error, info, instrument}; -use anyhow::{bail, Context, Result}; -use tokio::io::AsyncWriteExt; -use tracing::info; +use crate::{ + control_file::{self, CONTROL_FILE_NAME}, + debug_dump, + http::{ + client::{self, Client}, + routes::TimelineStatus, + }, + safekeeper::Term, + timeline::{get_tenant_dir, get_timeline_dir, FullAccessTimeline, Timeline, TimelineError}, + wal_storage::{self, open_wal_file, Storage}, + GlobalTimelines, SafeKeeperConf, +}; use utils::{ + crashsafe::{durable_rename, fsync_async_opt}, id::{TenantId, TenantTimelineId, TimelineId}, + logging::SecretString, lsn::Lsn, pausable_failpoint, }; -use crate::{ - control_file, debug_dump, - http::routes::TimelineStatus, - timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError}, - wal_storage::{self, Storage}, - GlobalTimelines, SafeKeeperConf, -}; +/// Stream tar archive of timeline to tx. +#[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))] +pub async fn stream_snapshot(tli: FullAccessTimeline, tx: mpsc::Sender>) { + if let Err(e) = stream_snapshot_guts(tli, tx.clone()).await { + // Error type/contents don't matter as they won't can't reach the client + // (hyper likely doesn't do anything with it), but http stream will be + // prematurely terminated. It would be nice to try to send the error in + // trailers though. + tx.send(Err(anyhow!("snapshot failed"))).await.ok(); + error!("snapshot failed: {:#}", e); + } +} + +/// State needed while streaming the snapshot. +pub struct SnapshotContext { + pub from_segno: XLogSegNo, // including + pub upto_segno: XLogSegNo, // including + pub term: Term, + pub last_log_term: Term, + pub flush_lsn: Lsn, + pub wal_seg_size: usize, + // used to remove WAL hold off in Drop. + pub tli: FullAccessTimeline, +} + +impl Drop for SnapshotContext { + fn drop(&mut self) { + let tli = self.tli.clone(); + task::spawn(async move { + let mut shared_state = tli.write_shared_state().await; + shared_state.wal_removal_on_hold = false; + }); + } +} + +pub async fn stream_snapshot_guts( + tli: FullAccessTimeline, + tx: mpsc::Sender>, +) -> Result<()> { + // tokio-tar wants Write implementor, but we have mpsc tx >; + // use SinkWriter as a Write impl. That is, + // - create Sink from the tx. It returns PollSendError if chan is closed. 
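// Editor's note: illustrative sketch, not part of the patch. The adapter chain
// assembled in the next few lines (PollSender -> sink_map_err -> with ->
// CopyToBytes -> SinkWriter) is easier to read on a plain Bytes channel, without
// the Result<Bytes> wrapping the snapshot code needs. `bytes_channel_writer` is a
// hypothetical helper:

fn bytes_channel_writer(
    tx: tokio::sync::mpsc::Sender<bytes::Bytes>,
) -> impl tokio::io::AsyncWrite {
    use futures::SinkExt;
    use std::io::{Error, ErrorKind};
    use tokio_util::io::{CopyToBytes, SinkWriter};
    use tokio_util::sync::PollSender;

    // Sink<Bytes> over the channel; sending fails once the receiver is dropped,
    // so map that error into an io::Error for SinkWriter.
    let sink = PollSender::new(tx).sink_map_err(|_| Error::from(ErrorKind::BrokenPipe));
    // CopyToBytes turns Sink<Bytes> into Sink<&[u8]> by copying each chunk, and
    // SinkWriter exposes that as AsyncWrite (usable with AsyncWriteExt::write_all).
    SinkWriter::new(CopyToBytes::new(sink))
}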
+ let sink = PollSender::new(tx); + // - SinkWriter needs sink error to be io one, map it. + let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe)); + // - SinkWriter wants sink type to be just Bytes, not Result, so map + // it with with(). Note that with() accepts async function which we don't + // need and allows the map to fail, which we don't need either, but hence + // two Oks. + let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) }); + // - SinkWriter (not surprisingly) wants sink of &[u8], not bytes, so wrap + // into CopyToBytes. This is a data copy. + let copy_to_bytes = CopyToBytes::new(oksink); + let mut writer = SinkWriter::new(copy_to_bytes); + let pinned_writer = std::pin::pin!(writer); + + // Note that tokio_tar append_* funcs use tokio::io::copy with 8KB buffer + // which is also likely suboptimal. + let mut ar = Builder::new_non_terminated(pinned_writer); + + let bctx = tli.start_snapshot(&mut ar).await?; + pausable_failpoint!("sk-snapshot-after-list-pausable"); + + let tli_dir = tli.get_timeline_dir(); + info!( + "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}", + bctx.upto_segno - bctx.from_segno + 1, + bctx.from_segno, + bctx.upto_segno, + bctx.term, + bctx.last_log_term, + bctx.flush_lsn, + ); + for segno in bctx.from_segno..=bctx.upto_segno { + let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?; + let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size); + if is_partial { + wal_file_name.push_str(".partial"); + } + ar.append_file(&wal_file_name, &mut sf).await?; + } + + // Do the term check before ar.finish to make archive corrupted in case of + // term change. Client shouldn't ignore abrupt stream end, but to be sure. + tli.finish_snapshot(&bctx).await?; + + ar.finish().await?; + + Ok(()) +} + +impl FullAccessTimeline { + /// Start streaming tar archive with timeline: + /// 1) stream control file under lock; + /// 2) hold off WAL removal; + /// 3) collect SnapshotContext to understand which WAL segments should be + /// streamed. + /// + /// Snapshot streams data up to flush_lsn. To make this safe, we must check + /// that term doesn't change during the procedure, or we risk sending mix of + /// WAL from different histories. Term is remembered in the SnapshotContext + /// and checked in finish_snapshot. Note that in the last segment some WAL + /// higher than flush_lsn set here might be streamed; that's fine as long as + /// terms doesn't change. + /// + /// Alternatively we could send only up to commit_lsn to get some valid + /// state which later will be recovered by compute, in this case term check + /// is not needed, but we likely don't want that as there might be no + /// compute which could perform the recovery. + /// + /// When returned SnapshotContext is dropped WAL hold is removed. + async fn start_snapshot( + &self, + ar: &mut tokio_tar::Builder, + ) -> Result { + let mut shared_state = self.write_shared_state().await; + + let cf_path = self.get_timeline_dir().join(CONTROL_FILE_NAME); + let mut cf = File::open(cf_path).await?; + ar.append_file(CONTROL_FILE_NAME, &mut cf).await?; + + // We need to stream since the oldest segment someone (s3 or pageserver) + // still needs. This duplicates calc_horizon_lsn logic. + // + // We know that WAL wasn't removed up to this point because it cannot be + // removed further than `backup_lsn`. 
Since we're holding shared_state + // lock and setting `wal_removal_on_hold` later, it guarantees that WAL + // won't be removed until we're done. + let from_lsn = min( + shared_state.sk.state.remote_consistent_lsn, + shared_state.sk.state.backup_lsn, + ); + if from_lsn == Lsn::INVALID { + // this is possible if snapshot is called before handling first + // elected message + bail!("snapshot is called on uninitialized timeline"); + } + let from_segno = from_lsn.segment_number(shared_state.get_wal_seg_size()); + let term = shared_state.sk.get_term(); + let last_log_term = shared_state.sk.get_last_log_term(); + let flush_lsn = shared_state.sk.flush_lsn(); + let upto_segno = flush_lsn.segment_number(shared_state.get_wal_seg_size()); + // have some limit on max number of segments as a sanity check + const MAX_ALLOWED_SEGS: u64 = 1000; + let num_segs = upto_segno - from_segno + 1; + if num_segs > MAX_ALLOWED_SEGS { + bail!( + "snapshot is called on timeline with {} segments, but the limit is {}", + num_segs, + MAX_ALLOWED_SEGS + ); + } + + // Prevent WAL removal while we're streaming data. + // + // Since this a flag, not a counter just bail out if already set; we + // shouldn't need concurrent snapshotting. + if shared_state.wal_removal_on_hold { + bail!("wal_removal_on_hold is already true"); + } + shared_state.wal_removal_on_hold = true; + + let bctx = SnapshotContext { + from_segno, + upto_segno, + term, + last_log_term, + flush_lsn, + wal_seg_size: shared_state.get_wal_seg_size(), + tli: self.clone(), + }; + + Ok(bctx) + } -/// Info about timeline on safekeeper ready for reporting. + /// Finish snapshotting: check that term(s) hasn't changed. + /// + /// Note that WAL gc hold off is removed in Drop of SnapshotContext to not + /// forget this if snapshotting fails mid the way. + pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> { + let shared_state = self.read_shared_state().await; + let term = shared_state.sk.get_term(); + let last_log_term = shared_state.sk.get_last_log_term(); + // There are some cases to relax this check (e.g. last_log_term might + // change, but as long as older history is strictly part of new that's + // fine), but there is no need to do it. + if bctx.term != term || bctx.last_log_term != last_log_term { + bail!("term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}", + bctx.term, bctx.last_log_term, term, last_log_term); + } + Ok(()) + } +} + +/// pull_timeline request body. #[derive(Debug, Serialize, Deserialize)] pub struct Request { pub tenant_id: TenantId, @@ -48,7 +264,10 @@ pub struct DebugDumpResponse { } /// Find the most advanced safekeeper and pull timeline from it. -pub async fn handle_request(request: Request) -> Result { +pub async fn handle_request( + request: Request, + sk_auth_token: Option, +) -> Result { let existing_tli = GlobalTimelines::get(TenantTimelineId::new( request.tenant_id, request.timeline_id, @@ -57,28 +276,26 @@ pub async fn handle_request(request: Request) -> Result { bail!("Timeline {} already exists", request.timeline_id); } - let client = reqwest::Client::new(); let http_hosts = request.http_hosts.clone(); - // Send request to /v1/tenant/:tenant_id/timeline/:timeline_id - let responses = futures::future::join_all(http_hosts.iter().map(|url| { - let url = format!( - "{}/v1/tenant/{}/timeline/{}", - url, request.tenant_id, request.timeline_id - ); - client.get(url).send() - })) - .await; + // Figure out statuses of potential donors. 
+ let responses: Vec> = + futures::future::join_all(http_hosts.iter().map(|url| async { + let cclient = Client::new(url.clone(), sk_auth_token.clone()); + let info = cclient + .timeline_status(request.tenant_id, request.timeline_id) + .await?; + Ok(info) + })) + .await; let mut statuses = Vec::new(); for (i, response) in responses.into_iter().enumerate() { - let response = response.context(format!("Failed to get status from {}", http_hosts[i]))?; - let status: crate::http::routes::TimelineStatus = response.json().await?; + let status = response.context(format!("fetching status from {}", http_hosts[i]))?; statuses.push((status, i)); } // Find the most advanced safekeeper - // TODO: current logic may be wrong, fix it later let (status, i) = statuses .into_iter() .max_by_key(|(status, _)| { @@ -94,10 +311,14 @@ pub async fn handle_request(request: Request) -> Result { assert!(status.tenant_id == request.tenant_id); assert!(status.timeline_id == request.timeline_id); - pull_timeline(status, safekeeper_host).await + pull_timeline(status, safekeeper_host, sk_auth_token).await } -async fn pull_timeline(status: TimelineStatus, host: String) -> Result { +async fn pull_timeline( + status: TimelineStatus, + host: String, + sk_auth_token: Option, +) -> Result { let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id); info!( "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}", @@ -111,95 +332,53 @@ async fn pull_timeline(status: TimelineStatus, host: String) -> Result let conf = &GlobalTimelines::get_global_config(); - let client = reqwest::Client::new(); - // TODO: don't use debug dump, it should be used only in tests. - // This is a proof of concept, we should figure out a way - // to use scp without implementing it manually. - - // Implementing our own scp over HTTP. - // At first, we need to fetch list of files from safekeeper. - let dump: DebugDumpResponse = client - .get(format!( - "{}/v1/debug_dump?dump_all=true&tenant_id={}&timeline_id={}", - host, status.tenant_id, status.timeline_id - )) - .send() - .await? - .json() - .await?; - - if dump.timelines.len() != 1 { - bail!( - "expected to fetch single timeline, got {} timelines", - dump.timelines.len() - ); - } - - let timeline = dump.timelines.into_iter().next().unwrap(); - let disk_content = timeline.disk_content.ok_or(anyhow::anyhow!( - "timeline {} doesn't have disk content", - ttid - ))?; - - let mut filenames = disk_content - .files - .iter() - .map(|file| file.name.clone()) - .collect::>(); - - // Sort filenames to make sure we pull files in correct order - // After sorting, we should have: - // - 000000010000000000000001 - // - ... - // - 000000010000000000000002.partial - // - safekeeper.control - filenames.sort(); - - // safekeeper.control should be the first file, so we need to move it to the beginning - let control_file_index = filenames - .iter() - .position(|name| name == "safekeeper.control") - .ok_or(anyhow::anyhow!("safekeeper.control not found"))?; - filenames.remove(control_file_index); - filenames.insert(0, "safekeeper.control".to_string()); - - pausable_failpoint!("sk-pull-timeline-after-list-pausable"); - - info!( - "downloading {} files from safekeeper {}", - filenames.len(), - host - ); - let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?; - // Note: some time happens between fetching list of files and fetching files themselves. - // It's possible that some files will be removed from safekeeper and we will fail to fetch them. 
- // This function will fail in this case, should be retried by the caller. - for filename in filenames { - let file_path = tli_dir_path.join(&filename); - // /v1/tenant/:tenant_id/timeline/:timeline_id/file/:filename - let http_url = format!( - "{}/v1/tenant/{}/timeline/{}/file/{}", - host, status.tenant_id, status.timeline_id, filename - ); + let client = Client::new(host.clone(), sk_auth_token.clone()); + // Request stream with basebackup archive. + let bb_resp = client + .snapshot(status.tenant_id, status.timeline_id) + .await?; - let mut file = tokio::fs::File::create(&file_path).await?; - let mut response = client.get(&http_url).send().await?; - if response.status() != reqwest::StatusCode::OK { - bail!( - "pulling file {} failed: status is {}", - filename, - response.status() - ); - } - while let Some(chunk) = response.chunk().await? { - file.write_all(&chunk).await?; - file.flush().await?; + // Make Stream of Bytes from it... + let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other); + // and turn it into StreamReader implementing AsyncRead. + let bb_reader = tokio_util::io::StreamReader::new(bb_stream); + + // Extract it on the fly to the disk. We don't use simple unpack() to fsync + // files. + let mut entries = Archive::new(bb_reader).entries()?; + while let Some(base_tar_entry) = entries.next().await { + let mut entry = base_tar_entry?; + let header = entry.header(); + let file_path = header.path()?.into_owned(); + match header.entry_type() { + tokio_tar::EntryType::Regular => { + let utf8_file_path = + Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path"); + let dst_path = tli_dir_path.join(utf8_file_path); + let mut f = OpenOptions::new() + .create(true) + .truncate(true) + .write(true) + .open(&dst_path) + .await?; + tokio::io::copy(&mut entry, &mut f).await?; + // fsync the file + f.sync_all().await?; + } + _ => { + bail!( + "entry {} in backup tar archive is of unexpected type: {:?}", + file_path.display(), + header.entry_type() + ); + } } } - // TODO: fsync? + // fsync temp timeline directory to remember its contents. + fsync_async_opt(&tli_dir_path, !conf.no_sync).await?; // Let's create timeline from temp directory and verify that it's correct let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?; @@ -290,7 +469,9 @@ pub async fn load_temp_timeline( ttid, tmp_path, timeline_path ); tokio::fs::create_dir_all(get_tenant_dir(conf, &ttid.tenant_id)).await?; - tokio::fs::rename(tmp_path, &timeline_path).await?; + // fsync tenant dir creation + fsync_async_opt(&conf.workdir, !conf.no_sync).await?; + durable_rename(tmp_path, &timeline_path, !conf.no_sync).await?; let tli = GlobalTimelines::load_timeline(&guard, ttid) .await diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 563dbbe31581..666ffdf0cea4 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -780,6 +780,9 @@ where // Initializing backup_lsn is useful to avoid making backup think it should upload 0 segment. 
state.backup_lsn = max(state.backup_lsn, state.timeline_start_lsn); + // similar for remote_consistent_lsn + state.remote_consistent_lsn = + max(state.remote_consistent_lsn, state.timeline_start_lsn); state.acceptor_state.term_history = msg.term_history.clone(); self.state.finish_change(&state).await?; @@ -955,7 +958,7 @@ mod tests { use super::*; use crate::{ - state::{PersistedPeers, TimelinePersistentState}, + state::{EvictionState, PersistedPeers, TimelinePersistentState}, wal_storage::Storage, }; use std::{ops::Deref, str::FromStr, time::Instant}; @@ -1222,6 +1225,7 @@ mod tests { }, )]), partial_backup: crate::wal_backup_partial::State::default(), + eviction_state: EvictionState::Present, }; let ser = state.ser().unwrap(); @@ -1269,6 +1273,8 @@ mod tests { 0xb0, 0x01, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00, // partial_backup 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // eviction_state + 0x00, 0x00, 0x00, 0x00, ]; assert_eq!(Hex(&ser), Hex(&expected)); diff --git a/safekeeper/src/state.rs b/safekeeper/src/state.rs index be5e5162969f..e0f7b65aef84 100644 --- a/safekeeper/src/state.rs +++ b/safekeeper/src/state.rs @@ -63,11 +63,26 @@ pub struct TimelinePersistentState { /// Holds names of partial segments uploaded to remote storage. Used to /// clean up old objects without leaving garbage in remote storage. pub partial_backup: wal_backup_partial::State, + /// Eviction state of the timeline. If it's Offloaded, we should download + /// WAL files from remote storage to serve the timeline. + pub eviction_state: EvictionState, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct PersistedPeers(pub Vec<(NodeId, PersistedPeerInfo)>); +/// State of the local WAL files. Used to track current timeline state, +/// that can be either WAL files are present on disk or last partial segment +/// is offloaded to remote storage. +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] +pub enum EvictionState { + /// WAL files are present on disk. + Present, + /// Last partial segment is offloaded to remote storage. + /// Contains flush_lsn of the last offloaded segment. + Offloaded(Lsn), +} + impl TimelinePersistentState { pub fn new( ttid: &TenantTimelineId, @@ -98,6 +113,7 @@ impl TimelinePersistentState { .collect(), ), partial_backup: wal_backup_partial::State::default(), + eviction_state: EvictionState::Present, } } diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 148a7e90bd4e..544ffdbb36cf 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, bail, Result}; use camino::Utf8PathBuf; use serde::{Deserialize, Serialize}; -use tokio::fs; +use tokio::fs::{self}; use tokio_util::sync::CancellationToken; use utils::id::TenantId; @@ -168,6 +168,9 @@ pub struct SharedState { pub(crate) sk: SafeKeeper, /// In memory list containing state of peers sent in latest messages from them. pub(crate) peers_info: PeersInfo, + // True value hinders old WAL removal; this is used by snapshotting. We + // could make it a counter, but there is no need to. 
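// Editor's note (summary, not part of the patch): the flag's lifecycle in this
// change is:
//   1. FullAccessTimeline::start_snapshot() sets it under the shared-state write
//      lock, after picking the [from_segno, upto_segno] range to stream;
//   2. timeline_manager::update_wal_removal() skips WAL removal while it is set;
//   3. Drop for SnapshotContext spawns a task that clears it, so the hold is
//      released even if snapshot streaming fails partway through.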
+ pub(crate) wal_removal_on_hold: bool, } impl SharedState { @@ -205,6 +208,7 @@ impl SharedState { Ok(Self { sk, peers_info: PeersInfo(vec![]), + wal_removal_on_hold: false, }) } @@ -222,10 +226,11 @@ impl SharedState { Ok(Self { sk: SafeKeeper::new(control_store, wal_store, conf.my_id)?, peers_info: PeersInfo(vec![]), + wal_removal_on_hold: false, }) } - fn get_wal_seg_size(&self) -> usize { + pub(crate) fn get_wal_seg_size(&self) -> usize { self.sk.state.server.wal_seg_size as usize } diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 087b988c69f9..592426bba34d 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -39,6 +39,7 @@ pub struct StateSnapshot { // misc pub cfile_last_persist_at: Instant, pub inmem_flush_pending: bool, + pub wal_removal_on_hold: bool, pub peers: Vec, } @@ -54,6 +55,7 @@ impl StateSnapshot { cfile_backup_lsn: read_guard.sk.state.backup_lsn, cfile_last_persist_at: read_guard.sk.state.pers.last_persist_at(), inmem_flush_pending: Self::has_unflushed_inmem_state(&read_guard), + wal_removal_on_hold: read_guard.wal_removal_on_hold, peers: read_guard.get_peers(heartbeat_timeout), } } @@ -324,8 +326,8 @@ async fn update_wal_removal( last_removed_segno: u64, wal_removal_task: &mut Option>>, ) { - if wal_removal_task.is_some() { - // WAL removal is already in progress + if wal_removal_task.is_some() || state.wal_removal_on_hold { + // WAL removal is already in progress or hold off return; } diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 45e27e19519c..0c1731937c79 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -684,13 +684,12 @@ impl WalReader { let xlogoff = self.pos.segment_offset(self.wal_seg_size); let segno = self.pos.segment_number(self.wal_seg_size); let wal_file_name = XLogFileName(PG_TLI, segno, self.wal_seg_size); - let wal_file_path = self.timeline_dir.join(&wal_file_name); // Try to open local file, if we may have WAL locally if self.pos >= self.local_start_lsn { - let res = Self::open_wal_file(&wal_file_path).await; + let res = open_wal_file(&self.timeline_dir, segno, self.wal_seg_size).await; match res { - Ok(mut file) => { + Ok((mut file, _)) => { file.seek(SeekFrom::Start(xlogoff as u64)).await?; return Ok(Box::pin(file)); } @@ -718,25 +717,6 @@ impl WalReader { bail!("WAL segment is not found") } - - /// Helper function for opening a wal file. - async fn open_wal_file(wal_file_path: &Utf8Path) -> Result { - // First try to open the .partial file. - let mut partial_path = wal_file_path.to_owned(); - partial_path.set_extension("partial"); - if let Ok(opened_file) = tokio::fs::File::open(&partial_path).await { - return Ok(opened_file); - } - - // If that failed, try it without the .partial extension. - tokio::fs::File::open(&wal_file_path) - .await - .with_context(|| format!("Failed to open WAL file {:?}", wal_file_path)) - .map_err(|e| { - warn!("{}", e); - e - }) - } } /// Zero block for filling created WAL segments. @@ -758,6 +738,34 @@ async fn write_zeroes(file: &mut File, mut count: usize) -> Result<()> { Ok(()) } +/// Helper function for opening WAL segment `segno` in `dir`. Returns file and +/// whether it is .partial. +pub(crate) async fn open_wal_file( + timeline_dir: &Utf8Path, + segno: XLogSegNo, + wal_seg_size: usize, +) -> Result<(tokio::fs::File, bool)> { + let (wal_file_path, wal_file_partial_path) = wal_file_paths(timeline_dir, segno, wal_seg_size)?; + + // First try to open the .partial file. 
+ let mut partial_path = wal_file_path.to_owned(); + partial_path.set_extension("partial"); + if let Ok(opened_file) = tokio::fs::File::open(&wal_file_partial_path).await { + return Ok((opened_file, true)); + } + + // If that failed, try it without the .partial extension. + let pf = tokio::fs::File::open(&wal_file_path) + .await + .with_context(|| format!("failed to open WAL file {:#}", wal_file_path)) + .map_err(|e| { + warn!("{}", e); + e + })?; + + Ok((pf, false)) +} + /// Helper returning full path to WAL segment file and its .partial brother. pub fn wal_file_paths( timeline_dir: &Utf8Path, diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 27e2a4453b5a..47539872a6c8 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -174,6 +174,7 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { pg_auth: None, pg_tenant_only_auth: None, http_auth: None, + sk_auth_token: None, current_thread_runtime: false, walsenders_keep_horizon: false, partial_backup_enabled: false, diff --git a/storage_controller/Cargo.toml b/storage_controller/Cargo.toml index 194619a4962a..b54dea5d474d 100644 --- a/storage_controller/Cargo.toml +++ b/storage_controller/Cargo.toml @@ -40,6 +40,7 @@ tokio.workspace = true tokio-util.workspace = true tracing.workspace = true measured.workspace = true +scopeguard.workspace = true strum.workspace = true strum_macros.workspace = true diff --git a/storage_controller/src/background_node_operations.rs b/storage_controller/src/background_node_operations.rs new file mode 100644 index 000000000000..74b7e7c84955 --- /dev/null +++ b/storage_controller/src/background_node_operations.rs @@ -0,0 +1,59 @@ +use std::{borrow::Cow, fmt::Debug, fmt::Display}; + +use tokio_util::sync::CancellationToken; +use utils::id::NodeId; + +pub(crate) const MAX_RECONCILES_PER_OPERATION: usize = 10; + +#[derive(Copy, Clone)] +pub(crate) struct Drain { + pub(crate) node_id: NodeId, +} + +#[derive(Copy, Clone)] +pub(crate) struct Fill { + pub(crate) node_id: NodeId, +} + +#[derive(Copy, Clone)] +pub(crate) enum Operation { + Drain(Drain), + Fill(Fill), +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum OperationError { + #[error("Node state changed during operation: {0}")] + NodeStateChanged(Cow<'static, str>), + #[error("Operation finalize error: {0}")] + FinalizeError(Cow<'static, str>), + #[error("Operation cancelled")] + Cancelled, +} + +pub(crate) struct OperationHandler { + pub(crate) operation: Operation, + #[allow(unused)] + pub(crate) cancel: CancellationToken, +} + +impl Display for Drain { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "drain {}", self.node_id) + } +} + +impl Display for Fill { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "fill {}", self.node_id) + } +} + +impl Display for Operation { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Operation::Drain(op) => write!(f, "{op}"), + Operation::Fill(op) => write!(f, "{op}"), + } + } +} diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index 9d326ef82dd7..4d0f8006aaa4 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -146,6 +146,9 @@ pub(crate) enum NotifyError { // A response indicates we will never succeed, such as 400 or 404 #[error("Non-retryable error {0}")] Fatal(StatusCode), + + #[error("neon_local error: 
{0}")] + NeonLocal(anyhow::Error), } enum MaybeSendResult { @@ -278,12 +281,18 @@ impl ComputeHook { async fn do_notify_local( &self, reconfigure_request: &ComputeHookNotifyRequest, - ) -> anyhow::Result<()> { + ) -> Result<(), NotifyError> { // neon_local updates are not safe to call concurrently, use a lock to serialize // all calls to this function let _locked = self.neon_local_lock.lock().await; - let env = match LocalEnv::load_config() { + let Some(repo_dir) = self.config.neon_local_repo_dir.as_deref() else { + tracing::warn!( + "neon_local_repo_dir not set, likely a bug in neon_local; skipping compute update" + ); + return Ok(()); + }; + let env = match LocalEnv::load_config(repo_dir) { Ok(e) => e, Err(e) => { tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})"); @@ -315,7 +324,8 @@ impl ComputeHook { tracing::info!("Reconfiguring endpoint {}", endpoint_name,); endpoint .reconfigure(compute_pageservers.clone(), *stripe_size) - .await?; + .await + .map_err(NotifyError::NeonLocal)?; } } @@ -504,7 +514,7 @@ impl ComputeHook { } else { self.do_notify_local(&request).await.map_err(|e| { // This path is for testing only, so munge the error into our prod-style error type. - tracing::error!("Local notification hook failed: {e}"); + tracing::error!("neon_local notification hook failed: {e}"); NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR) }) }; diff --git a/storage_controller/src/heartbeater.rs b/storage_controller/src/heartbeater.rs index 1ef97e78eb29..14cda0a289f2 100644 --- a/storage_controller/src/heartbeater.rs +++ b/storage_controller/src/heartbeater.rs @@ -31,6 +31,7 @@ pub(crate) enum PageserverState { Available { last_seen_at: Instant, utilization: PageserverUtilization, + new: bool, }, Offline, } @@ -127,6 +128,7 @@ impl HeartbeaterTask { heartbeat_futs.push({ let jwt_token = self.jwt_token.clone(); let cancel = self.cancel.clone(); + let new_node = !self.state.contains_key(node_id); // Clone the node and mark it as available such that the request // goes through to the pageserver even when the node is marked offline. @@ -159,6 +161,7 @@ impl HeartbeaterTask { PageserverState::Available { last_seen_at: Instant::now(), utilization, + new: new_node, } } else { PageserverState::Offline @@ -220,6 +223,7 @@ impl HeartbeaterTask { } }, Vacant(_) => { + // This is a new node. Don't generate a delta for it. 
deltas.push((node_id, ps_state.clone())); } } diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index bbb6d2cb320e..680e6f09c4a0 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -480,6 +480,61 @@ async fn handle_node_configure(mut req: Request) -> Result, ) } +async fn handle_node_status(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + let node_status = state.service.get_node(node_id).await?; + + json_response(StatusCode::OK, node_status) +} + +async fn handle_node_drain(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + state.service.start_node_drain(node_id).await?; + + json_response(StatusCode::ACCEPTED, ()) +} + +async fn handle_cancel_node_drain(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + state.service.cancel_node_drain(node_id).await?; + + json_response(StatusCode::ACCEPTED, ()) +} + +async fn handle_node_fill(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + state.service.start_node_fill(node_id).await?; + + json_response(StatusCode::ACCEPTED, ()) +} + +async fn handle_cancel_node_fill(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + state.service.cancel_node_fill(node_id).await?; + + json_response(StatusCode::ACCEPTED, ()) +} + async fn handle_tenant_shard_split( service: Arc, mut req: Request, @@ -832,6 +887,30 @@ pub fn make_router( RequestName("control_v1_node_config"), ) }) + .get("/control/v1/node/:node_id", |r| { + named_request_span(r, handle_node_status, RequestName("control_v1_node_status")) + }) + .put("/control/v1/node/:node_id/drain", |r| { + named_request_span(r, handle_node_drain, RequestName("control_v1_node_drain")) + }) + .delete("/control/v1/node/:node_id/drain", |r| { + named_request_span( + r, + handle_cancel_node_drain, + RequestName("control_v1_cancel_node_drain"), + ) + }) + .put("/control/v1/node/:node_id/fill", |r| { + named_request_span(r, handle_node_fill, RequestName("control_v1_node_fill")) + }) + .delete("/control/v1/node/:node_id/fill", |r| { + named_request_span( + r, + handle_cancel_node_fill, + RequestName("control_v1_cancel_node_fill"), + ) + }) + // TODO(vlad): endpoint for cancelling drain and fill // Tenant Shard operations .put("/control/v1/tenant/:tenant_shard_id/migrate", |r| { tenant_service_handler( diff --git a/storage_controller/src/id_lock_map.rs b/storage_controller/src/id_lock_map.rs index dff793289f1b..fcd3eb57e25f 100644 --- a/storage_controller/src/id_lock_map.rs +++ b/storage_controller/src/id_lock_map.rs @@ -8,14 +8,15 @@ use crate::service::RECONCILE_TIMEOUT; const LOCK_TIMEOUT_ALERT_THRESHOLD: Duration = RECONCILE_TIMEOUT; -/// A wrapper around `OwnedRwLockWriteGuard` that when dropped changes the -/// current holding operation in lock. 
-pub struct WrappedWriteGuard {
+/// A wrapper around `OwnedRwLockWriteGuard` used for tracking the
+/// operation that holds the lock, and prints a warning if it exceeds
+/// the LOCK_TIMEOUT_ALERT_THRESHOLD time
+pub struct TracingExclusiveGuard {
     guard: tokio::sync::OwnedRwLockWriteGuard>,
     start: Instant,
 }
 
-impl WrappedWriteGuard {
+impl TracingExclusiveGuard {
     pub fn new(guard: tokio::sync::OwnedRwLockWriteGuard>) -> Self {
         Self {
             guard,
@@ -24,12 +25,12 @@ impl WrappedWriteGuard {
     }
 }
 
-impl Drop for WrappedWriteGuard {
+impl Drop for TracingExclusiveGuard {
     fn drop(&mut self) {
         let duration = self.start.elapsed();
         if duration > LOCK_TIMEOUT_ALERT_THRESHOLD {
             tracing::warn!(
-                "Lock on {} was held for {:?}",
+                "Exclusive lock by {} was held for {:?}",
                 self.guard.as_ref().unwrap(),
                 duration
             );
@@ -38,6 +39,38 @@
     }
 }
 
+/// A wrapper around `OwnedRwLockReadGuard` used for tracking the
+/// operation that holds the lock, and prints a warning if it exceeds
+/// the LOCK_TIMEOUT_ALERT_THRESHOLD time
+pub struct TracingSharedGuard {
+    _guard: tokio::sync::OwnedRwLockReadGuard>,
+    operation: T,
+    start: Instant,
+}
+
+impl TracingSharedGuard {
+    pub fn new(guard: tokio::sync::OwnedRwLockReadGuard>, operation: T) -> Self {
+        Self {
+            _guard: guard,
+            operation,
+            start: Instant::now(),
+        }
+    }
+}
+
+impl Drop for TracingSharedGuard {
+    fn drop(&mut self) {
+        let duration = self.start.elapsed();
+        if duration > LOCK_TIMEOUT_ALERT_THRESHOLD {
+            tracing::warn!(
+                "Shared lock by {} was held for {:?}",
+                self.operation,
+                duration
+            );
+        }
+    }
+}
+
 /// A map of locks covering some arbitrary identifiers. Useful if you have a collection of objects but don't
 /// want to embed a lock in each one, or if your locking granularity is different to your object granularity.
/// For example, used in the storage controller where the objects are tenant shards, but sometimes locking @@ -58,21 +91,22 @@ where pub(crate) fn shared( &self, key: T, - ) -> impl std::future::Future>> { + operation: I, + ) -> impl std::future::Future> { let mut locked = self.entities.lock().unwrap(); - let entry = locked.entry(key).or_default(); - entry.clone().read_owned() + let entry = locked.entry(key).or_default().clone(); + async move { TracingSharedGuard::new(entry.read_owned().await, operation) } } pub(crate) fn exclusive( &self, key: T, operation: I, - ) -> impl std::future::Future> { + ) -> impl std::future::Future> { let mut locked = self.entities.lock().unwrap(); let entry = locked.entry(key).or_default().clone(); async move { - let mut guard = WrappedWriteGuard::new(entry.clone().write_owned().await); + let mut guard = TracingExclusiveGuard::new(entry.write_owned().await); *guard.guard = Some(operation); guard } @@ -99,12 +133,12 @@ where pub async fn trace_exclusive_lock< T: Clone + Display + Eq + PartialEq + std::hash::Hash, - I: Display + Clone, + I: Clone + Display, >( op_locks: &IdLockMap, key: T, operation: I, -) -> WrappedWriteGuard { +) -> TracingExclusiveGuard { let start = Instant::now(); let guard = op_locks.exclusive(key.clone(), operation.clone()).await; @@ -123,14 +157,14 @@ pub async fn trace_exclusive_lock< pub async fn trace_shared_lock< T: Clone + Display + Eq + PartialEq + std::hash::Hash, - I: Display, + I: Clone + Display, >( op_locks: &IdLockMap, key: T, operation: I, -) -> tokio::sync::OwnedRwLockReadGuard> { +) -> TracingSharedGuard { let start = Instant::now(); - let guard = op_locks.shared(key.clone()).await; + let guard = op_locks.shared(key.clone(), operation.clone()).await; let duration = start.elapsed(); if duration > LOCK_TIMEOUT_ALERT_THRESHOLD { @@ -159,11 +193,11 @@ mod tests { async fn multiple_shared_locks() { let id_lock_map: IdLockMap = IdLockMap::default(); - let shared_lock_1 = id_lock_map.shared(1).await; - let shared_lock_2 = id_lock_map.shared(1).await; + let shared_lock_1 = id_lock_map.shared(1, Operations::Op1).await; + let shared_lock_2 = id_lock_map.shared(1, Operations::Op2).await; - assert!(shared_lock_1.is_none()); - assert!(shared_lock_2.is_none()); + assert_eq!(shared_lock_1.operation, Operations::Op1); + assert_eq!(shared_lock_2.operation, Operations::Op2); } #[tokio::test] @@ -183,7 +217,7 @@ mod tests { assert!(_ex_lock_2.is_err()); } - let shared_lock_1 = id_lock_map.shared(resource_id).await; - assert!(shared_lock_1.is_none()); + let shared_lock_1 = id_lock_map.shared(resource_id, Operations::Op1).await; + assert_eq!(shared_lock_1.operation, Operations::Op1); } } diff --git a/storage_controller/src/lib.rs b/storage_controller/src/lib.rs index 2ea490a14b71..8caf63890406 100644 --- a/storage_controller/src/lib.rs +++ b/storage_controller/src/lib.rs @@ -2,6 +2,7 @@ use serde::Serialize; use utils::seqwait::MonotonicCounter; mod auth; +mod background_node_operations; mod compute_hook; mod heartbeater; pub mod http; diff --git a/storage_controller/src/main.rs b/storage_controller/src/main.rs index ce8f8d0cdd13..f1eb0b30fc38 100644 --- a/storage_controller/src/main.rs +++ b/storage_controller/src/main.rs @@ -4,6 +4,7 @@ use clap::Parser; use diesel::Connection; use metrics::launch_timestamp::LaunchTimestamp; use metrics::BuildInfo; +use std::path::PathBuf; use std::sync::Arc; use storage_controller::http::make_router; use storage_controller::metrics::preinitialize_metrics; @@ -77,6 +78,12 @@ struct Cli { /// How long to wait 
for the initial database connection to be available. #[arg(long, default_value = "5s")] db_connect_timeout: humantime::Duration, + + /// `neon_local` sets this to the path of the neon_local repo dir. + /// Only relevant for testing. + // TODO: make `cfg(feature = "testing")` + #[arg(long)] + neon_local_repo_dir: Option, } enum StrictMode { @@ -260,6 +267,7 @@ async fn async_main() -> anyhow::Result<()> { .reconciler_concurrency .unwrap_or(RECONCILER_CONCURRENCY_DEFAULT), split_threshold: args.split_threshold, + neon_local_repo_dir: args.neon_local_repo_dir, }; // After loading secrets & config, but before starting anything else, apply database migrations diff --git a/storage_controller/src/node.rs b/storage_controller/src/node.rs index 7b5513c90893..4d17dff9feaf 100644 --- a/storage_controller/src/node.rs +++ b/storage_controller/src/node.rs @@ -3,7 +3,7 @@ use std::{str::FromStr, time::Duration}; use pageserver_api::{ controller_api::{ NodeAvailability, NodeDescribeResponse, NodeRegisterRequest, NodeSchedulingPolicy, - TenantLocateResponseShard, + TenantLocateResponseShard, UtilizationScore, }, shard::TenantShardId, }; @@ -59,6 +59,10 @@ impl Node { self.id } + pub(crate) fn get_scheduling(&self) -> NodeSchedulingPolicy { + self.scheduling + } + pub(crate) fn set_scheduling(&mut self, scheduling: NodeSchedulingPolicy) { self.scheduling = scheduling } @@ -116,6 +120,16 @@ impl Node { match (self.availability, availability) { (Offline, Active(_)) => ToActive, (Active(_), Offline) => ToOffline, + // Consider the case when the storage controller handles the re-attach of a node + // before the heartbeats detect that the node is back online. We still need + // [`Service::node_configure`] to attempt reconciliations for shards with an + // unknown observed location. + // The unsavoury match arm below handles this situation. 
+ (Active(lhs), Active(rhs)) + if lhs == UtilizationScore::worst() && rhs < UtilizationScore::worst() => + { + ToActive + } _ => Unchanged, } } @@ -141,6 +155,7 @@ impl Node { NodeSchedulingPolicy::Draining => MaySchedule::No, NodeSchedulingPolicy::Filling => MaySchedule::Yes(score), NodeSchedulingPolicy::Pause => MaySchedule::No, + NodeSchedulingPolicy::PauseForRestart => MaySchedule::No, } } @@ -157,7 +172,7 @@ impl Node { listen_http_port, listen_pg_addr, listen_pg_port, - scheduling: NodeSchedulingPolicy::Filling, + scheduling: NodeSchedulingPolicy::Active, availability: NodeAvailability::Offline, cancel: CancellationToken::new(), } diff --git a/storage_controller/src/persistence.rs b/storage_controller/src/persistence.rs index 67c05296d57d..47caf7ae81ab 100644 --- a/storage_controller/src/persistence.rs +++ b/storage_controller/src/persistence.rs @@ -442,13 +442,15 @@ impl Persistence { #[tracing::instrument(skip_all, fields(node_id))] pub(crate) async fn re_attach( &self, - node_id: NodeId, + input_node_id: NodeId, ) -> DatabaseResult> { + use crate::schema::nodes::dsl::scheduling_policy; + use crate::schema::nodes::dsl::*; use crate::schema::tenant_shards::dsl::*; let updated = self .with_measured_conn(DatabaseOperation::ReAttach, move |conn| { let rows_updated = diesel::update(tenant_shards) - .filter(generation_pageserver.eq(node_id.0 as i64)) + .filter(generation_pageserver.eq(input_node_id.0 as i64)) .set(generation.eq(generation + 1)) .execute(conn)?; @@ -457,9 +459,23 @@ impl Persistence { // TODO: UPDATE+SELECT in one query let updated = tenant_shards - .filter(generation_pageserver.eq(node_id.0 as i64)) + .filter(generation_pageserver.eq(input_node_id.0 as i64)) .select(TenantShardPersistence::as_select()) .load(conn)?; + + // If the node went through a drain and restart phase before re-attaching, + // then reset it's node scheduling policy to active. + diesel::update(nodes) + .filter(node_id.eq(input_node_id.0 as i64)) + .filter( + scheduling_policy + .eq(String::from(NodeSchedulingPolicy::PauseForRestart)) + .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Draining))) + .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Filling))), + ) + .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active))) + .execute(conn)?; + Ok(updated) }) .await?; diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index 4ab85509dc4b..843159010d5b 100644 --- a/storage_controller/src/scheduler.rs +++ b/storage_controller/src/scheduler.rs @@ -1,4 +1,5 @@ use crate::{node::Node, tenant_shard::TenantShard}; +use itertools::Itertools; use pageserver_api::controller_api::UtilizationScore; use serde::Serialize; use std::collections::HashMap; @@ -283,6 +284,44 @@ impl Scheduler { } } + // Check if the number of shards attached to a given node is lagging below + // the cluster average. If that's the case, the node should be filled. 
+ pub(crate) fn compute_fill_requirement(&self, node_id: NodeId) -> usize { + let Some(node) = self.nodes.get(&node_id) else { + debug_assert!(false); + tracing::error!("Scheduler missing node {node_id}"); + return 0; + }; + assert!(!self.nodes.is_empty()); + let expected_attached_shards_per_node = self.expected_attached_shard_count(); + + for (node_id, node) in self.nodes.iter() { + tracing::trace!(%node_id, "attached_shard_count={} shard_count={} expected={}", node.attached_shard_count, node.shard_count, expected_attached_shards_per_node); + } + + if node.attached_shard_count < expected_attached_shards_per_node { + expected_attached_shards_per_node - node.attached_shard_count + } else { + 0 + } + } + + pub(crate) fn expected_attached_shard_count(&self) -> usize { + let total_attached_shards: usize = + self.nodes.values().map(|n| n.attached_shard_count).sum(); + + assert!(!self.nodes.is_empty()); + total_attached_shards / self.nodes.len() + } + + pub(crate) fn nodes_by_attached_shard_count(&self) -> Vec<(NodeId, usize)> { + self.nodes + .iter() + .map(|(node_id, stats)| (*node_id, stats.attached_shard_count)) + .sorted_by(|lhs, rhs| Ord::cmp(&lhs.1, &rhs.1).reverse()) + .collect() + } + pub(crate) fn node_upsert(&mut self, node: &Node) { use std::collections::hash_map::Entry::*; match self.nodes.entry(node.get_id()) { @@ -352,7 +391,7 @@ impl Scheduler { return Err(ScheduleError::NoPageservers); } - let mut scores: Vec<(NodeId, AffinityScore, usize)> = self + let mut scores: Vec<(NodeId, AffinityScore, usize, usize)> = self .nodes .iter() .filter_map(|(k, v)| { @@ -363,6 +402,7 @@ impl Scheduler { *k, context.nodes.get(k).copied().unwrap_or(AffinityScore::FREE), v.shard_count, + v.attached_shard_count, )) } }) @@ -370,9 +410,12 @@ impl Scheduler { // Sort by, in order of precedence: // 1st: Affinity score. We should never pick a higher-score node if a lower-score node is available - // 2nd: Utilization. Within nodes with the same affinity, use the least loaded nodes. - // 3rd: Node ID. This is a convenience to make selection deterministic in tests and empty systems. - scores.sort_by_key(|i| (i.1, i.2, i.0)); + // 2nd: Attached shard count. Within nodes with the same affinity, we always pick the node with + // the least number of attached shards. + // 3rd: Total shard count. Within nodes with the same affinity and attached shard count, use nodes + // with the lower total shard count. + // 4th: Node ID. This is a convenience to make selection deterministic in tests and empty systems. + scores.sort_by_key(|i| (i.1, i.3, i.2, i.0)); if scores.is_empty() { // After applying constraints, no pageservers were left. 
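For reference, a minimal sketch (not part of the patch; the free function and test below are hypothetical) of the fill-requirement arithmetic that `expected_attached_shard_count` and `compute_fill_requirement` above implement: a node's fill requirement is how far its attached-shard count lags behind the cluster-wide integer average.

fn fill_requirement(attached_per_node: &[usize], node: usize) -> usize {
    // Cluster-wide integer average of attached shards per node.
    let total: usize = attached_per_node.iter().sum();
    let expected = total / attached_per_node.len();
    // A node lagging below that average should be filled up to it; others need nothing.
    expected.saturating_sub(attached_per_node[node])
}

#[test]
fn fill_requirement_example() {
    // Attached counts [4, 2, 0] average to 2, so only the empty node needs filling.
    assert_eq!(fill_requirement(&[4, 2, 0], 2), 2);
    assert_eq!(fill_requirement(&[4, 2, 0], 0), 0);
}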
diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index cf6a95bf0b85..388e0eadc8e9 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -2,19 +2,24 @@ use std::{ borrow::Cow, cmp::Ordering, collections::{BTreeMap, HashMap, HashSet}, + path::PathBuf, str::FromStr, sync::Arc, time::{Duration, Instant}, }; use crate::{ + background_node_operations::{ + Drain, Fill, Operation, OperationError, OperationHandler, MAX_RECONCILES_PER_OPERATION, + }, compute_hook::NotifyError, - id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, WrappedWriteGuard}, + id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, TracingExclusiveGuard}, persistence::{AbortShardSplitStatus, TenantFilter}, reconciler::{ReconcileError, ReconcileUnits}, - scheduler::{ScheduleContext, ScheduleMode}, + scheduler::{MaySchedule, ScheduleContext, ScheduleMode}, tenant_shard::{ - MigrateAttachment, ReconcileNeeded, ScheduleOptimization, ScheduleOptimizationAction, + MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization, + ScheduleOptimizationAction, }, }; use anyhow::Context; @@ -134,6 +139,11 @@ struct ServiceState { scheduler: Scheduler, + /// Ongoing background operation on the cluster if any is running. + /// Note that only one such operation may run at any given time, + /// hence the type choice. + ongoing_operation: Option, + /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile delayed_reconcile_rx: tokio::sync::mpsc::Receiver, } @@ -185,6 +195,7 @@ impl ServiceState { tenants, nodes: Arc::new(nodes), scheduler, + ongoing_operation: None, delayed_reconcile_rx, } } @@ -226,6 +237,9 @@ pub struct Config { /// How large must a shard grow in bytes before we split it? /// None disables auto-splitting. pub split_threshold: Option, + + // TODO: make this cfg(feature = "testing") + pub neon_local_repo_dir: Option, } impl From for ApiError { @@ -296,6 +310,17 @@ impl From for ApiError { } } +impl From for ApiError { + fn from(value: OperationError) -> Self { + match value { + OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => { + ApiError::InternalServerError(anyhow::anyhow!(err)) + } + OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()), + } + } +} + #[allow(clippy::large_enum_variant)] enum TenantCreateOrUpdate { Create(TenantCreateRequest), @@ -334,7 +359,7 @@ struct TenantShardSplitAbort { new_shard_count: ShardCount, new_stripe_size: Option, /// Until this abort op is complete, no other operations may be done on the tenant - _tenant_lock: WrappedWriteGuard, + _tenant_lock: TracingExclusiveGuard, } #[derive(thiserror::Error, Debug)] @@ -747,29 +772,61 @@ impl Service { let res = self.heartbeater.heartbeat(nodes).await; if let Ok(deltas) = res { for (node_id, state) in deltas.0 { - let new_availability = match state { - PageserverState::Available { utilization, .. } => NodeAvailability::Active( - UtilizationScore(utilization.utilization_score), + let (new_node, new_availability) = match state { + PageserverState::Available { + utilization, new, .. 
+                        } => (
+                            new,
+                            NodeAvailability::Active(UtilizationScore(
+                                utilization.utilization_score,
+                            )),
                         ),
-                        PageserverState::Offline => NodeAvailability::Offline,
+                        PageserverState::Offline => (false, NodeAvailability::Offline),
                     };
-                    let res = self
-                        .node_configure(node_id, Some(new_availability), None)
-                        .await;
-                    match res {
-                        Ok(()) => {}
-                        Err(ApiError::NotFound(_)) => {
-                            // This should be rare, but legitimate since the heartbeats are done
-                            // on a snapshot of the nodes.
-                            tracing::info!("Node {} was not found after heartbeat round", node_id);
+                    if new_node {
+                        // When the heartbeats detect a newly added node, we don't wish
+                        // to attempt to reconcile the shards assigned to it. The node
+                        // is likely handling its re-attach response, so reconciling now
+                        // would be counterproductive.
+                        //
+                        // Instead, update the in-memory state with the details learned about the
+                        // node.
+                        let mut locked = self.inner.write().unwrap();
+                        let (nodes, _tenants, scheduler) = locked.parts_mut();
+
+                        let mut new_nodes = (**nodes).clone();
+
+                        if let Some(node) = new_nodes.get_mut(&node_id) {
+                            node.set_availability(new_availability);
+                            scheduler.node_upsert(node);
                         }
-                        Err(err) => {
-                            tracing::error!(
-                                "Failed to update node {} after heartbeat round: {}",
-                                node_id,
-                                err
-                            );
+
+                        locked.nodes = Arc::new(new_nodes);
+                    } else {
+                        // This is the code path for genuine availability transitions (i.e. node
+                        // goes unavailable and/or comes back online).
+                        let res = self
+                            .node_configure(node_id, Some(new_availability), None)
+                            .await;
+
+                        match res {
+                            Ok(()) => {}
+                            Err(ApiError::NotFound(_)) => {
+                                // This should be rare, but legitimate since the heartbeats are done
+                                // on a snapshot of the nodes.
+                                tracing::info!(
+                                    "Node {} was not found after heartbeat round",
+                                    node_id
+                                );
+                            }
+                            Err(err) => {
+                                tracing::error!(
+                                    "Failed to update node {} after heartbeat round: {}",
+                                    node_id,
+                                    err
+                                );
+                            }
                         }
                     }
                 }
@@ -1181,13 +1238,14 @@ impl Service {
             let locked = self.inner.write().unwrap();
             !locked.tenants.contains_key(&attach_req.tenant_shard_id)
         };
+
         if insert {
             let tsp = TenantShardPersistence {
                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
                 shard_stripe_size: 0,
-                generation: Some(0),
+                generation: attach_req.generation_override.or(Some(0)),
                 generation_pageserver: None,
                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
                 config: serde_json::to_string(&TenantConfig::default()).unwrap(),
@@ -1371,7 +1429,7 @@ impl Service {
     async fn node_activate_reconcile(
         &self,
         mut node: Node,
-        _lock: &WrappedWriteGuard,
+        _lock: &TracingExclusiveGuard,
     ) -> Result<(), ApiError> {
         // This Node is a mutable local copy: we will set it active so that we can use its
         // API client to reconcile with the node. The Node in [`Self::nodes`] will get updated
@@ -1562,15 +1620,32 @@ impl Service {
         // Setting a node active unblocks any Reconcilers that might write to the location config API,
         // but those requests will not be accepted by the node until it has finished processing
         // the re-attach response.
+        //
+        // Additionally, reset the node's scheduling policy to match the conditional update done
+        // in [`Persistence::re_attach`].
if let Some(node) = nodes.get(&reattach_req.node_id) { - if !node.is_available() { + let reset_scheduling = matches!( + node.get_scheduling(), + NodeSchedulingPolicy::PauseForRestart + | NodeSchedulingPolicy::Draining + | NodeSchedulingPolicy::Filling + ); + + if !node.is_available() || reset_scheduling { let mut new_nodes = (**nodes).clone(); if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) { - node.set_availability(NodeAvailability::Active(UtilizationScore::worst())); + if !node.is_available() { + node.set_availability(NodeAvailability::Active(UtilizationScore::worst())); + } + + if reset_scheduling { + node.set_scheduling(NodeSchedulingPolicy::Active); + } + scheduler.node_upsert(node); + let new_nodes = Arc::new(new_nodes); + *nodes = new_nodes; } - let new_nodes = Arc::new(new_nodes); - *nodes = new_nodes; } } @@ -1851,6 +1926,25 @@ impl Service { Ok(()) } + /// Same as [`Service::await_waiters`], but returns the waiters which are still + /// in progress + async fn await_waiters_remainder( + &self, + waiters: Vec, + timeout: Duration, + ) -> Vec { + let deadline = Instant::now().checked_add(timeout).unwrap(); + for waiter in waiters.iter() { + let timeout = deadline.duration_since(Instant::now()); + let _ = waiter.wait_timeout(timeout).await; + } + + waiters + .into_iter() + .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress)) + .collect::>() + } + /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request, /// and transform it into either a tenant creation of a series of shard updates. /// @@ -2564,6 +2658,7 @@ impl Service { TenantOperations::TimelineCreate, ) .await; + failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock"); self.ensure_attached_wait(tenant_id).await?; @@ -4132,6 +4227,18 @@ impl Service { Ok(nodes) } + pub(crate) async fn get_node(&self, node_id: NodeId) -> Result { + self.inner + .read() + .unwrap() + .nodes + .get(&node_id) + .cloned() + .ok_or(ApiError::NotFound( + format!("Node {node_id} not registered").into(), + )) + } + pub(crate) async fn node_register( &self, register_req: NodeRegisterRequest, @@ -4286,9 +4393,6 @@ impl Service { if let Some(scheduling) = scheduling { node.set_scheduling(scheduling); - - // TODO: once we have a background scheduling ticker for fill/drain, kick it - // to wake up and start working. } // Update the scheduler, in case the elegibility of the node for new shards has changed @@ -4316,6 +4420,16 @@ impl Service { continue; } + if !new_nodes + .values() + .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_))) + { + // Special case for when all nodes are unavailable and/or unschedulable: there is no point + // trying to reschedule since there's nowhere else to go. Without this + // branch we incorrectly detach tenants in response to node unavailability. + continue; + } + if tenant_shard.intent.demote_attached(scheduler, node_id) { tenant_shard.sequence = tenant_shard.sequence.next(); @@ -4353,6 +4467,12 @@ impl Service { // When a node comes back online, we must reconcile any tenant that has a None observed // location on the node. for tenant_shard in locked.tenants.values_mut() { + // If a reconciliation is already in progress, rely on the previous scheduling + // decision and skip triggering a new reconciliation. 
+ if tenant_shard.reconciler.is_some() { + continue; + } + if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) { if observed_loc.conf.is_none() { self.maybe_reconcile_shard(tenant_shard, &new_nodes); @@ -4363,7 +4483,7 @@ impl Service { // TODO: in the background, we should balance work back onto this pageserver } AvailabilityTransition::Unchanged => { - tracing::debug!("Node {} no change during config", node_id); + tracing::debug!("Node {} no availability change during config", node_id); } } @@ -4372,6 +4492,283 @@ impl Service { Ok(()) } + pub(crate) async fn start_node_drain( + self: &Arc, + node_id: NodeId, + ) -> Result<(), ApiError> { + let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + let node = nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + let schedulable_nodes_count = nodes + .iter() + .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_))) + .count(); + + ( + locked + .ongoing_operation + .as_ref() + .map(|ongoing| ongoing.operation), + node.is_available(), + node.get_scheduling(), + schedulable_nodes_count, + ) + }; + + if let Some(ongoing) = ongoing_op { + return Err(ApiError::PreconditionFailed( + format!("Background operation already ongoing for node: {}", ongoing).into(), + )); + } + + if !node_available { + return Err(ApiError::ResourceUnavailable( + format!("Node {node_id} is currently unavailable").into(), + )); + } + + if schedulable_nodes_count == 0 { + return Err(ApiError::PreconditionFailed( + "No other schedulable nodes to drain to".into(), + )); + } + + match node_policy { + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => { + self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining)) + .await?; + + let cancel = self.cancel.child_token(); + let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?; + + self.inner.write().unwrap().ongoing_operation = Some(OperationHandler { + operation: Operation::Drain(Drain { node_id }), + cancel: cancel.clone(), + }); + + tokio::task::spawn({ + let service = self.clone(); + let cancel = cancel.clone(); + async move { + let _gate_guard = gate_guard; + + scopeguard::defer! 
{ + let prev = service.inner.write().unwrap().ongoing_operation.take(); + + if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) { + assert_eq!(removed_drain.node_id, node_id, "We always take the same operation"); + } else { + panic!("We always remove the same operation") + } + } + + tracing::info!(%node_id, "Drain background operation starting"); + let res = service.drain_node(node_id, cancel).await; + match res { + Ok(()) => { + tracing::info!(%node_id, "Drain background operation completed successfully"); + } + Err(OperationError::Cancelled) => { + tracing::info!(%node_id, "Drain background operation was cancelled"); + } + Err(err) => { + tracing::error!(%node_id, "Drain background operation encountered: {err}") + } + } + } + }); + } + NodeSchedulingPolicy::Draining => { + return Err(ApiError::Conflict(format!( + "Node {node_id} has drain in progress" + ))); + } + policy => { + return Err(ApiError::PreconditionFailed( + format!("Node {node_id} cannot be drained due to {policy:?} policy").into(), + )); + } + } + + Ok(()) + } + + pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> { + let (node_available, node_policy) = { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + let node = nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + + (node.is_available(), node.get_scheduling()) + }; + + if !node_available { + return Err(ApiError::ResourceUnavailable( + format!("Node {node_id} is currently unavailable").into(), + )); + } + + if !matches!(node_policy, NodeSchedulingPolicy::Draining) { + return Err(ApiError::PreconditionFailed( + format!("Node {node_id} has no drain in progress").into(), + )); + } + + if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() { + if let Operation::Drain(drain) = op_handler.operation { + if drain.node_id == node_id { + tracing::info!("Cancelling background drain operation for node {node_id}"); + op_handler.cancel.cancel(); + return Ok(()); + } + } + } + + Err(ApiError::PreconditionFailed( + format!("Node {node_id} has no drain in progress").into(), + )) + } + + pub(crate) async fn start_node_fill(self: &Arc, node_id: NodeId) -> Result<(), ApiError> { + let (ongoing_op, node_available, node_policy, total_nodes_count) = { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + let node = nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + + ( + locked + .ongoing_operation + .as_ref() + .map(|ongoing| ongoing.operation), + node.is_available(), + node.get_scheduling(), + nodes.len(), + ) + }; + + if let Some(ongoing) = ongoing_op { + return Err(ApiError::PreconditionFailed( + format!("Background operation already ongoing for node: {}", ongoing).into(), + )); + } + + if !node_available { + return Err(ApiError::ResourceUnavailable( + format!("Node {node_id} is currently unavailable").into(), + )); + } + + if total_nodes_count <= 1 { + return Err(ApiError::PreconditionFailed( + "No other nodes to fill from".into(), + )); + } + + match node_policy { + NodeSchedulingPolicy::Active => { + self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling)) + .await?; + + let cancel = self.cancel.child_token(); + let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?; + + self.inner.write().unwrap().ongoing_operation = Some(OperationHandler { + operation: Operation::Fill(Fill { node_id }), + cancel: 
cancel.clone(),
+                });
+
+                tokio::task::spawn({
+                    let service = self.clone();
+                    let cancel = cancel.clone();
+                    async move {
+                        let _gate_guard = gate_guard;
+
+                        scopeguard::defer! {
+                            let prev = service.inner.write().unwrap().ongoing_operation.take();
+
+                            if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
+                                assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
+                            } else {
+                                panic!("We always remove the same operation")
+                            }
+                        }
+
+                        tracing::info!(%node_id, "Fill background operation starting");
+                        let res = service.fill_node(node_id, cancel).await;
+                        match res {
+                            Ok(()) => {
+                                tracing::info!(%node_id, "Fill background operation completed successfully");
+                            }
+                            Err(OperationError::Cancelled) => {
+                                tracing::info!(%node_id, "Fill background operation was cancelled");
+                            }
+                            Err(err) => {
+                                tracing::error!(%node_id, "Fill background operation encountered: {err}")
+                            }
+                        }
+                    }
+                });
+            }
+            NodeSchedulingPolicy::Filling => {
+                return Err(ApiError::Conflict(format!(
+                    "Node {node_id} has fill in progress"
+                )));
+            }
+            policy => {
+                return Err(ApiError::PreconditionFailed(
+                    format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
+                ));
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
+        let (node_available, node_policy) = {
+            let locked = self.inner.read().unwrap();
+            let nodes = &locked.nodes;
+            let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
+                anyhow::anyhow!("Node {} not registered", node_id).into(),
+            ))?;
+
+            (node.is_available(), node.get_scheduling())
+        };
+
+        if !node_available {
+            return Err(ApiError::ResourceUnavailable(
+                format!("Node {node_id} is currently unavailable").into(),
+            ));
+        }
+
+        if !matches!(node_policy, NodeSchedulingPolicy::Filling) {
+            return Err(ApiError::PreconditionFailed(
+                format!("Node {node_id} has no fill in progress").into(),
+            ));
+        }
+
+        if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
+            if let Operation::Fill(fill) = op_handler.operation {
+                if fill.node_id == node_id {
+                    tracing::info!("Cancelling background fill operation for node {node_id}");
+                    op_handler.cancel.cancel();
+                    return Ok(());
+                }
+            }
+        }
+
+        Err(ApiError::PreconditionFailed(
+            format!("Node {node_id} has no fill in progress").into(),
+        ))
+    }
+
     /// Helper for methods that will try and call pageserver APIs for
     /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant
     /// is attached somewhere.
@@ -4956,4 +5353,383 @@ impl Service {
         // to complete.
         self.gate.close().await;
     }
+
+    /// Drain a node by moving away the shards for which it is the attached (primary) location.
+    /// This is a long running operation and it should run as a separate Tokio task.
+ pub(crate) async fn drain_node( + &self, + node_id: NodeId, + cancel: CancellationToken, + ) -> Result<(), OperationError> { + let mut last_inspected_shard: Option = None; + let mut inspected_all_shards = false; + let mut waiters = Vec::new(); + + while !inspected_all_shards { + if cancel.is_cancelled() { + match self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active)) + .await + { + Ok(()) => return Err(OperationError::Cancelled), + Err(err) => { + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", + node_id, err + ) + .into(), + )); + } + } + } + + { + let mut locked = self.inner.write().unwrap(); + let (nodes, tenants, scheduler) = locked.parts_mut(); + + let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged( + format!("node {node_id} was removed").into(), + ))?; + + let current_policy = node.get_scheduling(); + if !matches!(current_policy, NodeSchedulingPolicy::Draining) { + // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think + // about it + return Err(OperationError::NodeStateChanged( + format!("node {node_id} changed state to {current_policy:?}").into(), + )); + } + + let mut cursor = tenants.iter_mut().skip_while({ + let skip_past = last_inspected_shard; + move |(tid, _)| match skip_past { + Some(last) => **tid != last, + None => false, + } + }); + + while waiters.len() < MAX_RECONCILES_PER_OPERATION { + let (tid, tenant_shard) = match cursor.next() { + Some(some) => some, + None => { + inspected_all_shards = true; + break; + } + }; + + // If the shard is not attached to the node being drained, skip it. + if *tenant_shard.intent.get_attached() != Some(node_id) { + last_inspected_shard = Some(*tid); + continue; + } + + match tenant_shard.reschedule_to_secondary(None, scheduler) { + Err(e) => { + tracing::warn!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Scheduling error when draining pageserver {} : {e}", node_id + ); + } + Ok(()) => { + let scheduled_to = tenant_shard.intent.get_attached(); + tracing::info!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Rescheduled shard while draining node {}: {} -> {:?}", + node_id, + node_id, + scheduled_to + ); + + let waiter = self.maybe_reconcile_shard(tenant_shard, nodes); + if let Some(some) = waiter { + waiters.push(some); + } + } + } + + last_inspected_shard = Some(*tid); + } + } + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + + failpoint_support::sleep_millis_async!("sleepy-drain-loop"); + } + + while !waiters.is_empty() { + if cancel.is_cancelled() { + match self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active)) + .await + { + Ok(()) => return Err(OperationError::Cancelled), + Err(err) => { + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", + node_id, err + ) + .into(), + )); + } + } + } + + tracing::info!("Awaiting {} pending drain reconciliations", waiters.len()); + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + // At this point we have done the best we could to drain shards from this node. + // Set the node scheduling policy to `[NodeSchedulingPolicy::PauseForRestart]` + // to complete the drain. + if let Err(err) = self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart)) + .await + { + // This is not fatal. 
Anything that is polling the node scheduling policy to detect + // the end of the drain operations will hang, but all such places should enforce an + // overall timeout. The scheduling policy will be updated upon node re-attach and/or + // by the counterpart fill operation. + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}" + ) + .into(), + )); + } + + Ok(()) + } + + /// Create a node fill plan (pick secondaries to promote) that meets the following requirements: + /// 1. The node should be filled until it reaches the expected cluster average of + /// attached shards. If there are not enough secondaries on the node, the plan stops early. + /// 2. Select tenant shards to promote such that the number of attached shards is balanced + /// throughout the cluster. We achieve this by picking tenant shards from each node, + /// starting from the ones with the largest number of attached shards, until the node + /// reaches the expected cluster average. + /// 3. Avoid promoting more shards of the same tenant than required. The upper bound + /// for the number of tenants from the same shard promoted to the node being filled is: + /// shard count for the tenant divided by the number of nodes in the cluster. + fn fill_node_plan(&self, node_id: NodeId) -> Vec { + let mut locked = self.inner.write().unwrap(); + let fill_requirement = locked.scheduler.compute_fill_requirement(node_id); + + let mut tids_by_node = locked + .tenants + .iter_mut() + .filter_map(|(tid, tenant_shard)| { + if tenant_shard.intent.get_secondary().contains(&node_id) { + if let Some(primary) = tenant_shard.intent.get_attached() { + return Some((*primary, *tid)); + } + } + + None + }) + .into_group_map(); + + let expected_attached = locked.scheduler.expected_attached_shard_count(); + let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count(); + + let mut promoted_per_tenant: HashMap = HashMap::new(); + let mut plan = Vec::new(); + + for (node_id, attached) in nodes_by_load { + let available = locked + .nodes + .get(&node_id) + .map_or(false, |n| n.is_available()); + if !available { + continue; + } + + if plan.len() >= fill_requirement + || tids_by_node.is_empty() + || attached <= expected_attached + { + break; + } + + let mut can_take = attached - expected_attached; + let mut remove_node = false; + while can_take > 0 { + match tids_by_node.get_mut(&node_id) { + Some(tids) => match tids.pop() { + Some(tid) => { + let max_promote_for_tenant = std::cmp::max( + tid.shard_count.count() as usize / locked.nodes.len(), + 1, + ); + let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default(); + if *promoted < max_promote_for_tenant { + plan.push(tid); + *promoted += 1; + can_take -= 1; + } + } + None => { + remove_node = true; + break; + } + }, + None => { + break; + } + } + } + + if remove_node { + tids_by_node.remove(&node_id); + } + } + + plan + } + + /// Fill a node by promoting its secondaries until the cluster is balanced + /// with regards to attached shard counts. Note that this operation only + /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`]. + /// This is a long running operation and it should run as a separate Tokio task. + pub(crate) async fn fill_node( + &self, + node_id: NodeId, + cancel: CancellationToken, + ) -> Result<(), OperationError> { + // TODO(vlad): Currently this operates on the assumption that all + // secondaries are warm. This is not always true (e.g. 
we just migrated the + // tenant). Take that into consideration by checking the secondary status. + let mut tids_to_promote = self.fill_node_plan(node_id); + let mut waiters = Vec::new(); + + // Execute the plan we've composed above. Before aplying each move from the plan, + // we validate to ensure that it has not gone stale in the meantime. + while !tids_to_promote.is_empty() { + if cancel.is_cancelled() { + match self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active)) + .await + { + Ok(()) => return Err(OperationError::Cancelled), + Err(err) => { + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", + node_id, err + ) + .into(), + )); + } + } + } + + { + let mut locked = self.inner.write().unwrap(); + let (nodes, tenants, scheduler) = locked.parts_mut(); + + let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged( + format!("node {node_id} was removed").into(), + ))?; + + let current_policy = node.get_scheduling(); + if !matches!(current_policy, NodeSchedulingPolicy::Filling) { + // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think + // about it + return Err(OperationError::NodeStateChanged( + format!("node {node_id} changed state to {current_policy:?}").into(), + )); + } + + while waiters.len() < MAX_RECONCILES_PER_OPERATION { + if let Some(tid) = tids_to_promote.pop() { + if let Some(tenant_shard) = tenants.get_mut(&tid) { + // If the node being filled is not a secondary anymore, + // skip the promotion. + if !tenant_shard.intent.get_secondary().contains(&node_id) { + continue; + } + + let previously_attached_to = *tenant_shard.intent.get_attached(); + match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) { + Err(e) => { + tracing::warn!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Scheduling error when filling pageserver {} : {e}", node_id + ); + } + Ok(()) => { + tracing::info!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Rescheduled shard while filling node {}: {:?} -> {}", + node_id, + previously_attached_to, + node_id + ); + + if let Some(waiter) = + self.maybe_reconcile_shard(tenant_shard, nodes) + { + waiters.push(waiter); + } + } + } + } + } else { + break; + } + } + } + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + while !waiters.is_empty() { + if cancel.is_cancelled() { + match self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active)) + .await + { + Ok(()) => return Err(OperationError::Cancelled), + Err(err) => { + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", + node_id, err + ) + .into(), + )); + } + } + } + + tracing::info!("Awaiting {} pending fill reconciliations", waiters.len()); + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + if let Err(err) = self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active)) + .await + { + // This isn't a huge issue since the filling process starts upon request. However, it + // will prevent the next drain from starting. The only case in which this can fail + // is database unavailability. Such a case will require manual intervention. 
+ return Err(OperationError::FinalizeError( + format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}") + .into(), + )); + } + + Ok(()) + } } diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index 77bbf4c60491..45295bc59be8 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -10,7 +10,9 @@ use crate::{ reconciler::ReconcileUnits, scheduler::{AffinityScore, MaySchedule, RefCountUpdate, ScheduleContext}, }; -use pageserver_api::controller_api::{PlacementPolicy, ShardSchedulingPolicy}; +use pageserver_api::controller_api::{ + NodeSchedulingPolicy, PlacementPolicy, ShardSchedulingPolicy, +}; use pageserver_api::{ models::{LocationConfig, LocationConfigMode, TenantConfig}, shard::{ShardIdentity, TenantShardId}, @@ -311,6 +313,12 @@ pub(crate) struct ReconcilerWaiter { seq: Sequence, } +pub(crate) enum ReconcilerStatus { + Done, + Failed, + InProgress, +} + #[derive(thiserror::Error, Debug)] pub(crate) enum ReconcileWaitError { #[error("Timeout waiting for shard {0}")] @@ -373,6 +381,16 @@ impl ReconcilerWaiter { Ok(()) } + + pub(crate) fn get_status(&self) -> ReconcilerStatus { + if self.seq_wait.would_wait_for(self.seq).is_err() { + ReconcilerStatus::Done + } else if self.error_seq_wait.would_wait_for(self.seq).is_err() { + ReconcilerStatus::Failed + } else { + ReconcilerStatus::InProgress + } + } } /// Having spawned a reconciler task, the tenant shard's state will carry enough @@ -628,6 +646,48 @@ impl TenantShard { Ok(()) } + /// Reschedule this tenant shard to one of its secondary locations. Returns a scheduling error + /// if the swap is not possible and leaves the intent state in its original state. + /// + /// Arguments: + /// `attached_to`: the currently attached location matching the intent state (may be None if the + /// shard is not attached) + /// `promote_to`: an optional secondary location of this tenant shard. If set to None, we ask + /// the scheduler to recommend a node + pub(crate) fn reschedule_to_secondary( + &mut self, + promote_to: Option, + scheduler: &mut Scheduler, + ) -> Result<(), ScheduleError> { + let promote_to = match promote_to { + Some(node) => node, + None => match scheduler.node_preferred(self.intent.get_secondary()) { + Some(node) => node, + None => { + return Err(ScheduleError::ImpossibleConstraint); + } + }, + }; + + assert!(self.intent.get_secondary().contains(&promote_to)); + + if let Some(node) = self.intent.get_attached() { + let demoted = self.intent.demote_attached(scheduler, *node); + if !demoted { + return Err(ScheduleError::ImpossibleConstraint); + } + } + + self.intent.promote_attached(scheduler, promote_to); + + // Increment the sequence number for the edge case where a + // reconciler is already running to avoid waiting on the + // current reconcile instead of spawning a new one. + self.sequence = self.sequence.next(); + + Ok(()) + } + /// Optimize attachments: if a shard has a secondary location that is preferable to /// its primary location based on soft constraints, switch that secondary location /// to be attached. 
@@ -652,13 +712,17 @@ impl TenantShard { let mut scores = all_pageservers .iter() .flat_map(|node_id| { - if matches!( - nodes - .get(node_id) - .map(|n| n.may_schedule()) - .unwrap_or(MaySchedule::No), - MaySchedule::No + let node = nodes.get(node_id); + if node.is_none() { + None + } else if matches!( + node.unwrap().get_scheduling(), + NodeSchedulingPolicy::Filling ) { + // If the node is currently filling, don't count it as a candidate to avoid, + // racing with the background fill. + None + } else if matches!(node.unwrap().may_schedule(), MaySchedule::No) { None } else { let affinity_score = schedule_context.get_node_affinity(*node_id); @@ -1610,14 +1674,10 @@ pub(crate) mod tests { // We should see equal number of locations on the two nodes. assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 4); - // Scheduling does not consider the number of attachments picking the initial - // pageserver to attach to (hence the assertion that all primaries are on the - // same node) - // TODO: Tweak the scheduling to evenly distribute attachments for new shards. - assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 4); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 2); assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 4); - assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 0); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 2); // Add another two nodes: we should see the shards spread out when their optimize // methods are called diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py index 8b8075f8c1a4..e01bb6da5165 100644 --- a/test_runner/fixtures/metrics.py +++ b/test_runner/fixtures/metrics.py @@ -118,8 +118,6 @@ def histogram(prefix_without_trailing_underscore: str) -> List[str]: "libmetrics_launch_timestamp", "libmetrics_build_info", "libmetrics_tracing_event_count_total", - "pageserver_materialized_cache_hits_total", - "pageserver_materialized_cache_hits_direct_total", "pageserver_page_cache_read_hits_total", "pageserver_page_cache_read_accesses_total", "pageserver_page_cache_size_current_bytes", diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 394f5283f33f..b624c84fad42 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -1177,10 +1177,10 @@ def __init__(self, config: NeonEnvBuilder): force=config.config_init_force, ) - def start(self): + def start(self, timeout_in_seconds: Optional[int] = None): # Storage controller starts first, so that pageserver /re-attach calls don't # bounce through retries on startup - self.storage_controller.start() + self.storage_controller.start(timeout_in_seconds=timeout_in_seconds) # Wait for storage controller readiness to prevent unnecessary post start-up # reconcile. 
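A minimal usage sketch (an assumed test pattern, not part of the patch) for the new optional start timeout: the fixtures forward it to the `--start-timeout` flag of the corresponding `neon_local` start subcommands, so a whole-environment start or an individual service restart can be bounded.

def test_start_with_timeout(neon_env_builder):
    # Build the environment configs, then start everything with a bounded wait.
    env = neon_env_builder.init_configs()
    env.start(timeout_in_seconds=30)

    # Individual services accept the same argument, e.g. when restarting a pageserver.
    env.pageservers[0].restart(timeout_in_seconds=10)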
@@ -1196,10 +1196,18 @@ def start(self): ) # The `or None` is for the linter for pageserver in self.pageservers: - futs.append(executor.submit(lambda ps=pageserver: ps.start())) + futs.append( + executor.submit( + lambda ps=pageserver: ps.start(timeout_in_seconds=timeout_in_seconds) + ) + ) for safekeeper in self.safekeepers: - futs.append(executor.submit(lambda sk=safekeeper: sk.start())) + futs.append( + executor.submit( + lambda sk=safekeeper: sk.start(timeout_in_seconds=timeout_in_seconds) + ) + ) for f in futs: f.result() @@ -1783,8 +1791,13 @@ def init( res.check_returncode() return res - def storage_controller_start(self): + def storage_controller_start( + self, + timeout_in_seconds: Optional[int] = None, + ): cmd = ["storage_controller", "start"] + if timeout_in_seconds is not None: + cmd.append(f"--start-timeout={timeout_in_seconds}s") return self.raw_cli(cmd) def storage_controller_stop(self, immediate: bool): @@ -1797,8 +1810,11 @@ def pageserver_start( self, id: int, extra_env_vars: Optional[Dict[str, str]] = None, + timeout_in_seconds: Optional[int] = None, ) -> "subprocess.CompletedProcess[str]": start_args = ["pageserver", "start", f"--id={id}"] + if timeout_in_seconds is not None: + start_args.append(f"--start-timeout={timeout_in_seconds}s") storage = self.env.pageserver_remote_storage if isinstance(storage, S3Storage): @@ -1816,7 +1832,10 @@ def pageserver_stop(self, id: int, immediate=False) -> "subprocess.CompletedProc return self.raw_cli(cmd) def safekeeper_start( - self, id: int, extra_opts: Optional[List[str]] = None + self, + id: int, + extra_opts: Optional[List[str]] = None, + timeout_in_seconds: Optional[int] = None, ) -> "subprocess.CompletedProcess[str]": s3_env_vars = None if isinstance(self.env.safekeepers_remote_storage, S3Storage): @@ -1826,6 +1845,8 @@ def safekeeper_start( extra_opts = [f"-e={opt}" for opt in extra_opts] else: extra_opts = [] + if timeout_in_seconds is not None: + extra_opts.append(f"--start-timeout={timeout_in_seconds}s") return self.raw_cli( ["safekeeper", "start", str(id), *extra_opts], extra_env_vars=s3_env_vars ) @@ -2077,9 +2098,9 @@ def __init__(self, env: NeonEnv, auth_enabled: bool): self.allowed_errors: list[str] = DEFAULT_STORAGE_CONTROLLER_ALLOWED_ERRORS self.logfile = self.workdir / "storage_controller.log" - def start(self): + def start(self, timeout_in_seconds: Optional[int] = None): assert not self.running - self.env.neon_cli.storage_controller_start() + self.env.neon_cli.storage_controller_start(timeout_in_seconds) self.running = True return self @@ -2159,12 +2180,19 @@ def storage_controller_ready(): return time.time() - t1 def attach_hook_issue( - self, tenant_shard_id: Union[TenantId, TenantShardId], pageserver_id: int + self, + tenant_shard_id: Union[TenantId, TenantShardId], + pageserver_id: int, + generation_override: Optional[int] = None, ) -> int: + body = {"tenant_shard_id": str(tenant_shard_id), "node_id": pageserver_id} + if generation_override is not None: + body["generation_override"] = generation_override + response = self.request( "POST", f"{self.env.storage_controller_api}/debug/v1/attach-hook", - json={"tenant_shard_id": str(tenant_shard_id), "node_id": pageserver_id}, + json=body, headers=self.headers(TokenScope.ADMIN), ) gen = response.json()["gen"] @@ -2213,6 +2241,46 @@ def node_register(self, node: NeonPageserver): headers=self.headers(TokenScope.ADMIN), ) + def node_drain(self, node_id): + log.info(f"node_drain({node_id})") + self.request( + "PUT", + 
f"{self.env.storage_controller_api}/control/v1/node/{node_id}/drain", + headers=self.headers(TokenScope.ADMIN), + ) + + def cancel_node_drain(self, node_id): + log.info(f"cancel_node_drain({node_id})") + self.request( + "DELETE", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}/drain", + headers=self.headers(TokenScope.ADMIN), + ) + + def node_fill(self, node_id): + log.info(f"node_fill({node_id})") + self.request( + "PUT", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}/fill", + headers=self.headers(TokenScope.ADMIN), + ) + + def cancel_node_fill(self, node_id): + log.info(f"cancel_node_fill({node_id})") + self.request( + "DELETE", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}/fill", + headers=self.headers(TokenScope.ADMIN), + ) + + def node_status(self, node_id): + response = self.request( + "GET", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}", + headers=self.headers(TokenScope.ADMIN), + ) + return response.json() + def node_list(self): response = self.request( "GET", @@ -2500,6 +2568,7 @@ def doit(config: Dict[str, Any]): def start( self, extra_env_vars: Optional[Dict[str, str]] = None, + timeout_in_seconds: Optional[int] = None, ) -> "NeonPageserver": """ Start the page server. @@ -2508,7 +2577,9 @@ def start( """ assert self.running is False - self.env.neon_cli.pageserver_start(self.id, extra_env_vars=extra_env_vars) + self.env.neon_cli.pageserver_start( + self.id, extra_env_vars=extra_env_vars, timeout_in_seconds=timeout_in_seconds + ) self.running = True return self @@ -2522,13 +2593,17 @@ def stop(self, immediate: bool = False) -> "NeonPageserver": self.running = False return self - def restart(self, immediate: bool = False): + def restart( + self, + immediate: bool = False, + timeout_in_seconds: Optional[int] = None, + ): """ High level wrapper for restart: restarts the process, and waits for tenant state to stabilize. 
""" self.stop(immediate=immediate) - self.start() + self.start(timeout_in_seconds=timeout_in_seconds) self.quiesce_tenants() def quiesce_tenants(self): @@ -2611,6 +2686,7 @@ def tenant_attach( config: None | Dict[str, Any] = None, config_null: bool = False, generation: Optional[int] = None, + override_storage_controller_generation: bool = False, ): """ Tenant attachment passes through here to acquire a generation number before proceeding @@ -2619,6 +2695,10 @@ def tenant_attach( client = self.http_client() if generation is None: generation = self.env.storage_controller.attach_hook_issue(tenant_id, self.id) + elif override_storage_controller_generation: + generation = self.env.storage_controller.attach_hook_issue( + tenant_id, self.id, generation + ) return client.tenant_attach( tenant_id, config, @@ -2664,12 +2744,6 @@ def tenant_create( client = self.http_client(auth_token=auth_token) return client.tenant_create(tenant_id, conf, generation=generation) - def tenant_load(self, tenant_id: TenantId): - client = self.http_client() - return client.tenant_load( - tenant_id, generation=self.env.storage_controller.attach_hook_issue(tenant_id, self.id) - ) - def list_layers( self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId ) -> list[Path]: @@ -3410,6 +3484,13 @@ def __init__( self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers)) # path to conf is /endpoints//pgdata/postgresql.conf + # Semaphore is set to 1 when we start, and acquire'd back to zero when we stop + # + # We use a semaphore rather than a bool so that racing calls to stop() don't + # try and stop the same process twice, as stop() is called by test teardown and + # potentially by some __del__ chains in other threads. + self._running = threading.Semaphore(0) + def http_client( self, auth_token: Optional[str] = None, retries: Optional[Retry] = None ) -> EndpointHttpClient: @@ -3487,7 +3568,7 @@ def start( pageserver_id=pageserver_id, allow_multiple=allow_multiple, ) - self.running = True + self._running.release(1) return self @@ -3535,9 +3616,12 @@ def edit_hba(self, hba: List[str]): conf_file.write("\n".join(hba) + "\n") conf_file.write(data) - if self.running: + if self.is_running(): self.safe_psql("SELECT pg_reload_conf()") + def is_running(self): + return self._running._value > 0 + def reconfigure(self, pageserver_id: Optional[int] = None): assert self.endpoint_id is not None self.env.neon_cli.endpoint_reconfigure(self.endpoint_id, self.tenant_id, pageserver_id) @@ -3579,15 +3663,19 @@ def create_remote_extension_spec(self, spec: dict[str, Any]): def stop(self, mode: str = "fast") -> "Endpoint": """ Stop the Postgres instance if it's running. + + Because test teardown might try and stop an endpoint concurrently with test code + stopping the endpoint, this method is thread safe + Returns self. """ - if self.running: + running = self._running.acquire(blocking=False) + if running: assert self.endpoint_id is not None self.env.neon_cli.endpoint_stop( self.endpoint_id, check_return_code=self.check_stop_result, mode=mode ) - self.running = False return self @@ -3597,12 +3685,13 @@ def stop_and_destroy(self, mode: str = "immediate") -> "Endpoint": Returns self. 
""" - assert self.endpoint_id is not None - self.env.neon_cli.endpoint_stop( - self.endpoint_id, True, check_return_code=self.check_stop_result, mode=mode - ) - self.endpoint_id = None - self.running = False + running = self._running.acquire(blocking=False) + if running: + assert self.endpoint_id is not None + self.env.neon_cli.endpoint_stop( + self.endpoint_id, True, check_return_code=self.check_stop_result, mode=mode + ) + self.endpoint_id = None return self @@ -3790,9 +3879,13 @@ def __init__(self, env: NeonEnv, port: SafekeeperPort, id: int, running: bool = self.running = running self.logfile = Path(self.data_dir) / f"safekeeper-{id}.log" - def start(self, extra_opts: Optional[List[str]] = None) -> "Safekeeper": + def start( + self, extra_opts: Optional[List[str]] = None, timeout_in_seconds: Optional[int] = None + ) -> "Safekeeper": assert self.running is False - self.env.neon_cli.safekeeper_start(self.id, extra_opts=extra_opts) + self.env.neon_cli.safekeeper_start( + self.id, extra_opts=extra_opts, timeout_in_seconds=timeout_in_seconds + ) self.running = True # wait for wal acceptor start by checking its status started_at = time.time() @@ -3847,7 +3940,15 @@ def append_logical_message( assert isinstance(res, dict) return res - def http_client(self, auth_token: Optional[str] = None) -> SafekeeperHttpClient: + def http_client( + self, auth_token: Optional[str] = None, gen_sk_wide_token: bool = True + ) -> SafekeeperHttpClient: + """ + When auth_token is None but gen_sk_wide is True creates safekeeper wide + token, which is a reasonable default. + """ + if auth_token is None and gen_sk_wide_token: + auth_token = self.env.auth_keys.generate_safekeeper_token() is_testing_enabled = '"testing"' in self.env.get_binary_version("safekeeper") return SafekeeperHttpClient( port=self.port.http, auth_token=auth_token, is_testing_enabled=is_testing_enabled @@ -3897,11 +3998,13 @@ def list_segments(self, tenant_id, timeline_id) -> List[str]: segments.sort() return segments - def checkpoint_up_to(self, tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn): + def checkpoint_up_to( + self, tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn, wait_wal_removal=True + ): """ Assuming pageserver(s) uploaded to s3 up to `lsn`, 1) wait for remote_consistent_lsn and wal_backup_lsn on safekeeper to reach it. - 2) checkpoint timeline on safekeeper, which should remove WAL before this LSN. + 2) checkpoint timeline on safekeeper, which should remove WAL before this LSN; optionally wait for that. 
""" cli = self.http_client() @@ -3925,7 +4028,8 @@ def are_lsns_advanced(): # pageserver to this safekeeper wait_until(30, 1, are_lsns_advanced) cli.checkpoint(tenant_id, timeline_id) - wait_until(30, 1, are_segments_removed) + if wait_wal_removal: + wait_until(30, 1, are_segments_removed) def wait_until_paused(self, failpoint: str): msg = f"at failpoint {failpoint}" @@ -4447,6 +4551,7 @@ def wait_for_last_flush_lsn( tenant: TenantId, timeline: TimelineId, pageserver_id: Optional[int] = None, + auth_token: Optional[str] = None, ) -> Lsn: """Wait for pageserver to catch up the latest flush LSN, returns the last observed lsn.""" @@ -4460,7 +4565,7 @@ def wait_for_last_flush_lsn( f"wait_for_last_flush_lsn: waiting for {last_flush_lsn} on shard {tenant_shard_id} on pageserver {pageserver.id})" ) waited = wait_for_last_record_lsn( - pageserver.http_client(), tenant_shard_id, timeline, last_flush_lsn + pageserver.http_client(auth_token=auth_token), tenant_shard_id, timeline, last_flush_lsn ) assert waited >= last_flush_lsn @@ -4556,6 +4661,7 @@ def last_flush_lsn_upload( tenant_id: TenantId, timeline_id: TimelineId, pageserver_id: Optional[int] = None, + auth_token: Optional[str] = None, ) -> Lsn: """ Wait for pageserver to catch to the latest flush LSN of given endpoint, @@ -4563,11 +4669,11 @@ def last_flush_lsn_upload( reaching flush LSN). """ last_flush_lsn = wait_for_last_flush_lsn( - env, endpoint, tenant_id, timeline_id, pageserver_id=pageserver_id + env, endpoint, tenant_id, timeline_id, pageserver_id=pageserver_id, auth_token=auth_token ) shards = tenant_get_shards(env, tenant_id, pageserver_id) for tenant_shard_id, pageserver in shards: - ps_http = pageserver.http_client() + ps_http = pageserver.http_client(auth_token=auth_token) wait_for_last_record_lsn(ps_http, tenant_shard_id, timeline_id, last_flush_lsn) # force a checkpoint to trigger upload ps_http.timeline_checkpoint(tenant_shard_id, timeline_id) diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index 147d5705d38a..c5b09e360893 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -106,6 +106,11 @@ def scan_pageserver_log_for_errors( ".*startup_reconcile: Could not scan node.*", # Tests run in dev mode ".*Starting in dev mode.*", + # Tests that stop endpoints & use the storage controller's neon_local notification + # mechanism might fail (neon_local's stopping and endpoint isn't atomic wrt the storage + # controller's attempts to notify the endpoint). 
+ ".*reconciler.*neon_local notification hook failed.*", + ".*reconciler.*neon_local error.*", ] diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index d5441bd69464..ecc83a954657 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -340,17 +340,6 @@ def tenant_delete(self, tenant_id: Union[TenantId, TenantShardId]): self.verbose_error(res) return res - def tenant_load(self, tenant_id: TenantId, generation=None): - body = None - if generation is not None: - body = {"generation": generation} - res = self.post(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/load", json=body) - self.verbose_error(res) - - def tenant_ignore(self, tenant_id: TenantId): - res = self.post(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/ignore") - self.verbose_error(res) - def tenant_status( self, tenant_id: Union[TenantId, TenantShardId], activate: bool = False ) -> Dict[Any, Any]: diff --git a/test_runner/fixtures/pageserver/many_tenants.py b/test_runner/fixtures/pageserver/many_tenants.py index def80a1c3e6c..8730d8ef751d 100644 --- a/test_runner/fixtures/pageserver/many_tenants.py +++ b/test_runner/fixtures/pageserver/many_tenants.py @@ -66,6 +66,8 @@ def attach_broken(tenant): env.pageserver.tenant_attach( tenant, config=template_config.copy(), + generation=100, + override_storage_controller_generation=True, ) time.sleep(0.1) wait_until_tenant_state(ps_http, tenant, "Broken", 10) diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 72384c138b54..60535b759261 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -430,52 +430,6 @@ def enable_remote_storage_versioning( return response -def wait_tenant_status_404( - pageserver_http: PageserverHttpClient, - tenant_id: TenantId, - iterations: int, - interval: float = 0.250, -): - def tenant_is_missing(): - data = {} - try: - data = pageserver_http.tenant_status(tenant_id) - log.info(f"tenant status {data}") - except PageserverApiException as e: - log.debug(e) - if e.status_code == 404: - return - - raise RuntimeError(f"Timeline exists state {data.get('state')}") - - wait_until(iterations, interval=interval, func=tenant_is_missing) - - -def tenant_delete_wait_completed( - pageserver_http: PageserverHttpClient, - tenant_id: TenantId, - iterations: int, - ignore_errors: bool = False, -): - if not ignore_errors: - pageserver_http.tenant_delete(tenant_id=tenant_id) - else: - interval = 0.5 - - def delete_request_sent(): - try: - pageserver_http.tenant_delete(tenant_id=tenant_id) - except PageserverApiException as e: - log.debug(e) - if e.status_code == 404: - return - except Exception as e: - log.debug(e) - - wait_until(iterations, interval=interval, func=delete_request_sent) - wait_tenant_status_404(pageserver_http, tenant_id=tenant_id, iterations=iterations) - - MANY_SMALL_LAYERS_TENANT_CONFIG = { "gc_period": "0s", "compaction_period": "0s", diff --git a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py index 772a39fe357c..1d579214b0c5 100644 --- a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py +++ b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py @@ -85,6 +85,8 @@ def setup_wrapper(env: NeonEnv): 
f"max_throughput_latest_lsn-{n_tenants}-{pgbench_scale}", n_tenants, setup_wrapper, + # https://github.com/neondatabase/neon/issues/8070 + timeout_in_seconds=60, ) env.pageserver.allowed_errors.append( @@ -209,3 +211,11 @@ def run_benchmark_max_throughput_latest_lsn( unit="ms", report=MetricReport.LOWER_IS_BETTER, ) + + env.storage_controller.allowed_errors.append( + # The test setup swaps NeonEnv instances, hence different + # pg instances are used for the storage controller db. This means + # the storage controller doesn't know about the nodes mentioned + # in attachments.json at start-up. + ".* Scheduler missing node 1", + ) diff --git a/test_runner/performance/pageserver/util.py b/test_runner/performance/pageserver/util.py index f31cd9a9f8ce..92e05663ce20 100644 --- a/test_runner/performance/pageserver/util.py +++ b/test_runner/performance/pageserver/util.py @@ -2,7 +2,7 @@ Utilities used by all code in this sub-directory """ -from typing import Any, Callable, Dict, Tuple +from typing import Any, Callable, Dict, Optional, Tuple import fixtures.pageserver.many_tenants as many_tenants from fixtures.common_types import TenantId, TimelineId @@ -41,6 +41,7 @@ def setup_pageserver_with_tenants( name: str, n_tenants: int, setup: Callable[[NeonEnv], Tuple[TenantId, TimelineId, Dict[str, Any]]], + timeout_in_seconds: Optional[int] = None, ) -> NeonEnv: """ Utility function to set up a pageserver with a given number of identical tenants. @@ -50,6 +51,6 @@ def doit(neon_env_builder: NeonEnvBuilder) -> NeonEnv: return many_tenants.single_timeline(neon_env_builder, setup, n_tenants) env = neon_env_builder.build_and_use_snapshot(name, doit) - env.start() + env.start(timeout_in_seconds=timeout_in_seconds) ensure_pageserver_ready_for_benchmarking(env, n_tenants) return env diff --git a/test_runner/performance/test_bulk_insert.py b/test_runner/performance/test_bulk_insert.py index 3f56da7c1d4e..3dad3489762d 100644 --- a/test_runner/performance/test_bulk_insert.py +++ b/test_runner/performance/test_bulk_insert.py @@ -4,7 +4,6 @@ from fixtures.benchmark_fixture import MetricReport from fixtures.common_types import Lsn from fixtures.compare_fixtures import NeonCompare, PgCompare -from fixtures.pageserver.utils import wait_tenant_status_404 from fixtures.pg_version import PgVersion @@ -68,7 +67,6 @@ def measure_recovery_time(env: NeonCompare): (attach_gen, _) = attach_status client.tenant_delete(env.tenant) - wait_tenant_status_404(client, env.tenant, iterations=60, interval=0.5) env.env.pageserver.tenant_create(tenant_id=env.tenant, generation=attach_gen) # Measure recovery time diff --git a/test_runner/pg_clients/typescript/serverless-driver/package-lock.json b/test_runner/pg_clients/typescript/serverless-driver/package-lock.json index 5a3ad3c23840..f3b456f1edc7 100644 --- a/test_runner/pg_clients/typescript/serverless-driver/package-lock.json +++ b/test_runner/pg_clients/typescript/serverless-driver/package-lock.json @@ -6,7 +6,7 @@ "": { "dependencies": { "@neondatabase/serverless": "0.9.0", - "ws": "8.16.0" + "ws": "8.17.1" } }, "node_modules/@neondatabase/serverless": { @@ -96,9 +96,9 @@ } }, "node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": 
"sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "engines": { "node": ">=10.0.0" }, diff --git a/test_runner/pg_clients/typescript/serverless-driver/package.json b/test_runner/pg_clients/typescript/serverless-driver/package.json index 9d9da0f42cb6..3ae7a8a6cfcd 100644 --- a/test_runner/pg_clients/typescript/serverless-driver/package.json +++ b/test_runner/pg_clients/typescript/serverless-driver/package.json @@ -2,6 +2,6 @@ "type": "module", "dependencies": { "@neondatabase/serverless": "0.9.0", - "ws": "8.16.0" + "ws": "8.17.1" } } diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 1d193b8999e9..f4667a82dc33 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -195,6 +195,8 @@ def test_fully_custom_config(positive_env: NeonEnv): "walreceiver_connect_timeout": "13m", "image_layer_creation_check_threshold": 1, "switch_aux_file_policy": "cross-validation", + "lsn_lease_length": "1m", + "lsn_lease_length_for_ts": "5s", } ps_http = env.pageserver.http_client() diff --git a/test_runner/regress/test_local_file_cache.py b/test_runner/regress/test_local_file_cache.py index 76c6581448e9..3c404c3b231e 100644 --- a/test_runner/regress/test_local_file_cache.py +++ b/test_runner/regress/test_local_file_cache.py @@ -1,4 +1,5 @@ import os +import queue import random import threading import time @@ -8,11 +9,7 @@ from fixtures.utils import query_scalar -def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder, build_type: str): - if build_type == "debug": - # Disable vectored read path cross validation since it makes the test time out. - neon_env_builder.pageserver_config_override = "validate_vectored_get=false" - +def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start() cache_dir = os.path.join(env.repo_dir, "file_cache") @@ -33,11 +30,10 @@ def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder, build_type: s cur = endpoint.connect().cursor() + stop = threading.Event() n_rows = 100000 n_threads = 20 - n_updates_per_thread = 10000 n_updates_per_connection = 1000 - n_total_updates = n_threads * n_updates_per_thread cur.execute("CREATE TABLE lfctest (id int4 PRIMARY KEY, n int) WITH (fillfactor=10)") cur.execute(f"INSERT INTO lfctest SELECT g, 1 FROM generate_series(1, {n_rows}) g") @@ -48,11 +44,11 @@ def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder, build_type: s # performed (plus the initial 1 on each row). # # Furthermore, each thread will reconnect between every 1000 updates. 
- def run_updates(): + def run_updates(n_updates_performed_q: queue.Queue[int]): n_updates_performed = 0 conn = endpoint.connect() cur = conn.cursor() - for _ in range(n_updates_per_thread): + while not stop.is_set(): id = random.randint(1, n_rows) cur.execute(f"UPDATE lfctest SET n = n + 1 WHERE id = {id}") n_updates_performed += 1 @@ -61,19 +57,28 @@ def run_updates(): conn.close() conn = endpoint.connect() cur = conn.cursor() + n_updates_performed_q.put(n_updates_performed) + n_updates_performed_q: queue.Queue[int] = queue.Queue() threads: List[threading.Thread] = [] for _i in range(n_threads): - thread = threading.Thread(target=run_updates, args=(), daemon=True) + thread = threading.Thread(target=run_updates, args=(n_updates_performed_q,), daemon=True) thread.start() threads.append(thread) time.sleep(5) + # unlink, this is what we're actually testing new_cache_dir = os.path.join(env.repo_dir, "file_cache_new") os.rename(cache_dir, new_cache_dir) + time.sleep(10) + + stop.set() + + n_updates_performed = 0 for thread in threads: thread.join() + n_updates_performed += n_updates_performed_q.get() - assert query_scalar(cur, "SELECT SUM(n) FROM lfctest") == n_total_updates + n_rows + assert query_scalar(cur, "SELECT SUM(n) FROM lfctest") == n_rows + n_updates_performed diff --git a/test_runner/regress/test_pageserver_metric_collection.py b/test_runner/regress/test_pageserver_metric_collection.py index b0465f2a96e2..cea35a6acb73 100644 --- a/test_runner/regress/test_pageserver_metric_collection.py +++ b/test_runner/regress/test_pageserver_metric_collection.py @@ -75,9 +75,6 @@ def metrics_handler(request: Request) -> Response: env.pageserver.allowed_errors.extend( [ ".*metrics endpoint refused the sent metrics*", - # we have a fast rate of calculation, these can happen at shutdown - ".*synthetic_size_worker:calculate_synthetic_size.*:gather_size_inputs.*: failed to calculate logical size at .*: cancelled.*", - ".*synthetic_size_worker: failed to calculate synthetic size for tenant .*: failed to calculate some logical_sizes", ".*metrics_collection: failed to upload to S3: Failed to upload data of length .* to storage path.*", ] ) @@ -238,9 +235,6 @@ def metrics_handler(request: Request) -> Response: env.pageserver.allowed_errors.extend( [ ".*metrics endpoint refused the sent metrics*", - # we have a fast rate of calculation, these can happen at shutdown - ".*synthetic_size_worker:calculate_synthetic_size.*:gather_size_inputs.*: failed to calculate logical size at .*: cancelled.*", - ".*synthetic_size_worker: failed to calculate synthetic size for tenant .*: failed to calculate some logical_sizes", ] ) diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 2782d33e1591..8431840dc069 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -11,8 +11,6 @@ from fixtures.pageserver.common_types import parse_layer_file_name from fixtures.pageserver.utils import ( assert_prefix_empty, - poll_for_remote_storage_iterations, - tenant_delete_wait_completed, wait_for_upload_queue_empty, ) from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind, S3Storage, s3_storage @@ -363,8 +361,7 @@ def caught_up(): # Check that deletion works properly on a tenant that was live-migrated # (reproduce https://github.com/neondatabase/neon/issues/6802) - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - tenant_delete_wait_completed(pageserver_b.http_client(), 
tenant_id, iterations) + pageserver_b.http_client().tenant_delete(tenant_id) def test_heatmap_uploads(neon_env_builder: NeonEnvBuilder): @@ -552,7 +549,7 @@ def test_secondary_downloads(neon_env_builder: NeonEnvBuilder): ) log.info("Deleting tenant...") - tenant_delete_wait_completed(ps_attached.http_client(), tenant_id, 10) + ps_attached.http_client().tenant_delete(tenant_id) assert_prefix_empty( neon_env_builder.pageserver_remote_storage, diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index 885a94a557fc..756a2c17c909 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -23,11 +23,11 @@ # Run the main PostgreSQL regression tests, in src/test/regress. # +@pytest.mark.timeout(600) @pytest.mark.parametrize("shard_count", [None, 4]) def test_pg_regress( neon_env_builder: NeonEnvBuilder, test_output_dir: Path, - build_type: str, pg_bin: PgBin, capsys: CaptureFixture[str], base_dir: Path, @@ -43,10 +43,6 @@ def test_pg_regress( if shard_count is not None: neon_env_builder.num_pageservers = shard_count - if build_type == "debug": - # Disable vectored read path cross validation since it makes the test time out. - neon_env_builder.pageserver_config_override = "validate_vectored_get=false" - neon_env_builder.enable_pageserver_remote_storage(s3_storage()) neon_env_builder.enable_scrub_on_exit() env = neon_env_builder.init_start(initial_tenant_shard_count=shard_count) diff --git a/test_runner/regress/test_physical_replication.py b/test_runner/regress/test_physical_replication.py index 034f2b669df4..a1bff32eedd1 100644 --- a/test_runner/regress/test_physical_replication.py +++ b/test_runner/regress/test_physical_replication.py @@ -6,7 +6,6 @@ def test_physical_replication(neon_simple_env: NeonEnv): env = neon_simple_env - n_records = 100000 with env.endpoints.create_start( branch_name="main", endpoint_id="primary", @@ -22,8 +21,20 @@ def test_physical_replication(neon_simple_env: NeonEnv): with p_con.cursor() as p_cur: with secondary.connect() as s_con: with s_con.cursor() as s_cur: - for pk in range(n_records): + runtime_secs = 30 + started_at = time.time() + pk = 0 + while True: + pk += 1 + now = time.time() + if now - started_at > runtime_secs: + break p_cur.execute("insert into t (pk) values (%s)", (pk,)) + # an earlier version of this test was based on a fixed number of loop iterations + # and selected for pk=(random.randrange(1, fixed number of loop iterations)). + # => the probability of selection for a value that was never inserted changed from 99.9999% to 0% over the course of the test. + # + # We changed the test to where=(random.randrange(1, 2*pk)), which means the probability is now fixed to 50%. 
s_cur.execute( - "select * from t where pk=%s", (random.randrange(1, n_records),) + "select * from t where pk=%s", (random.randrange(1, 2 * pk),) ) diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 6383d24c57ae..9992647e56c7 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -11,8 +11,6 @@ MANY_SMALL_LAYERS_TENANT_CONFIG, assert_prefix_empty, enable_remote_storage_versioning, - poll_for_remote_storage_iterations, - tenant_delete_wait_completed, wait_for_upload, ) from fixtures.remote_storage import RemoteStorageKind, s3_storage @@ -83,8 +81,7 @@ def test_tenant_s3_restore( assert ( ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1 ), "tenant removed before we deletion was issued" - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - tenant_delete_wait_completed(ps_http, tenant_id, iterations) + ps_http.tenant_delete(tenant_id) ps_http.deletion_queue_flush(execute=True) assert ( ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 0 diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index f41468210c2b..9cc13ecfdbca 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -24,7 +24,6 @@ enable_remote_storage_versioning, list_prefix, remote_storage_delete_key, - tenant_delete_wait_completed, timeline_delete_wait_completed, ) from fixtures.pg_version import PgVersion @@ -40,7 +39,7 @@ def get_node_shard_counts(env: NeonEnv, tenant_ids): - counts: defaultdict[str, int] = defaultdict(int) + counts: defaultdict[int, int] = defaultdict(int) for tid in tenant_ids: for shard in env.storage_controller.locate(tid): counts[shard["node_id"]] += 1 @@ -158,7 +157,7 @@ def node_evacuated(node_id: int) -> None: # Delete all the tenants for tid in tenant_ids: - tenant_delete_wait_completed(env.storage_controller.pageserver_api(), tid, 10) + env.storage_controller.pageserver_api().tenant_delete(tid) env.storage_controller.consistency_check() @@ -934,19 +933,27 @@ def apply(self, env: NeonEnv): def clear(self, env: NeonEnv): raise NotImplementedError() + def nodes(self): + raise NotImplementedError() + class NodeStop(Failure): - def __init__(self, pageserver_id, immediate): - self.pageserver_id = pageserver_id + def __init__(self, pageserver_ids, immediate): + self.pageserver_ids = pageserver_ids self.immediate = immediate def apply(self, env: NeonEnv): - pageserver = env.get_pageserver(self.pageserver_id) - pageserver.stop(immediate=self.immediate) + for ps_id in self.pageserver_ids: + pageserver = env.get_pageserver(ps_id) + pageserver.stop(immediate=self.immediate) def clear(self, env: NeonEnv): - pageserver = env.get_pageserver(self.pageserver_id) - pageserver.start() + for ps_id in self.pageserver_ids: + pageserver = env.get_pageserver(ps_id) + pageserver.start() + + def nodes(self): + return self.pageserver_ids class PageserverFailpoint(Failure): @@ -962,6 +969,9 @@ def clear(self, env: NeonEnv): pageserver = env.get_pageserver(self.pageserver_id) pageserver.http_client().configure_failpoints((self.failpoint, "off")) + def nodes(self): + return [self.pageserver_id] + def build_node_to_tenants_map(env: NeonEnv) -> dict[int, list[TenantId]]: tenants = env.storage_controller.tenant_list() @@ -985,8 +995,9 @@ def build_node_to_tenants_map(env: NeonEnv) -> dict[int, list[TenantId]]: @pytest.mark.parametrize( 
"failure", [ - NodeStop(pageserver_id=1, immediate=False), - NodeStop(pageserver_id=1, immediate=True), + NodeStop(pageserver_ids=[1], immediate=False), + NodeStop(pageserver_ids=[1], immediate=True), + NodeStop(pageserver_ids=[1, 2], immediate=True), PageserverFailpoint(pageserver_id=1, failpoint="get-utilization-http-handler"), ], ) @@ -1039,33 +1050,50 @@ def tenants_placed(): wait_until(10, 1, tenants_placed) # ... then we apply the failure - offline_node_id = failure.pageserver_id - online_node_id = (set(range(1, len(env.pageservers) + 1)) - {offline_node_id}).pop() - env.get_pageserver(offline_node_id).allowed_errors.append( - # In the case of the failpoint failure, the impacted pageserver - # still believes it has the tenant attached since location - # config calls into it will fail due to being marked offline. - ".*Dropped remote consistent LSN updates.*", - ) + offline_node_ids = set(failure.nodes()) + online_node_ids = set(range(1, len(env.pageservers) + 1)) - offline_node_ids + + for node_id in offline_node_ids: + env.get_pageserver(node_id).allowed_errors.append( + # In the case of the failpoint failure, the impacted pageserver + # still believes it has the tenant attached since location + # config calls into it will fail due to being marked offline. + ".*Dropped remote consistent LSN updates.*", + ) + + if len(offline_node_ids) > 1: + env.get_pageserver(node_id).allowed_errors.append( + ".*Scheduling error when marking pageserver.*offline.*", + ) failure.apply(env) # ... expecting the heartbeats to mark it offline - def node_offline(): + def nodes_offline(): nodes = env.storage_controller.node_list() log.info(f"{nodes=}") - target = next(n for n in nodes if n["id"] == offline_node_id) - assert target["availability"] == "Offline" + for node in nodes: + if node["id"] in offline_node_ids: + assert node["availability"] == "Offline" # A node is considered offline if the last successful heartbeat # was more than 10 seconds ago (hardcoded in the storage controller). - wait_until(20, 1, node_offline) + wait_until(20, 1, nodes_offline) # .. expecting the tenant on the offline node to be migrated def tenant_migrated(): + if len(online_node_ids) == 0: + time.sleep(5) + return + node_to_tenants = build_node_to_tenants_map(env) log.info(f"{node_to_tenants=}") - assert set(node_to_tenants[online_node_id]) == set(tenant_ids) + + observed_tenants = set() + for node_id in online_node_ids: + observed_tenants |= set(node_to_tenants[node_id]) + + assert observed_tenants == set(tenant_ids) wait_until(10, 1, tenant_migrated) @@ -1073,31 +1101,24 @@ def tenant_migrated(): failure.clear(env) # ... expecting the offline node to become active again - def node_online(): + def nodes_online(): nodes = env.storage_controller.node_list() - target = next(n for n in nodes if n["id"] == offline_node_id) - assert target["availability"] == "Active" + for node in nodes: + if node["id"] in online_node_ids: + assert node["availability"] == "Active" - wait_until(10, 1, node_online) + wait_until(10, 1, nodes_online) time.sleep(5) - # ... then we create a new tenant - tid = TenantId.generate() - env.storage_controller.tenant_create(tid) - - # ... 
expecting it to be placed on the node that just came back online - tenants = env.storage_controller.tenant_list() - newest_tenant = next(t for t in tenants if t["tenant_shard_id"] == str(tid)) - locations = list(newest_tenant["observed"]["locations"].keys()) - locations = [int(node_id) for node_id in locations] - assert locations == [offline_node_id] + node_to_tenants = build_node_to_tenants_map(env) + log.info(f"Back online: {node_to_tenants=}") # ... expecting the storage controller to reach a consistent state def storage_controller_consistent(): env.storage_controller.consistency_check() - wait_until(10, 1, storage_controller_consistent) + wait_until(30, 1, storage_controller_consistent) def test_storage_controller_re_attach(neon_env_builder: NeonEnvBuilder): @@ -1362,7 +1383,8 @@ def test_lock_time_tracing(neon_env_builder: NeonEnvBuilder): tenant_id = env.initial_tenant env.storage_controller.allowed_errors.extend( [ - ".*Lock on.*", + ".*Exclusive lock by.*", + ".*Shared lock by.*", ".*Scheduling is disabled by policy.*", f".*Operation TimelineCreate on key {tenant_id} has waited.*", ] @@ -1394,11 +1416,25 @@ def update_tenent_policy(): ) thread_update_tenant_policy.join() - env.storage_controller.assert_log_contains("Lock on UpdatePolicy was held for") - env.storage_controller.assert_log_contains( + env.storage_controller.assert_log_contains("Exclusive lock by UpdatePolicy was held for") + _, last_log_cursor = env.storage_controller.assert_log_contains( f"Operation TimelineCreate on key {tenant_id} has waited" ) + # Test out shared lock + env.storage_controller.configure_failpoints( + ("tenant-create-timeline-shared-lock", "return(31000)") + ) + + timeline_id = TimelineId.generate() + # This will hold the shared lock for enough time to cause an warning + env.storage_controller.pageserver_api().timeline_create( + pg_version=PgVersion.NOT_SET, tenant_id=tenant_id, new_timeline_id=timeline_id + ) + env.storage_controller.assert_log_contains( + "Shared lock by TimelineCreate was held for", offset=last_log_cursor + ) + @pytest.mark.parametrize("remote_storage", [RemoteStorageKind.LOCAL_FS, s3_storage()]) @pytest.mark.parametrize("shard_count", [None, 4]) @@ -1480,3 +1516,156 @@ def test_tenant_import(neon_env_builder: NeonEnvBuilder, shard_count, remote_sto workload = Workload(env, tenant_id, timeline, branch_name=branch) workload.expect_rows = expect_rows workload.validate() + + +def retryable_node_operation(op, ps_id, max_attempts, backoff): + while max_attempts > 0: + try: + op(ps_id) + return + except StorageControllerApiException as e: + max_attempts -= 1 + log.info(f"Operation failed ({max_attempts} attempts left): {e}") + + if max_attempts == 0: + raise e + + time.sleep(backoff) + + +def poll_node_status(env, node_id, desired_scheduling_policy, max_attempts, backoff): + log.info(f"Polling {node_id} for {desired_scheduling_policy} scheduling policy") + while max_attempts > 0: + try: + status = env.storage_controller.node_status(node_id) + policy = status["scheduling"] + if policy == desired_scheduling_policy: + return + else: + max_attempts -= 1 + log.info(f"Status call returned {policy=} ({max_attempts} attempts left)") + + if max_attempts == 0: + raise AssertionError( + f"Status for {node_id=} did not reach {desired_scheduling_policy=}" + ) + + time.sleep(backoff) + except StorageControllerApiException as e: + max_attempts -= 1 + log.info(f"Status call failed ({max_attempts} retries left): {e}") + + if max_attempts == 0: + raise e + + time.sleep(backoff) + + +def 
test_graceful_cluster_restart(neon_env_builder: NeonEnvBuilder): + """ + Graceful reststart of storage controller clusters use the drain and + fill hooks in order to migrate attachments away from pageservers before + restarting. In practice, Ansible will drive this process. + """ + neon_env_builder.num_pageservers = 2 + env = neon_env_builder.init_configs() + env.start() + + tenant_count = 5 + shard_count_per_tenant = 8 + total_shards = tenant_count * shard_count_per_tenant + tenant_ids = [] + + for _ in range(0, tenant_count): + tid = TenantId.generate() + tenant_ids.append(tid) + env.neon_cli.create_tenant( + tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant + ) + + # Give things a chance to settle. + env.storage_controller.reconcile_until_idle(timeout_secs=30) + + nodes = env.storage_controller.node_list() + assert len(nodes) == 2 + + def assert_shard_counts_balanced(env: NeonEnv, shard_counts, total_shards): + # Assert that all nodes have some attached shards + assert len(shard_counts) == len(env.pageservers) + + min_shard_count = min(shard_counts.values()) + max_shard_count = max(shard_counts.values()) + + flake_factor = 5 / 100 + assert max_shard_count - min_shard_count <= int(total_shards * flake_factor) + + # Perform a graceful rolling restart + for ps in env.pageservers: + retryable_node_operation( + lambda ps_id: env.storage_controller.node_drain(ps_id), ps.id, max_attempts=3, backoff=2 + ) + poll_node_status(env, ps.id, "PauseForRestart", max_attempts=6, backoff=5) + + shard_counts = get_node_shard_counts(env, tenant_ids) + log.info(f"Shard counts after draining node {ps.id}: {shard_counts}") + # Assert that we've drained the node + assert shard_counts[ps.id] == 0 + # Assert that those shards actually went somewhere + assert sum(shard_counts.values()) == total_shards + + ps.restart() + poll_node_status(env, ps.id, "Active", max_attempts=10, backoff=1) + + retryable_node_operation( + lambda ps_id: env.storage_controller.node_fill(ps_id), ps.id, max_attempts=3, backoff=2 + ) + poll_node_status(env, ps.id, "Active", max_attempts=6, backoff=5) + + shard_counts = get_node_shard_counts(env, tenant_ids) + log.info(f"Shard counts after filling node {ps.id}: {shard_counts}") + assert_shard_counts_balanced(env, shard_counts, total_shards) + + # Now check that shards are reasonably balanced + shard_counts = get_node_shard_counts(env, tenant_ids) + log.info(f"Shard counts after rolling restart: {shard_counts}") + assert_shard_counts_balanced(env, shard_counts, total_shards) + + +def test_background_operation_cancellation(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_pageservers = 2 + env = neon_env_builder.init_configs() + env.start() + + tenant_count = 5 + shard_count_per_tenant = 8 + tenant_ids = [] + + for _ in range(0, tenant_count): + tid = TenantId.generate() + tenant_ids.append(tid) + env.neon_cli.create_tenant( + tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant + ) + + # See sleep comment in the test above. 
+ time.sleep(2) + + nodes = env.storage_controller.node_list() + assert len(nodes) == 2 + + env.storage_controller.configure_failpoints(("sleepy-drain-loop", "return(2000)")) + + ps_id_to_drain = env.pageservers[0].id + + retryable_node_operation( + lambda ps_id: env.storage_controller.node_drain(ps_id), + ps_id_to_drain, + max_attempts=3, + backoff=2, + ) + + poll_node_status(env, ps_id_to_drain, "Draining", max_attempts=6, backoff=2) + + env.storage_controller.cancel_node_drain(ps_id_to_drain) + + poll_node_status(env, ps_id_to_drain, "Active", max_attempts=6, backoff=2) diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index fd3cc45c3f48..a3316f2f4592 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -1,17 +1,11 @@ -import concurrent.futures -import enum -import os -import shutil from threading import Thread import pytest from fixtures.common_types import Lsn, TenantId, TimelineId -from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, StorageScrubber, - last_flush_lsn_upload, wait_for_last_flush_lsn, ) from fixtures.pageserver.http import PageserverApiException @@ -19,18 +13,33 @@ MANY_SMALL_LAYERS_TENANT_CONFIG, assert_prefix_empty, assert_prefix_not_empty, - poll_for_remote_storage_iterations, - tenant_delete_wait_completed, wait_for_upload, - wait_tenant_status_404, - wait_until_tenant_active, - wait_until_tenant_state, ) -from fixtures.remote_storage import RemoteStorageKind, available_s3_storages, s3_storage +from fixtures.remote_storage import RemoteStorageKind, s3_storage from fixtures.utils import run_pg_bench_small, wait_until from requests.exceptions import ReadTimeout +def error_tolerant_delete(ps_http, tenant_id): + """ + For tests that inject 500 errors, we must retry repeatedly when issuing deletions + """ + while True: + try: + ps_http.tenant_delete(tenant_id=tenant_id) + except PageserverApiException as e: + if e.status_code == 500: + # This test uses failure injection, which can produce 500s as the pageserver expects + # the object store to always be available, and the ListObjects during deletion is generally + # an infallible operation + assert "simulated failure of remote operation" in e.message + else: + raise + else: + # Success, drop out + break + + def test_tenant_delete_smoke( neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, @@ -59,21 +68,7 @@ def test_tenant_delete_smoke( # Check that deleting a non-existent tenant gives the expected result: this is a loop because we # may need to retry on some remote storage errors injected by the test harness - while True: - try: - ps_http.tenant_delete(tenant_id=tenant_id) - except PageserverApiException as e: - if e.status_code == 500: - # This test uses failure injection, which can produce 500s as the pageserver expects - # the object store to always be available, and the ListObjects during deletion is generally - # an infallible operation - assert "simulated failure of remote operation" in e.message - elif e.status_code == 404: - # This is our expected result: trying to erase a non-existent tenant gives us 404 - assert "NotFound" in e.message - break - else: - raise + error_tolerant_delete(ps_http, tenant_id) env.neon_cli.create_tenant( tenant_id=tenant_id, @@ -108,10 +103,8 @@ def test_tenant_delete_smoke( # Upload a heatmap so that we exercise deletion of that too ps_http.tenant_heatmap_upload(tenant_id) - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - 
assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 2 - tenant_delete_wait_completed(ps_http, tenant_id, iterations) + error_tolerant_delete(ps_http, tenant_id) assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1 tenant_path = env.pageserver.tenant_dir(tenant_id) @@ -129,286 +122,7 @@ def test_tenant_delete_smoke( # Deletion updates the tenant count: the one default tenant remains assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1 - - -class Check(enum.Enum): - RETRY_WITHOUT_RESTART = enum.auto() - RETRY_WITH_RESTART = enum.auto() - - -FAILPOINTS = [ - "tenant-delete-before-shutdown", - "tenant-delete-before-create-remote-mark", - "tenant-delete-before-create-local-mark", - "tenant-delete-before-background", - "tenant-delete-before-polling-ongoing-deletions", - "tenant-delete-before-cleanup-remaining-fs-traces", - "tenant-delete-before-remove-timelines-dir", - "tenant-delete-before-remove-deleted-mark", - "tenant-delete-before-remove-tenant-dir", - # Some failpoints from timeline deletion - "timeline-delete-before-index-deleted-at", - "timeline-delete-before-rm", - "timeline-delete-before-index-delete", -] - -FAILPOINTS_BEFORE_BACKGROUND = [ - "timeline-delete-before-schedule", - "tenant-delete-before-shutdown", - "tenant-delete-before-create-remote-mark", - "tenant-delete-before-create-local-mark", - "tenant-delete-before-background", -] - - -def combinations(): - result = [] - - remotes = available_s3_storages() - - for remote_storage_kind in remotes: - for delete_failpoint in FAILPOINTS: - # Simulate failures for only one type of remote storage - # to avoid log pollution and make tests run faster - if remote_storage_kind is RemoteStorageKind.MOCK_S3: - simulate_failures = True - else: - simulate_failures = False - result.append((remote_storage_kind, delete_failpoint, simulate_failures)) - return result - - -@pytest.mark.parametrize("check", list(Check)) -@pytest.mark.parametrize("remote_storage_kind, failpoint, simulate_failures", combinations()) -def test_delete_tenant_exercise_crash_safety_failpoints( - neon_env_builder: NeonEnvBuilder, - remote_storage_kind: RemoteStorageKind, - failpoint: str, - simulate_failures: bool, - check: Check, - pg_bin: PgBin, -): - if simulate_failures: - neon_env_builder.pageserver_config_override = "test_remote_failures=1" - - neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind) - - env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) - - tenant_id = env.initial_tenant - - env.pageserver.allowed_errors.extend( - [ - # From deletion polling - f".*NotFound: tenant {env.initial_tenant}.*", - # allow errors caused by failpoints - f".*failpoint: {failpoint}", - # It appears when we stopped flush loop during deletion (attempt) and then pageserver is stopped - ".*shutdown.*tenant_id.*shutdown.*timeline_id.*: failed to freeze and flush: cannot flush frozen layers when flush_loop is not running, state is Exited", - # We may leave some upload tasks in the queue. They're likely deletes. - # For uploads we explicitly wait with `last_flush_lsn_upload` below. - # So by ignoring these instead of waiting for empty upload queue - # we execute more distinct code paths. 
- '.*stopping left-over name="remote upload".*', - # an on-demand is cancelled by shutdown - ".*initial size calculation failed: downloading failed, possibly for shutdown", - ] - ) - - if simulate_failures: - env.pageserver.allowed_errors.append( - # The deletion queue will complain when it encounters simulated S3 errors - ".*deletion executor: DeleteObjects request failed.*", - ) - - ps_http = env.pageserver.http_client() - - timeline_id = env.neon_cli.create_timeline("delete", tenant_id=tenant_id) - with env.endpoints.create_start("delete", tenant_id=tenant_id) as endpoint: - # generate enough layers - run_pg_bench_small(pg_bin, endpoint.connstr()) - last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) - - assert_prefix_not_empty( - neon_env_builder.pageserver_remote_storage, - prefix="/".join( - ( - "tenants", - str(tenant_id), - ) - ), - ) - - ps_http.configure_failpoints((failpoint, "return")) - - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - - # These failpoints are earlier than background task is spawned. - # so they result in api request failure. - if failpoint in FAILPOINTS_BEFORE_BACKGROUND: - with pytest.raises(PageserverApiException, match=failpoint): - ps_http.tenant_delete(tenant_id) - - else: - ps_http.tenant_delete(tenant_id) - tenant_info = wait_until_tenant_state( - pageserver_http=ps_http, - tenant_id=tenant_id, - expected_state="Broken", - iterations=iterations, - ) - - reason = tenant_info["state"]["data"]["reason"] - log.info(f"tenant broken: {reason}") - - # failpoint may not be the only error in the stack - assert reason.endswith(f"failpoint: {failpoint}"), reason - - if check is Check.RETRY_WITH_RESTART: - env.pageserver.restart() - - if failpoint in ( - "tenant-delete-before-shutdown", - "tenant-delete-before-create-remote-mark", - ): - wait_until_tenant_active( - ps_http, tenant_id=tenant_id, iterations=iterations, period=0.25 - ) - tenant_delete_wait_completed(ps_http, tenant_id, iterations=iterations) - else: - # Pageserver should've resumed deletion after restart. 
- wait_tenant_status_404(ps_http, tenant_id, iterations=iterations + 10) - elif check is Check.RETRY_WITHOUT_RESTART: - # this should succeed - # this also checks that delete can be retried even when tenant is in Broken state - ps_http.configure_failpoints((failpoint, "off")) - - tenant_delete_wait_completed(ps_http, tenant_id, iterations=iterations) - - tenant_dir = env.pageserver.tenant_dir(tenant_id) - # Check local is empty - assert not tenant_dir.exists() - - # Check remote is empty - assert_prefix_empty( - neon_env_builder.pageserver_remote_storage, - prefix="/".join( - ( - "tenants", - str(tenant_id), - ) - ), - allowed_postfix="initdb.tar.zst", - ) - - -def test_tenant_delete_is_resumed_on_attach( - neon_env_builder: NeonEnvBuilder, - pg_bin: PgBin, -): - remote_storage_kind = s3_storage() - neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind) - - env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) - env.pageserver.allowed_errors.append( - # lucky race with stopping from flushing a layer we fail to schedule any uploads - ".*layer flush task.+: could not flush frozen layer: update_metadata_file" - ) - - tenant_id = env.initial_tenant - - ps_http = env.pageserver.http_client() - # create two timelines - for timeline in ["first", "second"]: - timeline_id = env.neon_cli.create_timeline(timeline, tenant_id=tenant_id) - with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint: - run_pg_bench_small(pg_bin, endpoint.connstr()) - wait_for_last_flush_lsn(env, endpoint, tenant=tenant_id, timeline=timeline_id) - - # sanity check, data should be there - assert_prefix_not_empty( - neon_env_builder.pageserver_remote_storage, - prefix="/".join( - ( - "tenants", - str(tenant_id), - ) - ), - ) - - # failpoint before we remove index_part from s3 - failpoint = "timeline-delete-before-index-delete" - ps_http.configure_failpoints((failpoint, "return")) - - env.pageserver.allowed_errors.extend( - ( - # allow errors caused by failpoints - f".*failpoint: {failpoint}", - # From deletion polling - f".*NotFound: tenant {env.initial_tenant}.*", - # It appears when we stopped flush loop during deletion (attempt) and then pageserver is stopped - ".*shutdown.*tenant_id.*shutdown.*timeline_id.*: failed to freeze and flush: cannot flush frozen layers when flush_loop is not running, state is Exited", - # error from http response is also logged - ".*InternalServerError\\(Tenant is marked as deleted on remote storage.*", - '.*shutdown_pageserver{exit_code=0}: stopping left-over name="remote upload".*', - ) - ) - - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - - ps_http.tenant_delete(tenant_id) - - tenant_info = wait_until_tenant_state( - pageserver_http=ps_http, - tenant_id=tenant_id, - expected_state="Broken", - iterations=iterations, - ) - - assert_prefix_not_empty( - neon_env_builder.pageserver_remote_storage, - prefix="/".join( - ( - "tenants", - str(tenant_id), - ) - ), - ) - - reason = tenant_info["state"]["data"]["reason"] - # failpoint may not be the only error in the stack - assert reason.endswith(f"failpoint: {failpoint}"), reason - - # now we stop pageserver and remove local tenant state - env.endpoints.stop_all() - env.pageserver.stop() - - dir_to_clear = env.pageserver.tenant_dir() - shutil.rmtree(dir_to_clear) - os.mkdir(dir_to_clear) - - env.pageserver.start() - - # now we call attach - env.pageserver.tenant_attach(tenant_id=tenant_id) - - # delete should be resumed - wait_tenant_status_404(ps_http, 
tenant_id, iterations) - - # we shouldn've created tenant dir on disk - tenant_path = env.pageserver.tenant_dir(tenant_id) - assert not tenant_path.exists() - - ps_http.deletion_queue_flush(execute=True) - assert_prefix_empty( - neon_env_builder.pageserver_remote_storage, - prefix="/".join( - ( - "tenants", - str(tenant_id), - ) - ), - ) + assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "inprogress"}) == 0 def test_long_timeline_create_cancelled_by_tenant_delete(neon_env_builder: NeonEnvBuilder): @@ -483,105 +197,6 @@ def tenant_is_deleted(): deletion.join() -def test_tenant_delete_concurrent( - neon_env_builder: NeonEnvBuilder, - pg_bin: PgBin, -): - """ - Validate that concurrent delete requests to the same tenant behave correctly: - exactly one should execute: the rest should give 202 responses but not start - another deletion. - - This is a reproducer for https://github.com/neondatabase/neon/issues/5936 - """ - neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.MOCK_S3) - env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) - ps_http = env.pageserver.http_client() - tenant_id = env.initial_tenant - timeline_id = env.initial_timeline - - # Populate some data - with env.endpoints.create_start("main", tenant_id=tenant_id) as endpoint: - run_pg_bench_small(pg_bin, endpoint.connstr()) - last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) - - env.pageserver.allowed_errors.extend( - [ - # lucky race with stopping from flushing a layer we fail to schedule any uploads - ".*layer flush task.+: could not flush frozen layer: update_metadata_file", - ] - ) - - BEFORE_REMOVE_FAILPOINT = "tenant-delete-before-map-remove" - BEFORE_RUN_FAILPOINT = "tenant-delete-before-run" - - # We will let the initial delete run until right before it would remove - # the tenant's TenantSlot. This pauses it in a state where the tenant - # is visible in Stopping state, and concurrent requests should fail with 4xx. - ps_http.configure_failpoints((BEFORE_REMOVE_FAILPOINT, "pause")) - - def delete_tenant(): - return ps_http.tenant_delete(tenant_id) - - def hit_remove_failpoint(): - return env.pageserver.assert_log_contains(f"at failpoint {BEFORE_REMOVE_FAILPOINT}")[1] - - def hit_run_failpoint(): - env.pageserver.assert_log_contains(f"at failpoint {BEFORE_RUN_FAILPOINT}") - - with concurrent.futures.ThreadPoolExecutor() as executor: - background_200_req = executor.submit(delete_tenant) - assert background_200_req.result(timeout=10).status_code == 202 - - # Wait until the first request completes its work and is blocked on removing - # the TenantSlot from tenant manager. - log_cursor = wait_until(100, 0.1, hit_remove_failpoint) - assert log_cursor is not None - - # Start another request: this should succeed without actually entering the deletion code - ps_http.tenant_delete(tenant_id) - assert not env.pageserver.log_contains( - f"at failpoint {BEFORE_RUN_FAILPOINT}", offset=log_cursor - ) - - # Start another background request, which will pause after acquiring a TenantSlotGuard - # but before completing. 
- ps_http.configure_failpoints((BEFORE_RUN_FAILPOINT, "pause")) - background_4xx_req = executor.submit(delete_tenant) - wait_until(100, 0.1, hit_run_failpoint) - - # The TenantSlot is still present while the original request is hung before - # final removal - assert ( - ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1 - ) - - # Permit the original request to run to success - ps_http.configure_failpoints((BEFORE_REMOVE_FAILPOINT, "off")) - - # Permit the duplicate background request to run to completion and fail. - ps_http.configure_failpoints((BEFORE_RUN_FAILPOINT, "off")) - background_4xx_req.result(timeout=10) - assert not env.pageserver.log_contains( - f"at failpoint {BEFORE_RUN_FAILPOINT}", offset=log_cursor - ) - - # Physical deletion should have happened - assert_prefix_empty( - neon_env_builder.pageserver_remote_storage, - prefix="/".join( - ( - "tenants", - str(tenant_id), - ) - ), - ) - - # Zero tenants remain (we deleted the default tenant) - assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 0 - assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "inprogress"}) == 0 - - def test_tenant_delete_races_timeline_creation( neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, @@ -674,9 +289,7 @@ def deletion_arrived(): # Disable the failpoint and wait for deletion to finish ps_http.configure_failpoints((BEFORE_INITDB_UPLOAD_FAILPOINT, "off")) - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - - tenant_delete_wait_completed(ps_http, tenant_id, iterations, ignore_errors=True) + ps_http.tenant_delete(tenant_id) # Physical deletion should have happened assert_prefix_empty( @@ -727,8 +340,7 @@ def test_tenant_delete_scrubber(pg_bin: PgBin, neon_env_builder: NeonEnvBuilder) env.start() ps_http = env.pageserver.http_client() - iterations = poll_for_remote_storage_iterations(remote_storage_kind) - tenant_delete_wait_completed(ps_http, tenant_id, iterations) + ps_http.tenant_delete(tenant_id) env.stop() scrubber.scan_metadata() diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 871351b2d54b..4c49e6fb856c 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -344,56 +344,6 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): pageserver_http.timeline_gc(tenant_id, timeline_id, 0) -# Creates and ignores a tenant, then detaches it: first, with no parameters (should fail), -# then with parameters to force ignored tenant detach (should not fail). 
-def test_tenant_detach_ignored_tenant(neon_simple_env: NeonEnv): - env = neon_simple_env - client = env.pageserver.http_client() - - # create a new tenant - tenant_id, _ = env.neon_cli.create_tenant() - - env.pageserver.allowed_errors.extend(PERMIT_PAGE_SERVICE_ERRORS) - - # assert tenant exists on disk - assert env.pageserver.tenant_dir(tenant_id).exists() - - endpoint = env.endpoints.create_start("main", tenant_id=tenant_id) - # we rely upon autocommit after each statement - endpoint.safe_psql_many( - queries=[ - "CREATE TABLE t(key int primary key, value text)", - "INSERT INTO t SELECT generate_series(1,100000), 'payload'", - ] - ) - - # ignore tenant - client.tenant_ignore(tenant_id) - env.pageserver.allowed_errors.append(".*NotFound: tenant .*") - # ensure tenant couldn't be detached without the special flag for ignored tenant - log.info("detaching ignored tenant WITHOUT required flag") - with pytest.raises( - expected_exception=PageserverApiException, match=f"NotFound: tenant {tenant_id}" - ): - client.tenant_detach(tenant_id) - - log.info("tenant detached failed as expected") - - # ensure tenant is detached with ignore state - log.info("detaching ignored tenant with required flag") - client.tenant_detach(tenant_id, True) - log.info("ignored tenant detached without error") - - # check that nothing is left on disk for deleted tenant - assert not env.pageserver.tenant_dir(tenant_id).exists() - - # assert the tenant does not exists in the Pageserver - tenants_after_detach = [tenant["id"] for tenant in client.tenant_list()] - assert ( - tenant_id not in tenants_after_detach - ), f"Ignored and then detached tenant {tenant_id} should not be present in pageserver's memory" - - # Creates a tenant, and detaches it with extra paremeter that forces ignored tenant detach. # Tenant should be detached without issues. def test_tenant_detach_regular_tenant(neon_simple_env: NeonEnv): @@ -500,153 +450,6 @@ def test_detach_while_attaching( cur.execute("SELECT COUNT(*) FROM foo") -# Tests that `ignore` and `get` operations' combination is able to remove and restore the tenant in pageserver's memory. 
-# * writes some data into tenant's timeline -# * ensures it's synced with the remote storage -# * `ignore` the tenant -# * verify that ignored tenant files are generally unchanged, only an ignored mark had appeared -# * verify the ignored tenant is gone from pageserver's memory -# * restart the pageserver and verify that ignored tenant is still not loaded -# * `load` the same tenant -# * ensure that it's status is `Active` and it's present in pageserver's memory with all timelines -def test_ignored_tenant_reattach(neon_env_builder: NeonEnvBuilder): - neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.MOCK_S3) - env = neon_env_builder.init_start() - pageserver_http = env.pageserver.http_client() - - ignored_tenant_id, _ = env.neon_cli.create_tenant() - tenant_dir = env.pageserver.tenant_dir(ignored_tenant_id) - tenants_before_ignore = [tenant["id"] for tenant in pageserver_http.tenant_list()] - tenants_before_ignore.sort() - timelines_before_ignore = [ - timeline["timeline_id"] - for timeline in pageserver_http.timeline_list(tenant_id=ignored_tenant_id) - ] - files_before_ignore = [tenant_path for tenant_path in tenant_dir.glob("**/*")] - - # ignore the tenant and veirfy it's not present in pageserver replies, with its files still on disk - pageserver_http.tenant_ignore(ignored_tenant_id) - - files_after_ignore_with_retain = [tenant_path for tenant_path in tenant_dir.glob("**/*")] - new_files = set(files_after_ignore_with_retain) - set(files_before_ignore) - disappeared_files = set(files_before_ignore) - set(files_after_ignore_with_retain) - assert ( - len(disappeared_files) == 0 - ), f"Tenant ignore should not remove files from disk, missing: {disappeared_files}" - assert ( - len(new_files) == 1 - ), f"Only tenant ignore file should appear on disk but got: {new_files}" - - tenants_after_ignore = [tenant["id"] for tenant in pageserver_http.tenant_list()] - assert ignored_tenant_id not in tenants_after_ignore, "Ignored tenant should be missing" - assert len(tenants_after_ignore) + 1 == len( - tenants_before_ignore - ), "Only ignored tenant should be missing" - - # restart the pageserver to ensure we don't load the ignore timeline - env.pageserver.stop() - env.pageserver.start() - tenants_after_restart = [tenant["id"] for tenant in pageserver_http.tenant_list()] - tenants_after_restart.sort() - assert ( - tenants_after_restart == tenants_after_ignore - ), "Ignored tenant should not be reloaded after pageserver restart" - - # now, load it from the local files and expect it works - env.pageserver.tenant_load(tenant_id=ignored_tenant_id) - wait_until_tenant_state(pageserver_http, ignored_tenant_id, "Active", 5) - - tenants_after_attach = [tenant["id"] for tenant in pageserver_http.tenant_list()] - tenants_after_attach.sort() - assert tenants_after_attach == tenants_before_ignore, "Should have all tenants back" - - timelines_after_ignore = [ - timeline["timeline_id"] - for timeline in pageserver_http.timeline_list(tenant_id=ignored_tenant_id) - ] - assert timelines_before_ignore == timelines_after_ignore, "Should have all timelines back" - - -# Tests that it's possible to `load` tenants with missing layers and get them restored: -# * writes some data into tenant's timeline -# * ensures it's synced with the remote storage -# * `ignore` the tenant -# * removes all timeline's local layers -# * `load` the same tenant -# * ensure that it's status is `Active` -# * check that timeline data is restored -def test_ignored_tenant_download_missing_layers(neon_env_builder: NeonEnvBuilder): - 
neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) - env = neon_env_builder.init_start() - pageserver_http = env.pageserver.http_client() - endpoint = env.endpoints.create_start("main") - - tenant_id = env.initial_tenant - timeline_id = env.initial_timeline - - env.pageserver.allowed_errors.extend(PERMIT_PAGE_SERVICE_ERRORS) - - data_id = 1 - data_secret = "very secret secret" - insert_test_data(pageserver_http, tenant_id, timeline_id, data_id, data_secret, endpoint) - - tenants_before_ignore = [tenant["id"] for tenant in pageserver_http.tenant_list()] - tenants_before_ignore.sort() - timelines_before_ignore = [ - timeline["timeline_id"] for timeline in pageserver_http.timeline_list(tenant_id=tenant_id) - ] - - # ignore the tenant and remove its layers - pageserver_http.tenant_ignore(tenant_id) - timeline_dir = env.pageserver.timeline_dir(tenant_id, timeline_id) - layers_removed = False - for dir_entry in timeline_dir.iterdir(): - if dir_entry.name.startswith("00000"): - # Looks like a layer file. Remove it - dir_entry.unlink() - layers_removed = True - assert layers_removed, f"Found no layers for tenant {timeline_dir}" - - # now, load it from the local files and expect it to work due to remote storage restoration - env.pageserver.tenant_load(tenant_id=tenant_id) - wait_until_tenant_state(pageserver_http, tenant_id, "Active", 5) - - tenants_after_attach = [tenant["id"] for tenant in pageserver_http.tenant_list()] - tenants_after_attach.sort() - assert tenants_after_attach == tenants_before_ignore, "Should have all tenants back" - - timelines_after_ignore = [ - timeline["timeline_id"] for timeline in pageserver_http.timeline_list(tenant_id=tenant_id) - ] - assert timelines_before_ignore == timelines_after_ignore, "Should have all timelines back" - - endpoint.stop() - endpoint.start() - ensure_test_data(data_id, data_secret, endpoint) - - -# Tests that attach is never working on a tenant, ignored or not, as long as it's not absent locally -# Similarly, tests that it's not possible to schedule a `load` for tenat that's not ignored. -def test_load_negatives(neon_env_builder: NeonEnvBuilder): - neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) - env = neon_env_builder.init_start() - pageserver_http = env.pageserver.http_client() - env.endpoints.create_start("main") - - tenant_id = env.initial_tenant - - env.pageserver.allowed_errors.extend(PERMIT_PAGE_SERVICE_ERRORS) - - env.pageserver.allowed_errors.append(".*tenant .*? 
already exists, state:.*") - with pytest.raises( - expected_exception=PageserverApiException, - match=f"tenant {tenant_id} already exists, state: Active", - ): - env.pageserver.tenant_load(tenant_id) - - pageserver_http.tenant_ignore(tenant_id) - - def test_detach_while_activating( neon_env_builder: NeonEnvBuilder, ): @@ -770,7 +573,7 @@ def found_broken(): wait_until(10, 0.5, found_broken) - client.tenant_ignore(env.initial_tenant) + client.tenant_detach(env.initial_tenant) def found_cleaned_up(): m = client.get_metrics() @@ -782,7 +585,7 @@ def found_cleaned_up(): wait_until(10, 0.5, found_cleaned_up) - env.pageserver.tenant_load(env.initial_tenant) + env.pageserver.tenant_attach(env.initial_tenant) def found_active(): m = client.get_metrics() diff --git a/test_runner/regress/test_tenant_relocation.py b/test_runner/regress/test_tenant_relocation.py index be289e03d60c..9fe732e28806 100644 --- a/test_runner/regress/test_tenant_relocation.py +++ b/test_runner/regress/test_tenant_relocation.py @@ -15,7 +15,6 @@ assert_tenant_state, wait_for_last_record_lsn, wait_for_upload, - wait_tenant_status_404, ) from fixtures.remote_storage import ( LocalFsStorage, @@ -348,9 +347,6 @@ def test_tenant_relocation( # is no longer involved, and if it is, we will see the error origin_http.tenant_detach(tenant_id) - # Wait a little, so that the detach operation has time to finish. - wait_tenant_status_404(origin_http, tenant_id, iterations=100, interval=1) - post_migration_check(ep_main, 500500, old_local_path_main) post_migration_check(ep_second, 1001000, old_local_path_second) diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py index a3dd4229034e..6c85ddebbcfb 100644 --- a/test_runner/regress/test_tenant_size.py +++ b/test_runner/regress/test_tenant_size.py @@ -15,7 +15,6 @@ ) from fixtures.pageserver.http import PageserverApiException, PageserverHttpClient from fixtures.pageserver.utils import ( - tenant_delete_wait_completed, timeline_delete_wait_completed, wait_until_tenant_active, ) @@ -669,7 +668,7 @@ def test_synthetic_size_while_deleting(neon_env_builder: NeonEnvBuilder): ), ) - tenant_delete_wait_completed(client, env.initial_tenant, 10) + client.tenant_delete(env.initial_tenant) client.configure_failpoints((failpoint, "off")) diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index f0b2f7d733d7..606ce203cdc6 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ b/test_runner/regress/test_timeline_detach_ancestor.py @@ -14,7 +14,7 @@ wait_for_last_flush_lsn, ) from fixtures.pageserver.http import HistoricLayerInfo, PageserverApiException -from fixtures.pageserver.utils import wait_tenant_status_404, wait_timeline_detail_404 +from fixtures.pageserver.utils import wait_timeline_detail_404 from fixtures.remote_storage import LocalFsStorage from fixtures.utils import assert_pageserver_backups_equal @@ -578,7 +578,6 @@ def test_timeline_ancestor_errors(neon_env_builder: NeonEnvBuilder): assert info.value.status_code == 400 client.tenant_delete(env.initial_tenant) - wait_tenant_status_404(client, env.initial_tenant, 10, 1) with pytest.raises(PageserverApiException) as e: client.detach_ancestor(env.initial_tenant, first_branch) diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index db5297870e50..3110833563cd 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -26,7 +26,6 @@ 
assert_tenant_state, timeline_delete_wait_completed, wait_for_upload_queue_empty, - wait_tenant_status_404, wait_until_tenant_active, ) from fixtures.pg_version import PgVersion @@ -864,39 +863,33 @@ def delete_lazy_activating( ): pageserver_http = pageserver.http_client() - # Deletion itself won't complete due to our failpoint: Tenant::shutdown can't complete while calculating - # logical size is paused in a failpoint. So instead we will use a log observation to check that - # on-demand activation was triggered by the tenant deletion - log_match = f".*attach{{tenant_id={delete_tenant_id} shard_id=0000 gen=[0-9a-f]+}}: Activating tenant \\(on-demand\\).*" - if expect_attaching: assert pageserver_http.tenant_status(delete_tenant_id)["state"]["slug"] == "Attaching" with concurrent.futures.ThreadPoolExecutor() as executor: log.info("Starting background delete") - def activated_on_demand(): - assert pageserver.log_contains(log_match) is not None + def shutting_down(): + assert pageserver.log_contains(".*Waiting for timelines.*") is not None def delete_tenant(): pageserver_http.tenant_delete(delete_tenant_id) background_delete = executor.submit(delete_tenant) - log.info(f"Waiting for activation message '{log_match}'") + # We expect deletion to enter shutdown of the tenant even though it's in the attaching state try: - wait_until(10, 1, activated_on_demand) + # Deletion will get to the point in shutdown where it's waiting for timeline shutdown, then + # hang because of our failpoint blocking activation. + wait_until(10, 1, shutting_down) finally: log.info("Clearing failpoint") pageserver_http.configure_failpoints(("timeline-calculate-logical-size-pause", "off")) - # Deletion should complete successfully now that failpoint is unblocked + # Deletion should complete successfully now that failpoint is unblocked and shutdown can complete log.info("Joining background delete") background_delete.result(timeout=10) - # Poll for deletion to complete - wait_tenant_status_404(pageserver_http, tenant_id=delete_tenant_id, iterations=40) - def test_timeline_logical_size_task_priority(neon_env_builder: NeonEnvBuilder): """ diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index dce30f5388fe..7bf208db54c9 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -317,9 +317,9 @@ def test_broker(neon_env_builder: NeonEnvBuilder): time.sleep(1) # Ensure that safekeepers don't lose remote_consistent_lsn on restart. - # Control file is persisted each 5s. TODO: do that on shutdown and remove sleep. - time.sleep(6) for sk in env.safekeepers: + # force persist cfile + sk.http_client().checkpoint(tenant_id, timeline_id) sk.stop() sk.start() stat_after_restart = [cli.timeline_status(tenant_id, timeline_id) for cli in clients] @@ -374,7 +374,7 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): http_cli_other = env.safekeepers[0].http_client( auth_token=env.auth_keys.generate_tenant_token(TenantId.generate()) ) - http_cli_noauth = env.safekeepers[0].http_client() + http_cli_noauth = env.safekeepers[0].http_client(gen_sk_wide_token=False) # Pretend WAL is offloaded to s3. 
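The timeline-size hunk above switches from waiting for an "Activating tenant (on-demand)" message to waiting for the "Waiting for timelines" shutdown message via `pageserver.log_contains` and `wait_until`. Stripped of the fixtures, that kind of wait is just polling a log file for a regex until a deadline; a self-contained sketch follows (the helper name and log path are illustrative, not the fixtures' API):

```python
import re
import time
from pathlib import Path


def wait_for_log_line(log_path: Path, pattern: str, timeout_s: float = 10.0) -> str:
    """Poll `log_path` until a line matching `pattern` appears, then return it."""
    regex = re.compile(pattern)
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if log_path.exists():
            for line in log_path.read_text(errors="replace").splitlines():
                if regex.search(line):
                    return line
        time.sleep(0.5)
    raise AssertionError(f"pattern {pattern!r} not found in {log_path} within {timeout_s}s")


# e.g. wait_for_log_line(Path("pageserver.log"), r"Waiting for timelines")
```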
if auth_enabled: @@ -830,7 +830,7 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): auth_token=env.auth_keys.generate_tenant_token(TenantId.generate()) ) wa_http_cli_bad.check_status() - wa_http_cli_noauth = wa.http_client() + wa_http_cli_noauth = wa.http_client(gen_sk_wide_token=False) wa_http_cli_noauth.check_status() # debug endpoint requires safekeeper scope @@ -964,7 +964,7 @@ def test_sk_auth(neon_env_builder: NeonEnvBuilder): # By default, neon_local enables auth on all services if auth is configured, # so http must require the token. - sk_http_cli_noauth = sk.http_client() + sk_http_cli_noauth = sk.http_client(gen_sk_wide_token=False) sk_http_cli_auth = sk.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id)) with pytest.raises(sk_http_cli_noauth.HTTPError, match="Forbidden|Unauthorized"): sk_http_cli_noauth.timeline_status(tenant_id, timeline_id) @@ -1640,7 +1640,7 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): sk_http_other = sk.http_client( auth_token=env.auth_keys.generate_tenant_token(tenant_id_other) ) - sk_http_noauth = sk.http_client() + sk_http_noauth = sk.http_client(gen_sk_wide_token=False) assert (sk_data_dir / str(tenant_id) / str(timeline_id_1)).is_dir() assert (sk_data_dir / str(tenant_id) / str(timeline_id_2)).is_dir() assert (sk_data_dir / str(tenant_id) / str(timeline_id_3)).is_dir() @@ -1723,7 +1723,10 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): cur.execute("INSERT INTO t (key) VALUES (123)") +# Basic pull_timeline test. def test_pull_timeline(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True + def execute_payload(endpoint: Endpoint): with closing(endpoint.connect()) as conn: with conn.cursor() as cur: @@ -1739,7 +1742,7 @@ def execute_payload(endpoint: Endpoint): def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId): for sk in safekeepers: - http_cli = sk.http_client() + http_cli = sk.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id)) try: status = http_cli.timeline_status(tenant_id, timeline_id) log.info(f"Safekeeper {sk.id} status: {status}") @@ -1749,11 +1752,11 @@ def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_i neon_env_builder.num_safekeepers = 4 env = neon_env_builder.init_start() tenant_id = env.initial_tenant - timeline_id = env.neon_cli.create_branch("test_pull_timeline") + timeline_id = env.initial_timeline log.info("Use only first 3 safekeepers") env.safekeepers[3].stop() - endpoint = env.endpoints.create("test_pull_timeline") + endpoint = env.endpoints.create("main") endpoint.active_safekeepers = [1, 2, 3] endpoint.start() @@ -1769,7 +1772,7 @@ def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_i res = ( env.safekeepers[3] - .http_client() + .http_client(auth_token=env.auth_keys.generate_safekeeper_token()) .pull_timeline( { "tenant_id": str(tenant_id), @@ -1787,7 +1790,7 @@ def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_i show_statuses(env.safekeepers, tenant_id, timeline_id) log.info("Restarting compute with new config to verify that it works") - endpoint.stop_and_destroy().create("test_pull_timeline") + endpoint.stop_and_destroy().create("main") endpoint.active_safekeepers = [1, 3, 4] endpoint.start() @@ -1816,8 +1819,8 @@ def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_i # 4) Do some write, verify integrity with 
timeline_digest. # Expected to fail while holding off WAL gc plus fetching commit_lsn WAL # segment is not implemented. -@pytest.mark.xfail def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) env = neon_env_builder.init_start() @@ -1836,27 +1839,36 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): src_flush_lsn = src_sk.get_flush_lsn(tenant_id, timeline_id) log.info(f"flush_lsn on src before pull_timeline: {src_flush_lsn}") - dst_http = dst_sk.http_client() + src_http = src_sk.http_client() # run pull_timeline which will halt before downloading files - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "pause")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) pt_handle = PropagatingThread( target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) ) pt_handle.start() - dst_sk.wait_until_paused("sk-pull-timeline-after-list-pausable") + src_sk.wait_until_paused("sk-snapshot-after-list-pausable") # ensure segment exists endpoint.safe_psql("insert into t select generate_series(1, 180000), 'papaya'") - lsn = last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) + lsn = last_flush_lsn_upload( + env, + endpoint, + tenant_id, + timeline_id, + auth_token=env.auth_keys.generate_tenant_token(tenant_id), + ) assert lsn > Lsn("0/2000000") # Checkpoint timeline beyond lsn. - src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn) + src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn, wait_wal_removal=False) first_segment_p = src_sk.timeline_dir(tenant_id, timeline_id) / "000000010000000000000001" log.info(f"first segment exist={os.path.exists(first_segment_p)}") - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "off")) pt_handle.join() + # after pull_timeline is finished WAL should be removed on donor + src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn, wait_wal_removal=True) + timeline_start_lsn = src_sk.get_timeline_start_lsn(tenant_id, timeline_id) dst_flush_lsn = dst_sk.get_flush_lsn(tenant_id, timeline_id) log.info(f"flush_lsn on dst after pull_timeline: {dst_flush_lsn}") @@ -1883,8 +1895,8 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): # enough, so it won't be affected by term change anymore. # # Expected to fail while term check is not implemented. 
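The pull_timeline tests above and below drive `dst_sk.pull_timeline` from a `PropagatingThread`, so a failure in the background call only surfaces when the test joins the thread. The real helper lives in the test fixtures; a minimal thread wrapper with that behaviour looks roughly like this sketch:

```python
import threading
from typing import Any, Optional


class PropagatingThread(threading.Thread):
    """Thread that re-raises an exception from its target when join()ed.

    A sketch of the pattern only; the helper actually used by these tests may
    differ in detail.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self._exc: Optional[BaseException] = None
        self._ret: Any = None

    def run(self) -> None:
        try:
            # _target/_args/_kwargs are set by threading.Thread.__init__
            if self._target is not None:
                self._ret = self._target(*self._args, **self._kwargs)
        except BaseException as e:
            self._exc = e

    def join(self, timeout: Optional[float] = None) -> Any:
        super().join(timeout)
        if self._exc is not None:
            raise self._exc
        return self._ret


# Hypothetical usage mirroring the tests:
# t = PropagatingThread(target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id))
# t.start()
# t.join()  # re-raises HTTPError and friends if pull_timeline failed
```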
-@pytest.mark.xfail def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) env = neon_env_builder.init_start() @@ -1900,14 +1912,14 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): ep.safe_psql("create table t(key int, value text)") ep.safe_psql("insert into t select generate_series(1, 1000), 'pear'") - dst_http = dst_sk.http_client() + src_http = src_sk.http_client() # run pull_timeline which will halt before downloading files - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "pause")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) pt_handle = PropagatingThread( target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) ) pt_handle.start() - dst_sk.wait_until_paused("sk-pull-timeline-after-list-pausable") + src_sk.wait_until_paused("sk-snapshot-after-list-pausable") src_http = src_sk.http_client() term_before = src_http.timeline_status(tenant_id, timeline_id).term @@ -1922,7 +1934,7 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): term_after = src_http.timeline_status(tenant_id, timeline_id).term assert term_after > term_before, f"term_after={term_after}, term_before={term_before}" - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "off")) with pytest.raises(requests.exceptions.HTTPError): pt_handle.join() diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 99164645a730..3c446ecdeafa 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -324,14 +324,15 @@ files: help: 'Whether or not the replication slot wal_status is lost' key_labels: - slot_name - values: [wal_status_is_lost] + values: [wal_is_lost] query: | SELECT slot_name, CASE WHEN wal_status = 'lost' THEN 1 ELSE 0 - END AS wal_status_is_lost + END AS wal_is_lost FROM pg_replication_slots; + - filename: neon_collector_autoscaling.yml content: | collector_name: neon_collector_autoscaling diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index df16c717893b..139a5647c560 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -25,6 +25,7 @@ axum = { version = "0.6", features = ["ws"] } base64 = { version = "0.21", features = ["alloc"] } base64ct = { version = "1", default-features = false, features = ["std"] } bytes = { version = "1", features = ["serde"] } +camino = { version = "1", default-features = false, features = ["serde1"] } chrono = { version = "0.4", default-features = false, features = ["clock", "serde", "wasmbind"] } clap = { version = "4", features = ["derive", "string"] } clap_builder = { version = "4", default-features = false, features = ["color", "help", "std", "string", "suggestions", "usage"] }
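The vm-image-spec.yaml hunk above renames the collector's value column to `wal_is_lost`. To eyeball what that query returns outside of sql_exporter, it can be run directly against Postgres; the snippet below is an illustration only (the psycopg2 dependency and the connection string are assumptions, not part of the image spec):

```python
import psycopg2

# Same query as the neon_collector entry above, returning 1 when a slot's WAL is lost.
QUERY = """
SELECT slot_name,
       CASE WHEN wal_status = 'lost' THEN 1 ELSE 0 END AS wal_is_lost
  FROM pg_replication_slots;
"""

with psycopg2.connect("dbname=postgres user=postgres host=localhost") as conn:
    with conn.cursor() as cur:
        cur.execute(QUERY)
        for slot_name, wal_is_lost in cur.fetchall():
            print(f"{slot_name}: wal_is_lost={wal_is_lost}")
```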