test_import_from_pageserver_small: try to make less flaky (#7843)
With #7828 and proper fullbackup testing, the test became flaky
([evidence]).

- produce better assertion messages in `assert_pageserver_backups_equal`
- use a read-only endpoint to confirm the row count

[evidence]:
https://neon-github-public-dev.s3.amazonaws.com/reports/pr-7839/9192447962/index.html#suites/89cfa994d71769e01e3fc4f475a1f3fa/49009214d0f8b8ce
koivunej committed May 23, 2024
1 parent 95a49f0 commit 49d7f9b
Showing 2 changed files with 21 additions and 13 deletions.
test_runner/fixtures/utils.py (25 changes: 18 additions & 7 deletions)
@@ -541,11 +541,22 @@ def build_hash_list(p: Path) -> List[Tuple[str, bytes]]:
 
     left_list, right_list = map(build_hash_list, [left, right])
 
-    try:
-        assert len(left_list) == len(right_list)
+    assert len(left_list) == len(
+        right_list
+    ), f"unexpected number of files on tar files, {len(left_list)} != {len(right_list)}"
 
-        for left_tuple, right_tuple in zip(left_list, right_list):
-            assert left_tuple == right_tuple
-    finally:
-        elapsed = time.time() - started_at
-        log.info(f"assert_pageserver_backups_equal completed in {elapsed}s")
+    mismatching = set()
+
+    for left_tuple, right_tuple in zip(left_list, right_list):
+        left_path, left_hash = left_tuple
+        right_path, right_hash = right_tuple
+        assert (
+            left_path == right_path
+        ), f"file count matched, expected these to be same paths: {left_path}, {right_path}"
+        if left_hash != right_hash:
+            mismatching.add(left_path)
+
+    assert len(mismatching) == 0, f"files with hash mismatch: {mismatching}"
+
+    elapsed = time.time() - started_at
+    log.info(f"assert_pageserver_backups_equal completed in {elapsed}s")
test_runner/regress/test_import.py (9 changes: 3 additions & 6 deletions)
@@ -163,7 +163,7 @@ def test_import_from_pageserver_small(
 
     num_rows = 3000
     lsn = _generate_data(num_rows, endpoint)
-    _import(num_rows, lsn, env, pg_bin, timeline, env.pg_distrib_dir, test_output_dir)
+    _import(num_rows, lsn, env, pg_bin, timeline, test_output_dir)
 
 
 @pytest.mark.timeout(1800)
@@ -193,9 +193,7 @@ def test_import_from_pageserver_multisegment(
     log.info(f"timeline logical size = {logical_size / (1024 ** 2)}MB")
     assert logical_size > 1024**3  # = 1GB
 
-    tar_output_file = _import(
-        num_rows, lsn, env, pg_bin, timeline, env.pg_distrib_dir, test_output_dir
-    )
+    tar_output_file = _import(num_rows, lsn, env, pg_bin, timeline, test_output_dir)
 
     # Check if the backup data contains multiple segment files
     cnt_seg_files = 0
@@ -235,7 +233,6 @@ def _import(
     env: NeonEnv,
     pg_bin: PgBin,
     timeline: TimelineId,
-    pg_distrib_dir: Path,
     test_output_dir: Path,
 ) -> Path:
     """Test importing backup data to the pageserver.
Expand Down Expand Up @@ -295,7 +292,7 @@ def _import(
wait_for_upload(client, tenant, timeline, lsn)

# Check it worked
endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant)
endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant, lsn=lsn)
assert endpoint.safe_psql("select count(*) from tbl") == [(expected_num_rows,)]

# Take another fullbackup
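The `lsn=lsn` argument is the commit's second fix: starting the endpoint at the exact LSN the import was uploaded to pins it to a fixed snapshot, so the row count cannot drift while the check runs. A small sketch of that check follows, using the names from the hunk above; `check_imported_rows` is a hypothetical wrapper for illustration, and treating an endpoint started with an explicit `lsn=` as static/read-only is an assumption about the NeonEnv fixture API, not something spelled out in this diff.

def check_imported_rows(env, tenant, endpoint_id, lsn, expected_num_rows):
    # Pin the compute at the import LSN; a pinned endpoint serves a fixed
    # snapshot (assumed read-only here), so concurrent WAL activity cannot
    # change the count observed below.
    endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant, lsn=lsn)
    assert endpoint.safe_psql("select count(*) from tbl") == [(expected_num_rows,)]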

1 comment on commit 49d7f9b

@github-actions

3196 tests run: 3055 passed, 1 failed, 140 skipped (full report)


Failures on Postgres 14

  • test_sharding_autosplit[github-actions-selfhosted]: release
# Run all failed tests locally:
scripts/pytest -vv -n $(nproc) -k "test_sharding_autosplit[release-pg14-github-actions-selfhosted]"
Flaky tests (2)

Postgres 16

Code coverage* (full report)

  • functions: 31.4% (6450 of 20544 functions)
  • lines: 48.3% (49861 of 103274 lines)

* collected from Rust tests only


The comment gets automatically updated with the latest test results
49d7f9b at 2024-05-23T13:07:26.045Z :recycle:
