chore(pageserver): use kebab case for compaction algorithms (#7845)
Signed-off-by: Alex Chi Z <chi@neon.tech>
skyzh committed May 22, 2024
1 parent 4a278cc commit ff560a1
Showing 6 changed files with 45 additions and 19 deletions.
21 changes: 18 additions & 3 deletions libs/pageserver_api/src/models.rs
@@ -305,7 +305,7 @@ pub struct TenantConfig {
pub compaction_period: Option<String>,
pub compaction_threshold: Option<usize>,
// defer parsing compaction_algorithm, like eviction_policy
-pub compaction_algorithm: Option<CompactionAlgorithm>,
+pub compaction_algorithm: Option<CompactionAlgorithmSettings>,
pub gc_horizon: Option<u64>,
pub gc_period: Option<String>,
pub image_creation_threshold: Option<usize>,
@@ -438,13 +438,28 @@ impl EvictionPolicy {
}
}

-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(tag = "kind")]
+#[derive(
+Eq,
+PartialEq,
+Debug,
+Copy,
+Clone,
+strum_macros::EnumString,
+strum_macros::Display,
+serde_with::DeserializeFromStr,
+serde_with::SerializeDisplay,
+)]
+#[strum(serialize_all = "kebab-case")]
pub enum CompactionAlgorithm {
Legacy,
Tiered,
}

+#[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)]
+pub struct CompactionAlgorithmSettings {
+pub kind: CompactionAlgorithm,
+}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct EvictionPolicyLayerAccessThreshold {
#[serde(with = "humantime_serde")]
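Note on the change above: the strum derives make CompactionAlgorithm print and parse as kebab-case strings, and serde_with::SerializeDisplay / DeserializeFromStr route serde through that same representation, while the new CompactionAlgorithmSettings wrapper keeps the external `{"kind": ...}` shape. A minimal sketch of the intended round-trip, assuming the serde, serde_json, serde_with, strum, and strum_macros crates; the main driver is illustrative and not part of the commit:

use serde::{Deserialize, Serialize};

// Mirrors the derives introduced in models.rs above.
#[derive(
    Eq, PartialEq, Debug, Copy, Clone,
    strum_macros::EnumString,
    strum_macros::Display,
    serde_with::DeserializeFromStr,
    serde_with::SerializeDisplay,
)]
#[strum(serialize_all = "kebab-case")]
pub enum CompactionAlgorithm {
    Legacy,
    Tiered,
}

#[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct CompactionAlgorithmSettings {
    pub kind: CompactionAlgorithm,
}

fn main() {
    // Display and FromStr now use kebab-case variant names.
    assert_eq!(CompactionAlgorithm::Tiered.to_string(), "tiered");
    assert_eq!(
        "legacy".parse::<CompactionAlgorithm>().unwrap(),
        CompactionAlgorithm::Legacy
    );
    // The old PascalCase spelling no longer parses.
    assert!("Tiered".parse::<CompactionAlgorithm>().is_err());

    // The wrapper struct keeps the `{"kind": ...}` shape on the wire.
    let settings = CompactionAlgorithmSettings {
        kind: CompactionAlgorithm::Tiered,
    };
    assert_eq!(
        serde_json::to_string(&settings).unwrap(),
        r#"{"kind":"tiered"}"#
    );
}
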
10 changes: 7 additions & 3 deletions pageserver/src/tenant.rs
@@ -3973,7 +3973,7 @@ mod tests {
use hex_literal::hex;
use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE};
use pageserver_api::keyspace::KeySpace;
-use pageserver_api::models::CompactionAlgorithm;
+use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings};
use rand::{thread_rng, Rng};
use tests::storage_layer::ValuesReconstructState;
use tests::timeline::{GetVectoredError, ShutdownMode};
@@ -5169,7 +5169,9 @@ mod tests {
compaction_algorithm: CompactionAlgorithm,
) -> anyhow::Result<()> {
let mut harness = TenantHarness::create(name)?;
-harness.tenant_conf.compaction_algorithm = compaction_algorithm;
+harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
+kind: compaction_algorithm,
+};
let (tenant, ctx) = harness.load().await;
let tline = tenant
.create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
@@ -5526,7 +5528,9 @@ mod tests {
compaction_algorithm: CompactionAlgorithm,
) -> anyhow::Result<()> {
let mut harness = TenantHarness::create(name)?;
-harness.tenant_conf.compaction_algorithm = compaction_algorithm;
+harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings {
+kind: compaction_algorithm,
+};
let (tenant, ctx) = harness.load().await;
let tline = tenant
.create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx)
13 changes: 9 additions & 4 deletions pageserver/src/tenant/config.rs
@@ -11,6 +11,7 @@
use anyhow::bail;
use pageserver_api::models::AuxFilePolicy;
use pageserver_api::models::CompactionAlgorithm;
+use pageserver_api::models::CompactionAlgorithmSettings;
use pageserver_api::models::EvictionPolicy;
use pageserver_api::models::{self, ThrottleConfig};
use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize};
@@ -320,7 +321,7 @@ pub struct TenantConf {
pub compaction_period: Duration,
// Level0 delta layer threshold for compaction.
pub compaction_threshold: usize,
-pub compaction_algorithm: CompactionAlgorithm,
+pub compaction_algorithm: CompactionAlgorithmSettings,
// Determines how much history is retained, to allow
// branching and read replicas at an older point in time.
// The unit is #of bytes of WAL.
@@ -406,7 +407,7 @@ pub struct TenantConfOpt {

#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
-pub compaction_algorithm: Option<CompactionAlgorithm>,
+pub compaction_algorithm: Option<CompactionAlgorithmSettings>,

#[serde(skip_serializing_if = "Option::is_none")]
#[serde(default)]
@@ -497,7 +498,9 @@ impl TenantConfOpt {
.unwrap_or(global_conf.compaction_threshold),
compaction_algorithm: self
.compaction_algorithm
-.unwrap_or(global_conf.compaction_algorithm),
+.as_ref()
+.unwrap_or(&global_conf.compaction_algorithm)
+.clone(),
gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
image_creation_threshold: self
@@ -550,7 +553,9 @@ impl Default for TenantConf {
compaction_period: humantime::parse_duration(DEFAULT_COMPACTION_PERIOD)
.expect("cannot parse default compaction period"),
compaction_threshold: DEFAULT_COMPACTION_THRESHOLD,
-compaction_algorithm: DEFAULT_COMPACTION_ALGORITHM,
+compaction_algorithm: CompactionAlgorithmSettings {
+kind: DEFAULT_COMPACTION_ALGORITHM,
+},
gc_horizon: DEFAULT_GC_HORIZON,
gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD)
.expect("cannot parse default gc period"),
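The two `.as_ref().unwrap_or(...).clone()` hunks above exist because CompactionAlgorithmSettings derives Clone but not Copy, so the per-tenant override can no longer be merged over the global default by value. A simplified stand-in for that merge pattern; the Settings type and merge helper below are illustrative, not the real TenantConf/TenantConfOpt API:

// Hypothetical non-Copy settings type standing in for CompactionAlgorithmSettings.
#[derive(Clone, Debug, PartialEq)]
struct Settings {
    kind: String,
}

// Per-tenant override merged over a global default, mirroring the pattern above:
// borrow the override if present, otherwise borrow the default, then clone once.
fn merge(per_tenant: Option<&Settings>, global_default: &Settings) -> Settings {
    per_tenant.unwrap_or(global_default).clone()
}

fn main() {
    let global = Settings { kind: "legacy".to_string() };
    let tenant_override = Some(Settings { kind: "tiered".to_string() });

    // With an override set, the tenant value wins.
    assert_eq!(merge(tenant_override.as_ref(), &global).kind, "tiered");
    // Without one, the global default is cloned.
    assert_eq!(merge(None, &global).kind, "legacy");
}
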
14 changes: 8 additions & 6 deletions pageserver/src/tenant/timeline.rs
@@ -23,9 +23,9 @@ use pageserver_api::{
},
keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning},
models::{
-AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo,
-DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo,
-LsnLease, TimelineState,
+AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, CompactionAlgorithmSettings,
+DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy,
+InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState,
},
reltag::BlockNumber,
shard::{ShardIdentity, ShardNumber, TenantShardId},
@@ -1700,7 +1700,7 @@ impl Timeline {
return Ok(());
}

-match self.get_compaction_algorithm() {
+match self.get_compaction_algorithm_settings().kind {
CompactionAlgorithm::Tiered => self.compact_tiered(cancel, ctx).await,
CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await,
}
@@ -2096,12 +2096,14 @@ impl Timeline {
.unwrap_or(self.conf.default_tenant_conf.image_creation_threshold)
}

-fn get_compaction_algorithm(&self) -> CompactionAlgorithm {
+fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings {
let tenant_conf = &self.tenant_conf.load();
tenant_conf
.tenant_conf
.compaction_algorithm
-.unwrap_or(self.conf.default_tenant_conf.compaction_algorithm)
+.as_ref()
+.unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm)
+.clone()
}

fn get_eviction_policy(&self) -> EvictionPolicy {
2 changes: 1 addition & 1 deletion test_runner/regress/test_attach_tenant_config.py
@@ -162,7 +162,7 @@ def test_fully_custom_config(positive_env: NeonEnv):
"checkpoint_distance": 10000,
"checkpoint_timeout": "13m",
"compaction_algorithm": {
"kind": "Tiered",
"kind": "tiered",
},
"eviction_policy": {
"kind": "LayerAccessThreshold",
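For reference, the payload shape exercised by this regression test now round-trips only with the lower-case kind. A self-contained sketch of parsing that fragment on the Rust side, using illustrative stand-ins for the pageserver types (the real TenantConfig has many more fields); assumes the serde, serde_json, serde_with, and strum_macros crates:

use serde::Deserialize;

#[derive(Debug, PartialEq, Clone, Copy, strum_macros::EnumString, serde_with::DeserializeFromStr)]
#[strum(serialize_all = "kebab-case")]
enum CompactionAlgorithm {
    Legacy,
    Tiered,
}

#[derive(Debug, PartialEq, Deserialize)]
struct CompactionAlgorithmSettings {
    kind: CompactionAlgorithm,
}

// Illustrative fragment of a tenant config; only the field under test here.
#[derive(Debug, Deserialize)]
struct TenantConfigFragment {
    #[serde(default)]
    compaction_algorithm: Option<CompactionAlgorithmSettings>,
}

fn main() {
    // The kebab-case spelling used by the test above parses...
    let cfg: TenantConfigFragment =
        serde_json::from_str(r#"{"compaction_algorithm": {"kind": "tiered"}}"#).unwrap();
    assert_eq!(
        cfg.compaction_algorithm,
        Some(CompactionAlgorithmSettings { kind: CompactionAlgorithm::Tiered })
    );

    // ...the pre-change PascalCase spelling is rejected...
    assert!(serde_json::from_str::<TenantConfigFragment>(
        r#"{"compaction_algorithm": {"kind": "Tiered"}}"#
    )
    .is_err());

    // ...and omitting the field leaves the Option empty, deferring to the default.
    let cfg: TenantConfigFragment = serde_json::from_str("{}").unwrap();
    assert!(cfg.compaction_algorithm.is_none());
}
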
4 changes: 2 additions & 2 deletions test_runner/regress/test_compaction.py
@@ -194,8 +194,8 @@ def test_sharding_compaction(


class CompactionAlgorithm(str, enum.Enum):
LEGACY = "Legacy"
TIERED = "Tiered"
LEGACY = "legacy"
TIERED = "tiered"


@pytest.mark.parametrize(

1 comment on commit ff560a1

@github-actions

3196 tests run: 3055 passed, 1 failed, 140 skipped (full report)


Failures on Postgres 15

  • test_sharding_split_failures[failure20]: debug
# Run all failed tests locally:
scripts/pytest -vv -n $(nproc) -k "test_sharding_split_failures[debug-pg15-failure20]"
Flaky tests (1)

Postgres 16

  • test_timeline_deletion_with_files_stuck_in_upload_queue: debug

Test coverage report is not available

The comment gets automatically updated with the latest test results
ff560a1 at 2024-05-22T22:48:25.204Z :recycle:
