Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit

Permalink
paritydb support for parachains db. (#4838)
Browse files Browse the repository at this point in the history
* parity-db subsystem without cache and no split column

* fmt

* fix path (auto from parity-db fail)

* lru cache for db column with cache

* Revert "lru cache for db column with cache"

This reverts commit ae177bc.

* Write_lock mutex

* theoretical code for bridges

* revert changes

* Revert bridge changes

* fix spec_version

* update parity db

* test purge-db

* Use specific ordered collection with paritydb.

* Revert "Use specific ordered collection with paritydb."

This reverts commit 8b66d0a.

* fix chain selection tests.

* remove patch

* fix auto.

* Remove useless exists directory method

* purge chain without parity-db removal

* spellcheck

* renamings and filtering.

* fix assertion

* format

* update parity-db and fmt

* Auto keep using rocksdb when it exists.

* Revert "Auto keep using rocksdb when it exists."

This reverts commit cea49b3.

* Update kvdb version.
  • Loading branch information
cheme committed Mar 3, 2022
1 parent a13f0b9 commit 0879bdb
Show file tree
Hide file tree
Showing 22 changed files with 591 additions and 85 deletions.
20 changes: 18 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

18 changes: 9 additions & 9 deletions node/core/approval-voting/src/approval_db/v1/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,10 @@

//! Version 1 of the DB schema.

use kvdb::{DBTransaction, KeyValueDB};
use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche};
use polkadot_node_subsystem::{SubsystemError, SubsystemResult};
use polkadot_node_subsystem_util::database::{DBTransaction, Database};
use polkadot_primitives::v1::{
BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex,
ValidatorIndex, ValidatorSignature,
Expand All @@ -41,14 +41,14 @@ pub mod tests;

/// `DbBackend` is a concrete implementation of the higher-level Backend trait
pub struct DbBackend {
inner: Arc<dyn KeyValueDB>,
inner: Arc<dyn Database>,
config: Config,
}

impl DbBackend {
/// Create a new [`DbBackend`] with the supplied key-value store and
/// config.
pub fn new(db: Arc<dyn KeyValueDB>, config: Config) -> Self {
pub fn new(db: Arc<dyn Database>, config: Config) -> Self {
DbBackend { inner: db, config }
}
}
Expand Down Expand Up @@ -239,7 +239,7 @@ impl std::error::Error for Error {}
pub type Result<T> = std::result::Result<T, Error>;

pub(crate) fn load_decode<D: Decode>(
store: &dyn KeyValueDB,
store: &dyn Database,
col_data: u32,
key: &[u8],
) -> Result<Option<D>> {
Expand Down Expand Up @@ -283,7 +283,7 @@ pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] {
}

/// Return all blocks which have entries in the DB, ascending, by height.
pub fn load_all_blocks(store: &dyn KeyValueDB, config: &Config) -> SubsystemResult<Vec<Hash>> {
pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult<Vec<Hash>> {
let mut hashes = Vec::new();
if let Some(stored_blocks) = load_stored_blocks(store, config)? {
for height in stored_blocks.0..stored_blocks.1 {
Expand All @@ -297,7 +297,7 @@ pub fn load_all_blocks(store: &dyn KeyValueDB, config: &Config) -> SubsystemResu

/// Load the stored-blocks key from the state.
pub fn load_stored_blocks(
store: &dyn KeyValueDB,
store: &dyn Database,
config: &Config,
) -> SubsystemResult<Option<StoredBlockRange>> {
load_decode(store, config.col_data, STORED_BLOCKS_KEY)
Expand All @@ -306,7 +306,7 @@ pub fn load_stored_blocks(

/// Load a blocks-at-height entry for a given block number.
pub fn load_blocks_at_height(
store: &dyn KeyValueDB,
store: &dyn Database,
config: &Config,
block_number: &BlockNumber,
) -> SubsystemResult<Vec<Hash>> {
Expand All @@ -317,7 +317,7 @@ pub fn load_blocks_at_height(

/// Load a block entry from the aux store.
pub fn load_block_entry(
store: &dyn KeyValueDB,
store: &dyn Database,
config: &Config,
block_hash: &Hash,
) -> SubsystemResult<Option<BlockEntry>> {
Expand All @@ -328,7 +328,7 @@ pub fn load_block_entry(

/// Load a candidate entry from the aux store.
pub fn load_candidate_entry(
store: &dyn KeyValueDB,
store: &dyn Database,
config: &Config,
candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<CandidateEntry>> {
Expand Down
8 changes: 5 additions & 3 deletions node/core/approval-voting/src/approval_db/v1/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ use crate::{
backend::{Backend, OverlayedBackend},
ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo},
};
use kvdb::KeyValueDB;
use polkadot_node_subsystem_util::database::Database;
use polkadot_primitives::v1::Id as ParaId;
use std::{collections::HashMap, sync::Arc};

Expand All @@ -32,8 +32,10 @@ const NUM_COLUMNS: u32 = 1;

const TEST_CONFIG: Config = Config { col_data: DATA_COL };

fn make_db() -> (DbBackend, Arc<dyn KeyValueDB>) {
let db_writer: Arc<dyn KeyValueDB> = Arc::new(kvdb_memorydb::create(NUM_COLUMNS));
fn make_db() -> (DbBackend, Arc<dyn Database>) {
let db = kvdb_memorydb::create(NUM_COLUMNS);
let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]);
let db_writer: Arc<dyn Database> = Arc::new(db);
(DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer)
}

Expand Down
6 changes: 4 additions & 2 deletions node/core/approval-voting/src/import.rs
Original file line number Diff line number Diff line change
Expand Up @@ -579,11 +579,11 @@ pub(crate) mod tests {
use crate::approval_db::v1::DbBackend;
use ::test_helpers::{dummy_candidate_receipt, dummy_hash};
use assert_matches::assert_matches;
use kvdb::KeyValueDB;
use merlin::Transcript;
use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
use polkadot_node_subsystem::messages::AllMessages;
use polkadot_node_subsystem_test_helpers::make_subsystem_context;
use polkadot_node_subsystem_util::database::Database;
use polkadot_primitives::{v1::ValidatorIndex, v2::SessionInfo};
pub(crate) use sp_consensus_babe::{
digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest},
Expand Down Expand Up @@ -1125,7 +1125,9 @@ pub(crate) mod tests {

#[test]
fn insta_approval_works() {
let db_writer: Arc<dyn KeyValueDB> = Arc::new(kvdb_memorydb::create(NUM_COLUMNS));
let db = kvdb_memorydb::create(NUM_COLUMNS);
let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]);
let db_writer: Arc<dyn Database> = Arc::new(db);
let mut db = DbBackend::new(db_writer.clone(), TEST_CONFIG);
let mut overlay_db = OverlayedBackend::new(&db);

Expand Down
6 changes: 3 additions & 3 deletions node/core/approval-voting/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
//! of others. It uses this information to determine when candidates and blocks have
//! been sufficiently approved to finalize.

use kvdb::KeyValueDB;
use polkadot_node_jaeger as jaeger;
use polkadot_node_primitives::{
approval::{
Expand All @@ -43,6 +42,7 @@ use polkadot_node_subsystem::{
SubsystemResult, SubsystemSender,
};
use polkadot_node_subsystem_util::{
database::Database,
metrics::{self, prometheus},
rolling_session_window::{
new_session_window_size, RollingSessionWindow, SessionWindowSize, SessionWindowUpdate,
Expand Down Expand Up @@ -140,7 +140,7 @@ pub struct ApprovalVotingSubsystem {
keystore: Arc<LocalKeystore>,
db_config: DatabaseConfig,
slot_duration_millis: u64,
db: Arc<dyn KeyValueDB>,
db: Arc<dyn Database>,
mode: Mode,
metrics: Metrics,
}
Expand Down Expand Up @@ -329,7 +329,7 @@ impl ApprovalVotingSubsystem {
/// Create a new approval voting subsystem with the given keystore, config, and database.
pub fn with_config(
config: Config,
db: Arc<dyn KeyValueDB>,
db: Arc<dyn Database>,
keystore: Arc<LocalKeystore>,
sync_oracle: Box<dyn SyncOracle + Send>,
metrics: Metrics,
Expand Down
5 changes: 4 additions & 1 deletion node/core/approval-voting/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -475,14 +475,17 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
);

let clock = Box::new(clock);
let db = kvdb_memorydb::create(test_constants::NUM_COLUMNS);
let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]);

let subsystem = run(
context,
ApprovalVotingSubsystem::with_config(
Config {
col_data: test_constants::TEST_CONFIG.col_data,
slot_duration_millis: SLOT_DURATION_MILLIS,
},
Arc::new(kvdb_memorydb::create(test_constants::NUM_COLUMNS)),
Arc::new(db),
Arc::new(keystore),
sync_oracle,
Metrics::default(),
Expand Down
26 changes: 13 additions & 13 deletions node/core/av-store/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,8 @@ use std::{

use futures::{channel::oneshot, future, select, FutureExt};
use futures_timer::Delay;
use kvdb::{DBTransaction, KeyValueDB};
use parity_scale_codec::{Decode, Encode, Error as CodecError, Input};
use polkadot_node_subsystem_util::database::{DBTransaction, Database};

use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use polkadot_node_primitives::{AvailableData, ErasureChunk};
Expand Down Expand Up @@ -152,7 +152,7 @@ struct CandidateMeta {
}

fn query_inner<D: Decode>(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
column: u32,
key: &[u8],
) -> Result<Option<D>, Error> {
Expand Down Expand Up @@ -181,7 +181,7 @@ fn write_available_data(
}

fn load_available_data(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
config: &Config,
hash: &CandidateHash,
) -> Result<Option<AvailableData>, Error> {
Expand All @@ -197,7 +197,7 @@ fn delete_available_data(tx: &mut DBTransaction, config: &Config, hash: &Candida
}

fn load_chunk(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
config: &Config,
candidate_hash: &CandidateHash,
chunk_index: ValidatorIndex,
Expand Down Expand Up @@ -231,7 +231,7 @@ fn delete_chunk(
}

fn load_meta(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
config: &Config,
hash: &CandidateHash,
) -> Result<Option<CandidateMeta>, Error> {
Expand Down Expand Up @@ -443,7 +443,7 @@ impl Clock for SystemClock {
pub struct AvailabilityStoreSubsystem {
pruning_config: PruningConfig,
config: Config,
db: Arc<dyn KeyValueDB>,
db: Arc<dyn Database>,
known_blocks: KnownUnfinalizedBlocks,
finalized_number: Option<BlockNumber>,
metrics: Metrics,
Expand All @@ -452,7 +452,7 @@ pub struct AvailabilityStoreSubsystem {

impl AvailabilityStoreSubsystem {
/// Create a new `AvailabilityStoreSubsystem` with a given config on disk.
pub fn new(db: Arc<dyn KeyValueDB>, config: Config, metrics: Metrics) -> Self {
pub fn new(db: Arc<dyn Database>, config: Config, metrics: Metrics) -> Self {
Self::with_pruning_config_and_clock(
db,
config,
Expand All @@ -464,7 +464,7 @@ impl AvailabilityStoreSubsystem {

/// Create a new `AvailabilityStoreSubsystem` with a given config on disk.
fn with_pruning_config_and_clock(
db: Arc<dyn KeyValueDB>,
db: Arc<dyn Database>,
config: Config,
pruning_config: PruningConfig,
clock: Box<dyn Clock>,
Expand Down Expand Up @@ -661,7 +661,7 @@ where

async fn process_new_head<Context>(
ctx: &mut Context,
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
db_transaction: &mut DBTransaction,
config: &Config,
pruning_config: &PruningConfig,
Expand Down Expand Up @@ -711,7 +711,7 @@ where
}

fn note_block_backed(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
db_transaction: &mut DBTransaction,
config: &Config,
pruning_config: &PruningConfig,
Expand Down Expand Up @@ -740,7 +740,7 @@ fn note_block_backed(
}

fn note_block_included(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
db_transaction: &mut DBTransaction,
config: &Config,
pruning_config: &PruningConfig,
Expand Down Expand Up @@ -1128,7 +1128,7 @@ fn process_message(

// Ok(true) on success, Ok(false) on failure, and Err on internal error.
fn store_chunk(
db: &Arc<dyn KeyValueDB>,
db: &Arc<dyn Database>,
config: &Config,
candidate_hash: CandidateHash,
chunk: ErasureChunk,
Expand Down Expand Up @@ -1222,7 +1222,7 @@ fn store_available_data(
Ok(())
}

fn prune_all(db: &Arc<dyn KeyValueDB>, config: &Config, clock: &dyn Clock) -> Result<(), Error> {
fn prune_all(db: &Arc<dyn Database>, config: &Config, clock: &dyn Clock) -> Result<(), Error> {
let now = clock.now()?;
let (range_start, range_end) = pruning_range(now);

Expand Down
Loading

0 comments on commit 0879bdb

Please sign in to comment.