diff --git a/db/blob/db_blob_basic_test.cc b/db/blob/db_blob_basic_test.cc index e6832a2ae44..bd5a6e3dc1e 100644 --- a/db/blob/db_blob_basic_test.cc +++ b/db/blob/db_blob_basic_test.cc @@ -772,6 +772,7 @@ TEST_F(DBBlobBasicTest, MultiGetWithDirectIO) { ASSERT_OK(statuses[2]); ASSERT_EQ(values[2], second_blob); } + ASSERT_OK(options.env->DeleteFile(file_path)); } #endif // !ROCKSDB_LITE @@ -1116,6 +1117,7 @@ TEST_F(DBBlobBasicTest, GenerateIOTracing) { // Assuming blob files will have Append, Close and then Read operations. ASSERT_GT(blob_files_op_count, 2); } + ASSERT_OK(env_->DeleteFile(trace_file)); } #endif // !ROCKSDB_LITE diff --git a/db/c_test.c b/db/c_test.c index 249ab9023d1..0ccb66702b9 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -772,6 +772,9 @@ int main(int argc, char** argv) { CheckGet(db, roptions, "foo", "hello"); rocksdb_backup_engine_close(be); + + rocksdb_destroy_db(options, dbbackupname, &err); + CheckNoError(err); } StartPhase("checkpoint"); @@ -915,6 +918,7 @@ int main(int argc, char** argv) { CheckNoError(err); rocksdb_delete(db, woptions, "sstk3", 5, &err); CheckNoError(err); + remove(sstfilename); } StartPhase("writebatch"); @@ -3452,6 +3456,8 @@ int main(int argc, char** argv) { StartPhase("cleanup"); rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + CheckNoError(err); rocksdb_options_destroy(options); rocksdb_block_based_options_destroy(table_options); rocksdb_readoptions_destroy(roptions); diff --git a/db/column_family_test.cc b/db/column_family_test.cc index d33cbe50a77..b738c82e1c9 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -80,6 +80,7 @@ class ColumnFamilyTestBase : public testing::Test { } ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); Destroy(column_families); + EXPECT_OK(DestroyDB(dbname_, Options(db_options_, column_family_options_))); delete env_; } @@ -940,6 +941,7 @@ TEST_P(ColumnFamilyTest, IgnoreRecoveredLog) { } } } + ASSERT_OK(DestroyDir(env_, backup_logs)); } #ifndef ROCKSDB_LITE // TEST functions used are not supported diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index ef38946f7e2..2a44adee232 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -23,11 +23,31 @@ namespace ROCKSDB_NAMESPACE { class CompactFilesTest : public testing::Test { public: - CompactFilesTest() { + CompactFilesTest() : db_(nullptr) { env_ = Env::Default(); - db_name_ = test::PerThreadDBPath("compact_files_test"); + db_name_ = test::PerThreadDBPath(env_, "compact_files_test"); + options_.env = env_; + options_.create_if_missing = true; + EXPECT_OK(DestroyDB(db_name_, options_)); } + ~CompactFilesTest() { + CloseDB(); + EXPECT_OK(DestroyDB(db_name_, options_)); + } + + void OpenDB() { + ASSERT_OK(DB::Open(options_, db_name_, &db_)); + ASSERT_NE(db_, nullptr); + } + + void CloseDB() { + delete db_; + db_ = nullptr; + } + + Options options_; + DB* db_; std::string db_name_; Env* env_; }; @@ -62,25 +82,19 @@ class FlushedFileCollector : public EventListener { }; TEST_F(CompactFilesTest, L0ConflictsFiles) { - Options options; // to trigger compaction more easily const int kWriteBufferSize = 10000; const int kLevel0Trigger = 2; - options.create_if_missing = true; - options.compaction_style = kCompactionStyleLevel; + options_.compaction_style = kCompactionStyleLevel; // Small slowdown and stop trigger for experimental purpose. 
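The CompactFilesTest hunks above fold the per-test DB lifecycle into the test fixture itself. For reference, here is a minimal, self-contained sketch of that pattern (fixture and path names are illustrative, not taken from the patch): the fixture owns its Options, wipes the DB directory in both constructor and destructor with DestroyDB, and exposes OpenDB()/CloseDB() helpers.

#include <string>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "test_util/testharness.h"

namespace ROCKSDB_NAMESPACE {

class CleanDBTest : public testing::Test {  // hypothetical fixture name
 public:
  CleanDBTest() : db_(nullptr) {
    env_ = Env::Default();
    db_name_ = test::PerThreadDBPath(env_, "clean_db_test");
    options_.env = env_;
    options_.create_if_missing = true;
    // Start from a clean directory even if a previous run left files behind.
    EXPECT_OK(DestroyDB(db_name_, options_));
  }

  ~CleanDBTest() override {
    CloseDB();
    // Leave nothing behind for the next test or the next run.
    EXPECT_OK(DestroyDB(db_name_, options_));
  }

  void OpenDB() {
    ASSERT_OK(DB::Open(options_, db_name_, &db_));
    ASSERT_NE(db_, nullptr);
  }

  void CloseDB() {
    delete db_;
    db_ = nullptr;
  }

 protected:
  Options options_;
  DB* db_;
  std::string db_name_;
  Env* env_;
};

}  // namespace ROCKSDB_NAMESPACE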
- options.level0_slowdown_writes_trigger = 20; - options.level0_stop_writes_trigger = 20; - options.level0_stop_writes_trigger = 20; - options.write_buffer_size = kWriteBufferSize; - options.level0_file_num_compaction_trigger = kLevel0Trigger; - options.compression = kNoCompression; - - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - assert(s.ok()); - assert(db); + options_.level0_slowdown_writes_trigger = 20; + options_.level0_stop_writes_trigger = 20; + options_.level0_stop_writes_trigger = 20; + options_.write_buffer_size = kWriteBufferSize; + options_.level0_file_num_compaction_trigger = kLevel0Trigger; + options_.compression = kNoCompression; + + OpenDB(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({ {"CompactFilesImpl:0", "BackgroundCallCompaction:0"}, @@ -91,13 +105,13 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) { // create couple files // Background compaction starts and waits in BackgroundCallCompaction:0 for (int i = 0; i < kLevel0Trigger * 4; ++i) { - ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), "")); - ASSERT_OK(db->Put(WriteOptions(), std::to_string(100 - i), "")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), "")); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(100 - i), "")); + ASSERT_OK(db_->Flush(FlushOptions())); } ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta; - db->GetColumnFamilyMetaData(&meta); + db_->GetColumnFamilyMetaData(&meta); std::string file1; for (auto& file : meta.levels[0].files) { ASSERT_EQ(0, meta.levels[0].level); @@ -109,51 +123,45 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) { // The background compaction then notices that there is an L0 compaction // already in progress and doesn't do an L0 compaction // Once the background compaction finishes, the compact files finishes - ASSERT_OK(db->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), - {file1, file2}, 0)); + ASSERT_OK(db_->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), + {file1, file2}, 0)); break; } } ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); - delete db; } TEST_F(CompactFilesTest, MultipleLevel) { - Options options; - options.create_if_missing = true; - options.level_compaction_dynamic_level_bytes = true; - options.num_levels = 6; + options_.level_compaction_dynamic_level_bytes = true; + options_.num_levels = 6; // Add listener FlushedFileCollector* collector = new FlushedFileCollector(); - options.listeners.emplace_back(collector); + options_.listeners.emplace_back(collector); - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - ASSERT_OK(s); - ASSERT_NE(db, nullptr); + OpenDB(); // create couple files in L0, L3, L4 and L5 for (int i = 5; i > 2; --i) { collector->ClearFlushedFiles(); - ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), "")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), "")); + ASSERT_OK(db_->Flush(FlushOptions())); // Ensure background work is fully finished including listener callbacks // before accessing listener state. 
- ASSERT_OK(static_cast_with_check(db)->TEST_WaitForBackgroundWork()); + ASSERT_OK( + static_cast_with_check(db_)->TEST_WaitForBackgroundWork()); auto l0_files = collector->GetFlushedFiles(); - ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, i)); + ASSERT_OK(db_->CompactFiles(CompactionOptions(), l0_files, i)); std::string prop; - ASSERT_TRUE(db->GetProperty( + ASSERT_TRUE(db_->GetProperty( "rocksdb.num-files-at-level" + std::to_string(i), &prop)); ASSERT_EQ("1", prop); } - ASSERT_OK(db->Put(WriteOptions(), std::to_string(0), "")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(0), "")); + ASSERT_OK(db_->Flush(FlushOptions())); ColumnFamilyMetaData meta; - db->GetColumnFamilyMetaData(&meta); + db_->GetColumnFamilyMetaData(&meta); // Compact files except the file in L3 std::vector files; for (int i = 0; i < 6; ++i) { @@ -171,9 +179,9 @@ TEST_F(CompactFilesTest, MultipleLevel) { std::thread thread([&] { TEST_SYNC_POINT("CompactFilesTest.MultipleLevel:0"); - ASSERT_OK(db->Put(WriteOptions(), "bar", "v2")); - ASSERT_OK(db->Put(WriteOptions(), "foo", "v2")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), "bar", "v2")); + ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); + ASSERT_OK(db_->Flush(FlushOptions())); TEST_SYNC_POINT("CompactFilesTest.MultipleLevel:1"); }); @@ -181,127 +189,105 @@ TEST_F(CompactFilesTest, MultipleLevel) { // here we have input file from level 5, so the output level has to be >= 5 for (int invalid_output_level = 0; invalid_output_level < 5; invalid_output_level++) { - s = db->CompactFiles(CompactionOptions(), files, invalid_output_level); + Status s = + db_->CompactFiles(CompactionOptions(), files, invalid_output_level); std::cout << s.ToString() << std::endl; ASSERT_TRUE(s.IsInvalidArgument()); } - ASSERT_OK(db->CompactFiles(CompactionOptions(), files, 5)); + ASSERT_OK(db_->CompactFiles(CompactionOptions(), files, 5)); SyncPoint::GetInstance()->DisableProcessing(); thread.join(); - - delete db; } TEST_F(CompactFilesTest, ObsoleteFiles) { - Options options; // to trigger compaction more easily const int kWriteBufferSize = 65536; - options.create_if_missing = true; // Disable RocksDB background compaction. 
- options.compaction_style = kCompactionStyleNone; - options.level0_slowdown_writes_trigger = (1 << 30); - options.level0_stop_writes_trigger = (1 << 30); - options.write_buffer_size = kWriteBufferSize; - options.max_write_buffer_number = 2; - options.compression = kNoCompression; + options_.compaction_style = kCompactionStyleNone; + options_.level0_slowdown_writes_trigger = (1 << 30); + options_.level0_stop_writes_trigger = (1 << 30); + options_.write_buffer_size = kWriteBufferSize; + options_.max_write_buffer_number = 2; + options_.compression = kNoCompression; // Add listener FlushedFileCollector* collector = new FlushedFileCollector(); - options.listeners.emplace_back(collector); + options_.listeners.emplace_back(collector); - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - ASSERT_OK(s); - ASSERT_NE(db, nullptr); + OpenDB(); // create couple files for (int i = 1000; i < 2000; ++i) { - ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), - std::string(kWriteBufferSize / 10, 'a' + (i % 26)))); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), + std::string(kWriteBufferSize / 10, 'a' + (i % 26)))); } auto l0_files = collector->GetFlushedFiles(); - ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1)); - ASSERT_OK(static_cast_with_check(db)->TEST_WaitForCompact()); + ASSERT_OK(db_->CompactFiles(CompactionOptions(), l0_files, 1)); + ASSERT_OK(static_cast_with_check(db_)->TEST_WaitForCompact()); // verify all compaction input files are deleted for (auto fname : l0_files) { ASSERT_EQ(Status::NotFound(), env_->FileExists(fname)); } - delete db; } TEST_F(CompactFilesTest, NotCutOutputOnLevel0) { - Options options; - options.create_if_missing = true; // Disable RocksDB background compaction. 
- options.compaction_style = kCompactionStyleNone; - options.level0_slowdown_writes_trigger = 1000; - options.level0_stop_writes_trigger = 1000; - options.write_buffer_size = 65536; - options.max_write_buffer_number = 2; - options.compression = kNoCompression; - options.max_compaction_bytes = 5000; + options_.compaction_style = kCompactionStyleNone; + options_.level0_slowdown_writes_trigger = 1000; + options_.level0_stop_writes_trigger = 1000; + options_.write_buffer_size = 65536; + options_.max_write_buffer_number = 2; + options_.compression = kNoCompression; + options_.max_compaction_bytes = 5000; // Add listener FlushedFileCollector* collector = new FlushedFileCollector(); - options.listeners.emplace_back(collector); + options_.listeners.emplace_back(collector); - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - assert(s.ok()); - assert(db); + OpenDB(); // create couple files for (int i = 0; i < 500; ++i) { - ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), - std::string(1000, 'a' + (i % 26)))); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), + std::string(1000, 'a' + (i % 26)))); } - ASSERT_OK(static_cast_with_check(db)->TEST_WaitForFlushMemTable()); + ASSERT_OK(static_cast_with_check(db_)->TEST_WaitForFlushMemTable()); auto l0_files_1 = collector->GetFlushedFiles(); collector->ClearFlushedFiles(); for (int i = 0; i < 500; ++i) { - ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), - std::string(1000, 'a' + (i % 26)))); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), + std::string(1000, 'a' + (i % 26)))); } - ASSERT_OK(static_cast_with_check(db)->TEST_WaitForFlushMemTable()); + ASSERT_OK(static_cast_with_check(db_)->TEST_WaitForFlushMemTable()); auto l0_files_2 = collector->GetFlushedFiles(); - ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_1, 0)); - ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_2, 0)); - // no assertion failure - delete db; + ASSERT_OK(db_->CompactFiles(CompactionOptions(), l0_files_1, 0)); + ASSERT_OK(db_->CompactFiles(CompactionOptions(), l0_files_2, 0)); } TEST_F(CompactFilesTest, CapturingPendingFiles) { - Options options; - options.create_if_missing = true; // Disable RocksDB background compaction. - options.compaction_style = kCompactionStyleNone; + options_.compaction_style = kCompactionStyleNone; // Always do full scans for obsolete files (needed to reproduce the issue). - options.delete_obsolete_files_period_micros = 0; + options_.delete_obsolete_files_period_micros = 0; // Add listener. FlushedFileCollector* collector = new FlushedFileCollector(); - options.listeners.emplace_back(collector); + options_.listeners.emplace_back(collector); - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - ASSERT_OK(s); - assert(db); + OpenDB(); // Create 5 files. for (int i = 0; i < 5; ++i) { - ASSERT_OK(db->Put(WriteOptions(), "key" + std::to_string(i), "value")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), "key" + std::to_string(i), "value")); + ASSERT_OK(db_->Flush(FlushOptions())); } // Ensure background work is fully finished including listener callbacks // before accessing listener state. 
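Several hunks in this file add a wait for background work before the test reads state gathered by an EventListener. A minimal sketch of that step as a free helper (hypothetical function name), with the checked downcast written out in full; static_cast_with_check<DBImpl> and the test-only TEST_WaitForBackgroundWork() are the calls these tests rely on.

#include "db/db_impl/db_impl.h"
#include "test_util/testharness.h"
#include "util/cast_util.h"

namespace ROCKSDB_NAMESPACE {

// Wait for flushes, compactions, and their listener callbacks to complete
// before the test inspects state collected by an EventListener.
void WaitForListenerCallbacks(DB* db) {
  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForBackgroundWork());
}

}  // namespace ROCKSDB_NAMESPACE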
- ASSERT_OK(static_cast_with_check(db)->TEST_WaitForBackgroundWork()); + ASSERT_OK(static_cast_with_check(db_)->TEST_WaitForBackgroundWork()); auto l0_files = collector->GetFlushedFiles(); EXPECT_EQ(5, l0_files.size()); @@ -313,25 +299,22 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) { // Start compacting files. ROCKSDB_NAMESPACE::port::Thread compaction_thread( - [&] { EXPECT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1)); }); + [&] { EXPECT_OK(db_->CompactFiles(CompactionOptions(), l0_files, 1)); }); // In the meantime flush another file. TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:0"); - ASSERT_OK(db->Put(WriteOptions(), "key5", "value")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), "key5", "value")); + ASSERT_OK(db_->Flush(FlushOptions())); TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:1"); compaction_thread.join(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); - delete db; + CloseDB(); // Make sure we can reopen the DB. - s = DB::Open(options, db_name_, &db); - ASSERT_OK(s); - assert(db); - delete db; + OpenDB(); } TEST_F(CompactFilesTest, CompactionFilterWithGetSv) { @@ -358,40 +341,32 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) { std::shared_ptr cf(new FilterWithGet()); - Options options; - options.create_if_missing = true; - options.compaction_filter = cf.get(); + options_.compaction_filter = cf.get(); - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - ASSERT_OK(s); - - cf->SetDB(db); + OpenDB(); + cf->SetDB(db_); // Write one L0 file - ASSERT_OK(db->Put(WriteOptions(), "K1", "V1")); - ASSERT_OK(db->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), "K1", "V1")); + ASSERT_OK(db_->Flush(FlushOptions())); // Compact all L0 files using CompactFiles ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta; - db->GetColumnFamilyMetaData(&meta); + db_->GetColumnFamilyMetaData(&meta); for (auto& file : meta.levels[0].files) { std::string fname = file.db_path + "/" + file.name; ASSERT_OK( - db->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), {fname}, 0)); + db_->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(), {fname}, 0)); } - - delete db; } TEST_F(CompactFilesTest, SentinelCompressionType) { if (!Zlib_Supported()) { - fprintf(stderr, "zlib compression not supported, skip this test\n"); + ROCKSDB_GTEST_SKIP("zlib compression not supported, skip this test"); return; } if (!Snappy_Supported()) { - fprintf(stderr, "snappy compression not supported, skip this test\n"); + ROCKSDB_GTEST_SKIP("snappy compression not supported, skip this test"); return; } // Check that passing `CompressionType::kDisableCompressionOption` to @@ -399,87 +374,77 @@ TEST_F(CompactFilesTest, SentinelCompressionType) { for (auto compaction_style : {CompactionStyle::kCompactionStyleLevel, CompactionStyle::kCompactionStyleUniversal, CompactionStyle::kCompactionStyleNone}) { - ASSERT_OK(DestroyDB(db_name_, Options())); - Options options; - options.compaction_style = compaction_style; + ASSERT_OK(DestroyDB(db_name_, options_)); + options_.compaction_style = compaction_style; // L0: Snappy, L1: ZSTD, L2: Snappy - options.compression_per_level = {CompressionType::kSnappyCompression, - CompressionType::kZlibCompression, - CompressionType::kSnappyCompression}; - options.create_if_missing = true; + options_.compression_per_level = {CompressionType::kSnappyCompression, + CompressionType::kZlibCompression, + CompressionType::kSnappyCompression}; 
FlushedFileCollector* collector = new FlushedFileCollector(); - options.listeners.emplace_back(collector); - DB* db = nullptr; - ASSERT_OK(DB::Open(options, db_name_, &db)); + options_.listeners.emplace_back(collector); - ASSERT_OK(db->Put(WriteOptions(), "key", "val")); - ASSERT_OK(db->Flush(FlushOptions())); + OpenDB(); + ASSERT_OK(db_->Put(WriteOptions(), "key", "val")); + ASSERT_OK(db_->Flush(FlushOptions())); // Ensure background work is fully finished including listener callbacks // before accessing listener state. - ASSERT_OK(static_cast_with_check(db)->TEST_WaitForBackgroundWork()); + ASSERT_OK( + static_cast_with_check(db_)->TEST_WaitForBackgroundWork()); auto l0_files = collector->GetFlushedFiles(); ASSERT_EQ(1, l0_files.size()); // L0->L1 compaction, so output should be ZSTD-compressed CompactionOptions compaction_opts; compaction_opts.compression = CompressionType::kDisableCompressionOption; - ASSERT_OK(db->CompactFiles(compaction_opts, l0_files, 1)); + ASSERT_OK(db_->CompactFiles(compaction_opts, l0_files, 1)); ROCKSDB_NAMESPACE::TablePropertiesCollection all_tables_props; - ASSERT_OK(db->GetPropertiesOfAllTables(&all_tables_props)); + ASSERT_OK(db_->GetPropertiesOfAllTables(&all_tables_props)); for (const auto& name_and_table_props : all_tables_props) { ASSERT_EQ(CompressionTypeToString(CompressionType::kZlibCompression), name_and_table_props.second->compression_name); } - delete db; + CloseDB(); } } TEST_F(CompactFilesTest, GetCompactionJobInfo) { - Options options; - options.create_if_missing = true; // Disable RocksDB background compaction. - options.compaction_style = kCompactionStyleNone; - options.level0_slowdown_writes_trigger = 1000; - options.level0_stop_writes_trigger = 1000; - options.write_buffer_size = 65536; - options.max_write_buffer_number = 2; - options.compression = kNoCompression; - options.max_compaction_bytes = 5000; + options_.compaction_style = kCompactionStyleNone; + options_.level0_slowdown_writes_trigger = 1000; + options_.level0_stop_writes_trigger = 1000; + options_.write_buffer_size = 65536; + options_.max_write_buffer_number = 2; + options_.compression = kNoCompression; + options_.max_compaction_bytes = 5000; // Add listener FlushedFileCollector* collector = new FlushedFileCollector(); - options.listeners.emplace_back(collector); + options_.listeners.emplace_back(collector); - DB* db = nullptr; - ASSERT_OK(DestroyDB(db_name_, options)); - Status s = DB::Open(options, db_name_, &db); - ASSERT_OK(s); - assert(db); + OpenDB(); // create couple files for (int i = 0; i < 500; ++i) { - ASSERT_OK(db->Put(WriteOptions(), std::to_string(i), - std::string(1000, 'a' + (i % 26)))); + ASSERT_OK(db_->Put(WriteOptions(), std::to_string(i), + std::string(1000, 'a' + (i % 26)))); } - ASSERT_OK(static_cast_with_check(db)->TEST_WaitForFlushMemTable()); + ASSERT_OK(static_cast_with_check(db_)->TEST_WaitForFlushMemTable()); auto l0_files_1 = collector->GetFlushedFiles(); CompactionOptions co; co.compression = CompressionType::kLZ4Compression; CompactionJobInfo compaction_job_info{}; ASSERT_OK( - db->CompactFiles(co, l0_files_1, 0, -1, nullptr, &compaction_job_info)); + db_->CompactFiles(co, l0_files_1, 0, -1, nullptr, &compaction_job_info)); ASSERT_EQ(compaction_job_info.base_input_level, 0); - ASSERT_EQ(compaction_job_info.cf_id, db->DefaultColumnFamily()->GetID()); - ASSERT_EQ(compaction_job_info.cf_name, db->DefaultColumnFamily()->GetName()); + ASSERT_EQ(compaction_job_info.cf_id, db_->DefaultColumnFamily()->GetID()); + ASSERT_EQ(compaction_job_info.cf_name, 
db_->DefaultColumnFamily()->GetName()); ASSERT_EQ(compaction_job_info.compaction_reason, CompactionReason::kManualCompaction); ASSERT_EQ(compaction_job_info.compression, CompressionType::kLZ4Compression); ASSERT_EQ(compaction_job_info.output_level, 0); ASSERT_OK(compaction_job_info.status); - // no assertion failure - delete db; } } // namespace ROCKSDB_NAMESPACE diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc index 0f0c5daf776..3addaa375ac 100644 --- a/db/compaction/compaction_job_test.cc +++ b/db/compaction/compaction_job_test.cc @@ -233,6 +233,12 @@ class CompactionJobTestBase : public testing::Test { mutable_cf_options_.max_compaction_bytes = 10 * 1024 * 1024; } + ~CompactionJobTestBase() { + Options options(BuildDBOptions(db_options_, mutable_db_options_), + ColumnFamilyOptions()); + EXPECT_OK(DestroyDB(dbname_, options)); + } + void SetUp() override { EXPECT_OK(env_->CreateDirIfMissing(dbname_)); db_options_.env = env_; diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index a28ac2b8856..d72ffb3ec92 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -1203,6 +1203,7 @@ TEST_F(DBBasicTest, DBClose) { ASSERT_EQ(env->GetCloseCount(), 2); options.info_log.reset(); ASSERT_EQ(env->GetCloseCount(), 3); + ASSERT_OK(DestroyDB(dbname, options)); } TEST_F(DBBasicTest, DBCloseAllDirectoryFDs) { diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index db80b82cb24..bc1e1a65ef7 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -1864,6 +1864,13 @@ TEST_P(DBBlockCacheKeyTest, StableCacheKeys) { #endif // !ROCKSDB_LITE Close(); +#ifndef ROCKSDB_LITE + // Delete the external files + for (const auto& e : external) { + Status s = options.env->DeleteFile(e); + ASSERT_TRUE(s.IsNotFound() || s.ok()); + } +#endif // ROCKSDB_LITE Destroy(options); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); } diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index 2fffcae6072..2740d6f1dff 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -6243,6 +6243,7 @@ TEST_P(DBCompactionTestWithParam, FixFileIngestionCompactionDeadlock) { ingestion_thr.join(); ASSERT_OK(dbfull()->TEST_WaitForCompact()); Close(); + ASSERT_OK(DestroyDir(env_, sst_files_dir)); } class DBCompactionTestWithOngoingFileIngestionParam @@ -6547,19 +6548,22 @@ TEST_F(DBCompactionTest, ConsistencyFailTest2) { void IngestOneKeyValue(DBImpl* db, const std::string& key, const std::string& value, const Options& options) { - ExternalSstFileInfo info; std::string f = test::PerThreadDBPath("sst_file" + key); - EnvOptions env; - ROCKSDB_NAMESPACE::SstFileWriter writer(env, options); - auto s = writer.Open(f); - ASSERT_OK(s); - // ASSERT_OK(writer.Put(Key(), "")); - ASSERT_OK(writer.Put(key, value)); + { + ExternalSstFileInfo info; + EnvOptions env; + ROCKSDB_NAMESPACE::SstFileWriter writer(env, options); + auto s = writer.Open(f); + ASSERT_OK(s); + // ASSERT_OK(writer.Put(Key(), "")); + ASSERT_OK(writer.Put(key, value)); - ASSERT_OK(writer.Finish(&info)); - IngestExternalFileOptions ingest_opt; + ASSERT_OK(writer.Finish(&info)); + IngestExternalFileOptions ingest_opt; - ASSERT_OK(db->IngestExternalFile({info.file_path}, ingest_opt)); + ASSERT_OK(db->IngestExternalFile({info.file_path}, ingest_opt)); + } + ASSERT_OK(db->GetEnv()->DeleteFile(f)); } class DBCompactionTestL0FilesMisorderCorruption : public DBCompactionTest { diff --git a/db/db_encryption_test.cc b/db/db_encryption_test.cc index 73e89d158bd..82deab2a35a 
100644 --- a/db/db_encryption_test.cc +++ b/db/db_encryption_test.cc @@ -117,6 +117,7 @@ TEST_F(DBEncryptionTest, ReadEmptyFile) { ASSERT_OK(status); ASSERT_TRUE(data.empty()); + ASSERT_OK(defaultEnv->DeleteFile(filePath)); } #endif // ROCKSDB_LITE diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 7f031444a4c..56fdf8b8a4c 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -770,6 +770,9 @@ TEST_P(DBSSTTestRateLimit, RateLimitedDelete) { 0, options.statistics->getAndResetTickerCount(FILES_DELETED_IMMEDIATELY)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); + if (different_wal_dir) { + ASSERT_OK(DestroyDir(options.env, alternative_wal_dir_)); + } } INSTANTIATE_TEST_CASE_P(RateLimitedDelete, DBSSTTestRateLimit, diff --git a/db/db_test.cc b/db/db_test.cc index 1688745c177..9b4c4cd8f4d 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -2382,6 +2382,7 @@ TEST_F(DBTest, DBOpen_Options) { delete db; db = nullptr; + ASSERT_OK(DestroyDB(dbname, options)); } TEST_F(DBTest, DBOpen_Change_NumLevels) { @@ -2438,6 +2439,8 @@ TEST_F(DBTest, DestroyDBMetaDatabase) { ASSERT_TRUE(!(DB::Open(options, dbname, &db)).ok()); ASSERT_TRUE(!(DB::Open(options, metadbname, &db)).ok()); ASSERT_TRUE(!(DB::Open(options, metametadbname, &db)).ok()); + ASSERT_OK( + DestroyDir(env_, dbname)); // The meta dbs leave stuff around. Clean up } #ifndef ROCKSDB_LITE @@ -2589,6 +2592,7 @@ TEST_F(DBTest, SnapshotFiles) { ASSERT_EQ(info.file_number, manifest_number); } } + ASSERT_OK(DestroyDir(env_, snapdir)); } while (ChangeCompactOptions()); } diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc index 99d0b3c4c8d..fe5fc657ff8 100644 --- a/db/db_wal_test.cc +++ b/db/db_wal_test.cc @@ -788,6 +788,7 @@ TEST_F(DBWALTest, IgnoreRecoveredLog) { ASSERT_NOK(s); Destroy(options); } while (ChangeWalOptions()); + ASSERT_OK(DestroyDir(env_, backup_logs)); } TEST_F(DBWALTest, RecoveryWithEmptyLog) { diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index ddd4b47cc59..707fd967318 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -613,6 +613,7 @@ TEST_P(FaultInjectionTest, NoDuplicateTrailingEntries) { // Verify that only one version edit exists in the file. 
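The hunks above also remove side directories that a test creates next to its DB (backup logs, an alternative WAL dir, SST staging dirs, snapshot dirs) with DestroyDir once the test is done with them. A small self-contained sketch of that pattern, assuming the same DestroyDir test utility and PerThreadDBPath helper these files already use (directory and test names are illustrative):

#include <string>

#include "rocksdb/env.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"

namespace ROCKSDB_NAMESPACE {

TEST(SideDirCleanupSketch, RemovesAuxiliaryDir) {
  Env* env = Env::Default();
  const std::string side_dir = test::PerThreadDBPath(env, "side_dir_sketch");
  ASSERT_OK(env->CreateDirIfMissing(side_dir));
  // ... a real test would stage WAL, SST, or backup files in side_dir here ...
  ASSERT_OK(DestroyDir(env, side_dir));  // same cleanup call the hunks above add
  ASSERT_EQ(Status::NotFound(), env->FileExists(side_dir));
}

}  // namespace ROCKSDB_NAMESPACE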
ASSERT_EQ(1, count); } + ASSERT_OK(fault_fs->DeleteFile(file_name, IOOptions(), nullptr)); } INSTANTIATE_TEST_CASE_P( diff --git a/db/listener_test.cc b/db/listener_test.cc index 160866bb774..5bc48c72c51 100644 --- a/db/listener_test.cc +++ b/db/listener_test.cc @@ -492,6 +492,9 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) { for (auto db : dbs) { delete db; } + for (int d = 0; d < kNumDBs; ++d) { + ASSERT_OK(DestroyDB(dbname_ + std::to_string(d), options)); + } } TEST_F(EventListenerTest, DisableBGCompaction) { diff --git a/db/options_file_test.cc b/db/options_file_test.cc index eb02e6ca4f1..4c6853af77e 100644 --- a/db/options_file_test.cc +++ b/db/options_file_test.cc @@ -78,6 +78,7 @@ TEST_F(OptionsFileTest, NumberOfOptionsFiles) { VerifyOptionsFileName(db, filename_history); delete db; } + ASSERT_OK(DestroyDB(dbname_, opt)); } TEST_F(OptionsFileTest, OptionsFileName) { diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc index 454d12dc584..c9c47bb95f4 100644 --- a/db/perf_context_test.cc +++ b/db/perf_context_test.cc @@ -66,10 +66,14 @@ std::shared_ptr OpenDb(bool read_only = false) { return std::shared_ptr(db); } -class PerfContextTest : public testing::Test {}; +class PerfContextTest : public testing::Test { + public: + PerfContextTest() { EXPECT_OK(DestroyDB(kDbName, Options())); } + + ~PerfContextTest() { EXPECT_OK(DestroyDB(kDbName, Options())); } +}; TEST_F(PerfContextTest, SeekIntoDeletion) { - ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -524,7 +528,6 @@ TEST_F(PerfContextTest, KeyComparisonCount) { // starts to become linear to the input size. TEST_F(PerfContextTest, SeekKeyComparison) { - ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -660,7 +663,6 @@ TEST_F(PerfContextTest, ToString) { } TEST_F(PerfContextTest, MergeOperatorTime) { - ASSERT_OK(DestroyDB(kDbName, Options())); DB* db; Options options; options.create_if_missing = true; @@ -845,7 +847,6 @@ TEST_F(PerfContextTest, CPUTimer) { return; } - ASSERT_OK(DestroyDB(kDbName, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; diff --git a/db/periodic_task_scheduler_test.cc b/db/periodic_task_scheduler_test.cc index 73c13fa1384..43759fd56bf 100644 --- a/db/periodic_task_scheduler_test.cc +++ b/db/periodic_task_scheduler_test.cc @@ -189,6 +189,9 @@ TEST_F(PeriodicTaskSchedulerTest, MultiInstances) { ASSERT_OK(dbs[i]->Close()); delete dbs[i]; } + for (int i = 0; i < kInstanceNum; i++) { + ASSERT_OK(DestroyDB(test::PerThreadDBPath(std::to_string(i)), options)); + } } TEST_F(PeriodicTaskSchedulerTest, MultiEnv) { @@ -218,6 +221,7 @@ TEST_F(PeriodicTaskSchedulerTest, MultiEnv) { ASSERT_OK(db->Close()); delete db; Close(); + ASSERT_OK(DestroyDB(dbname, options2)); } #endif // !ROCKSDB_LITE diff --git a/db/prefix_test.cc b/db/prefix_test.cc index 8592b8f313c..2a84faf85eb 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -54,10 +54,6 @@ DEFINE_int32(memtable_huge_page_size, 2 * 1024 * 1024, ""); DEFINE_int32(value_size, 40, ""); DEFINE_bool(enable_print, false, "Print options generated to console."); -// Path to the database on file system -const std::string kDbName = - ROCKSDB_NAMESPACE::test::PerThreadDBPath("prefix_test"); - namespace ROCKSDB_NAMESPACE { struct TestKey { @@ -217,27 +213,29 @@ class SamePrefixTransform : public SliceTransform { class PrefixTest : public testing::Test { public: + // Path to the database on file 
system + std::shared_ptr OpenDb() { DB* db; - options.create_if_missing = true; - options.write_buffer_size = FLAGS_write_buffer_size; - options.max_write_buffer_number = FLAGS_max_write_buffer_number; - options.min_write_buffer_number_to_merge = + options_.create_if_missing = true; + options_.write_buffer_size = FLAGS_write_buffer_size; + options_.max_write_buffer_number = FLAGS_max_write_buffer_number; + options_.min_write_buffer_number_to_merge = FLAGS_min_write_buffer_number_to_merge; - options.memtable_prefix_bloom_size_ratio = + options_.memtable_prefix_bloom_size_ratio = FLAGS_memtable_prefix_bloom_size_ratio; - options.memtable_huge_page_size = FLAGS_memtable_huge_page_size; + options_.memtable_huge_page_size = FLAGS_memtable_huge_page_size; - options.prefix_extractor.reset(NewFixedPrefixTransform(8)); + options_.prefix_extractor.reset(NewFixedPrefixTransform(8)); BlockBasedTableOptions bbto; bbto.filter_policy.reset(NewBloomFilterPolicy(10, false)); bbto.whole_key_filtering = false; - options.table_factory.reset(NewBlockBasedTableFactory(bbto)); - options.allow_concurrent_memtable_write = false; + options_.table_factory.reset(NewBlockBasedTableFactory(bbto)); + options_.allow_concurrent_memtable_write = false; - Status s = DB::Open(options, kDbName, &db); + Status s = DB::Open(options_, dbname_, &db); EXPECT_OK(s); return std::shared_ptr(db); } @@ -248,22 +246,22 @@ class PrefixTest : public testing::Test { // skip some options option_config_++; if (option_config_ < kEnd) { - options.prefix_extractor.reset(NewFixedPrefixTransform(8)); + options_.prefix_extractor.reset(NewFixedPrefixTransform(8)); switch (option_config_) { case kHashSkipList: - options.memtable_factory.reset( + options_.memtable_factory.reset( NewHashSkipListRepFactory(bucket_count, FLAGS_skiplist_height)); return true; case kHashLinkList: - options.memtable_factory.reset( + options_.memtable_factory.reset( NewHashLinkListRepFactory(bucket_count)); return true; case kHashLinkListHugePageTlb: - options.memtable_factory.reset( + options_.memtable_factory.reset( NewHashLinkListRepFactory(bucket_count, 2 * 1024 * 1024)); return true; case kHashLinkListTriggerSkipList: - options.memtable_factory.reset( + options_.memtable_factory.reset( NewHashLinkListRepFactory(bucket_count, 0, 3)); return true; default: @@ -274,9 +272,16 @@ class PrefixTest : public testing::Test { } PrefixTest() : option_config_(kBegin) { - options.comparator = new TestKeyComparator(); + options_.comparator = new TestKeyComparator(); + dbname_ = ROCKSDB_NAMESPACE::test::PerThreadDBPath("prefix_test"); + EXPECT_OK(DestroyDB(dbname_, options_)); + } + ~PrefixTest() override { + EXPECT_OK(DestroyDB(dbname_, options_)); + delete options_.comparator; } - ~PrefixTest() override { delete options.comparator; } + + std::string dbname_; protected: enum OptionConfig { @@ -288,10 +293,10 @@ class PrefixTest : public testing::Test { kEnd }; int option_config_; - Options options; + Options options_; }; -TEST(SamePrefixTest, InDomainTest) { +TEST_F(PrefixTest, InDomainTest) { DB* db; Options options; options.create_if_missing = true; @@ -303,8 +308,8 @@ TEST(SamePrefixTest, InDomainTest) { WriteOptions write_options; ReadOptions read_options; { - ASSERT_OK(DestroyDB(kDbName, Options())); - ASSERT_OK(DB::Open(options, kDbName, &db)); + ASSERT_OK(DestroyDB(dbname_, Options())); + ASSERT_OK(DB::Open(options, dbname_, &db)); ASSERT_OK(db->Put(write_options, "HHKB pro2", "Mar 24, 2006")); ASSERT_OK(db->Put(write_options, "HHKB pro2 Type-S", "June 29, 2011")); 
ASSERT_OK(db->Put(write_options, "Realforce 87u", "idk")); @@ -320,11 +325,11 @@ TEST(SamePrefixTest, InDomainTest) { delete db_iter; delete db; - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); } { - ASSERT_OK(DB::Open(options, kDbName, &db)); + ASSERT_OK(DB::Open(options, dbname_, &db)); ASSERT_OK(db->Put(write_options, "pikachu", "1")); ASSERT_OK(db->Put(write_options, "Meowth", "1")); ASSERT_OK(db->Put(write_options, "Mewtwo", "idk")); @@ -337,7 +342,7 @@ TEST(SamePrefixTest, InDomainTest) { ASSERT_OK(db_iter->status()); delete db_iter; delete db; - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); } } @@ -345,9 +350,9 @@ TEST_F(PrefixTest, TestResult) { for (int num_buckets = 1; num_buckets <= 2; num_buckets++) { FirstOption(); while (NextOptions(num_buckets)) { - std::cout << "*** Mem table: " << options.memtable_factory->Name() + std::cout << "*** Mem table: " << options_.memtable_factory->Name() << " number of buckets: " << num_buckets << std::endl; - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -522,9 +527,9 @@ TEST_F(PrefixTest, PrefixValid) { for (int num_buckets = 1; num_buckets <= 2; num_buckets++) { FirstOption(); while (NextOptions(num_buckets)) { - std::cout << "*** Mem table: " << options.memtable_factory->Name() + std::cout << "*** Mem table: " << options_.memtable_factory->Name() << " number of buckets: " << num_buckets << std::endl; - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -575,9 +580,9 @@ TEST_F(PrefixTest, PrefixValid) { TEST_F(PrefixTest, DynamicPrefixIterator) { while (NextOptions(FLAGS_bucket_count)) { - std::cout << "*** Mem table: " << options.memtable_factory->Name() + std::cout << "*** Mem table: " << options_.memtable_factory->Name() << std::endl; - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -628,7 +633,7 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { get_perf_context()->Reset(); StopWatchNano timer(SystemClock::Default().get(), true); - auto key_prefix = options.prefix_extractor->Transform(key); + auto key_prefix = options_.prefix_extractor->Transform(key); uint64_t total_keys = 0; for (iter->Seek(key); iter->Valid() && iter->key().starts_with(key_prefix); iter->Next()) { @@ -677,14 +682,14 @@ TEST_F(PrefixTest, DynamicPrefixIterator) { TEST_F(PrefixTest, PrefixSeekModePrev) { // Only for SkipListFactory - options.memtable_factory.reset(new SkipListFactory); - options.merge_operator = MergeOperators::CreatePutOperator(); - options.write_buffer_size = 1024 * 1024; + options_.memtable_factory.reset(new SkipListFactory); + options_.merge_operator = MergeOperators::CreatePutOperator(); + options_.write_buffer_size = 1024 * 1024; Random rnd(1); for (size_t m = 1; m < 100; m++) { std::cout << "[" + std::to_string(m) + "]" + "*** Mem table: " - << options.memtable_factory->Name() << std::endl; - ASSERT_OK(DestroyDB(kDbName, Options())); + << options_.memtable_factory->Name() << std::endl; + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -798,10 +803,10 @@ TEST_F(PrefixTest, PrefixSeekModePrev2) { // after seek(15), iter1 will be at 21 and iter2 will be 33. 
// Then if call Prev() in prefix mode where SeekForPrev(21) gets called, // iter2 should turn to invalid state because of bloom filter. - options.memtable_factory.reset(new SkipListFactory); - options.write_buffer_size = 1024 * 1024; + options_.memtable_factory.reset(new SkipListFactory); + options_.write_buffer_size = 1024 * 1024; std::string v13("v13"); - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -829,15 +834,15 @@ TEST_F(PrefixTest, PrefixSeekModePrev2) { TEST_F(PrefixTest, PrefixSeekModePrev3) { // Only for SkipListFactory // test SeekToLast() with iterate_upper_bound_ in prefix_seek_mode - options.memtable_factory.reset(new SkipListFactory); - options.write_buffer_size = 1024 * 1024; + options_.memtable_factory.reset(new SkipListFactory); + options_.write_buffer_size = 1024 * 1024; std::string v14("v14"); TestKey upper_bound_key = TestKey(1, 5); std::string s; Slice upper_bound = TestKeyToSlice(s, upper_bound_key); { - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; @@ -859,7 +864,7 @@ TEST_F(PrefixTest, PrefixSeekModePrev3) { ASSERT_EQ(iter->value(), v14); } { - ASSERT_OK(DestroyDB(kDbName, Options())); + ASSERT_OK(DestroyDB(dbname_, Options())); auto db = OpenDb(); WriteOptions write_options; ReadOptions read_options; diff --git a/env/env_test.cc b/env/env_test.cc index f4e9d50b239..d6b9e5a5821 100644 --- a/env/env_test.cc +++ b/env/env_test.cc @@ -2031,7 +2031,9 @@ TEST_P(EnvPosixTestWithParam, ConsistentChildrenAttributes) { ASSERT_OK(env_->GetFileSize(path, &size)); ASSERT_EQ(size, 4096 * i); ASSERT_EQ(size, file_attrs_iter->size_bytes); + ASSERT_OK(env_->DeleteFile(path)); } + ASSERT_OK(env_->DeleteDir(test_base_dir)); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace(); } @@ -2620,6 +2622,7 @@ TEST_F(EnvTest, IsDirectory) { } ASSERT_OK(Env::Default()->IsDirectory(test_file_path, &is_dir)); ASSERT_FALSE(is_dir); + ASSERT_OK(DestroyDir(Env::Default(), test_directory_)); } TEST_F(EnvTest, EnvWriteVerificationTest) { @@ -2642,6 +2645,7 @@ TEST_F(EnvTest, EnvWriteVerificationTest) { v_info.checksum = Slice(checksum); s = file->Append(Slice(test_data), v_info); ASSERT_OK(s); + ASSERT_OK(DestroyDir(Env::Default(), test_directory_)); } class CreateEnvTest : public testing::Test { diff --git a/logging/auto_roll_logger_test.cc b/logging/auto_roll_logger_test.cc index 8e94a78c824..6c6581a491e 100644 --- a/logging/auto_roll_logger_test.cc +++ b/logging/auto_roll_logger_test.cc @@ -50,30 +50,22 @@ void LogMessage(const InfoLogLevel log_level, Logger* logger, class AutoRollLoggerTest : public testing::Test { public: - static void InitTestDb() { - // TODO replace the `system` calls with Env/FileSystem APIs. 
-#ifdef OS_WIN - // Replace all slashes in the path so windows CompSpec does not - // become confused - std::string testDbDir(kTestDbDir); - std::replace_if( - testDbDir.begin(), testDbDir.end(), [](char ch) { return ch == '/'; }, - '\\'); - std::string deleteDbDirCmd = - "if exist " + testDbDir + " rd /s /q " + testDbDir; - ASSERT_TRUE(system(deleteDbDirCmd.c_str()) == 0); - - std::string testDir(kTestDir); - std::replace_if( - testDir.begin(), testDir.end(), [](char ch) { return ch == '/'; }, - '\\'); - std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir; -#else - std::string deleteCmd = "rm -rf " + kTestDir + " " + kTestDbDir; -#endif - ASSERT_TRUE(system(deleteCmd.c_str()) == 0); - ASSERT_OK(Env::Default()->CreateDir(kTestDir)); - ASSERT_OK(Env::Default()->CreateDir(kTestDbDir)); + AutoRollLoggerTest() { + env_ = Env::Default(); + test_dir_ = test::PerThreadDBPath(env_, "db_log_test"); + log_file_ = test_dir_ + "/LOG"; + test_db_dir_ = test::PerThreadDBPath(env_, "db_log_test_db"); + + RecreateLogDir(); + } + + ~AutoRollLoggerTest() { EXPECT_OK(DestroyDir(env_, test_dir_)); } + + void RecreateLogDir() { + EXPECT_OK(DestroyDir(env_, test_dir_)); + EXPECT_OK(DestroyDir(env_, test_db_dir_)); + EXPECT_OK(env_->CreateDir(test_dir_)); + EXPECT_OK(env_->CreateDir(test_db_dir_)); } void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size, @@ -82,11 +74,11 @@ class AutoRollLoggerTest : public testing::Test { const std::shared_ptr& sc, AutoRollLogger* logger, size_t time, const std::string& log_message); - // return list of files under kTestDir that contains "LOG" + // return list of files under test_dir_ that contains "LOG" std::vector GetLogFiles() { std::vector ret; std::vector files; - Status s = default_env->GetChildren(kTestDir, &files); + Status s = env_->GetChildren(test_dir_, &files); // Should call ASSERT_OK() here but it doesn't compile. It's not // worth the time figuring out why. 
EXPECT_TRUE(s.ok()); @@ -98,10 +90,10 @@ class AutoRollLoggerTest : public testing::Test { return ret; } - // Delete all log files under kTestDir + // Delete all log files under test_dir_ void CleanupLogFiles() { for (const std::string& f : GetLogFiles()) { - ASSERT_OK(default_env->DeleteFile(kTestDir + "/" + f)); + ASSERT_OK(env_->DeleteFile(test_dir_ + "/" + f)); } } @@ -119,21 +111,14 @@ class AutoRollLoggerTest : public testing::Test { } static const std::string kSampleMessage; - static const std::string kTestDir; - static const std::string kTestDbDir; - static const std::string kLogFile; - static Env* default_env; + std::string test_dir_; + std::string test_db_dir_; + std::string log_file_; + Env* env_; }; const std::string AutoRollLoggerTest::kSampleMessage( "this is the message to be written to the log file!!"); -const std::string AutoRollLoggerTest::kTestDir( - test::PerThreadDBPath("db_log_test")); -const std::string AutoRollLoggerTest::kTestDbDir( - test::PerThreadDBPath("db_log_test_db")); -const std::string AutoRollLoggerTest::kLogFile( - test::PerThreadDBPath("db_log_test") + "/LOG"); -Env* AutoRollLoggerTest::default_env = Env::Default(); void AutoRollLoggerTest::RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size, @@ -172,7 +157,7 @@ void AutoRollLoggerTest::RollLogFileByTimeTest( uint64_t actual_ctime; uint64_t total_log_size; - EXPECT_OK(fs->GetFileSize(kLogFile, IOOptions(), &total_log_size, nullptr)); + EXPECT_OK(fs->GetFileSize(log_file_, IOOptions(), &total_log_size, nullptr)); expected_ctime = logger->TEST_ctime(); logger->SetCallNowMicrosEveryNRecords(0); @@ -204,12 +189,11 @@ void AutoRollLoggerTest::RollLogFileByTimeTest( } TEST_F(AutoRollLoggerTest, RollLogFileBySize) { - InitTestDb(); size_t log_max_size = 1024 * 5; size_t keep_log_file_num = 10; - AutoRollLogger logger(FileSystem::Default(), SystemClock::Default(), kTestDir, - "", log_max_size, 0, keep_log_file_num); + AutoRollLogger logger(FileSystem::Default(), SystemClock::Default(), + test_dir_, "", log_max_size, 0, keep_log_file_num); RollLogFileBySizeTest(&logger, log_max_size, kSampleMessage + ":RollLogFileBySize"); @@ -223,24 +207,22 @@ TEST_F(AutoRollLoggerTest, RollLogFileByTime) { size_t log_size = 1024 * 5; size_t keep_log_file_num = 10; - InitTestDb(); // -- Test the existence of file during the server restart. 
- ASSERT_EQ(Status::NotFound(), default_env->FileExists(kLogFile)); - AutoRollLogger logger(default_env->GetFileSystem(), nsc, kTestDir, "", - log_size, time, keep_log_file_num); - ASSERT_OK(default_env->FileExists(kLogFile)); + ASSERT_EQ(Status::NotFound(), env_->FileExists(log_file_)); + AutoRollLogger logger(env_->GetFileSystem(), nsc, test_dir_, "", log_size, + time, keep_log_file_num); + ASSERT_OK(env_->FileExists(log_file_)); - RollLogFileByTimeTest(default_env->GetFileSystem(), nsc, &logger, time, + RollLogFileByTimeTest(env_->GetFileSystem(), nsc, &logger, time, kSampleMessage + ":RollLogFileByTime"); } TEST_F(AutoRollLoggerTest, SetInfoLogLevel) { - InitTestDb(); Options options; options.info_log_level = InfoLogLevel::FATAL_LEVEL; options.max_log_file_size = 1024; std::shared_ptr logger; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); auto* auto_roll_logger = dynamic_cast(logger.get()); ASSERT_NE(nullptr, auto_roll_logger); ASSERT_EQ(InfoLogLevel::FATAL_LEVEL, auto_roll_logger->GetInfoLogLevel()); @@ -256,7 +238,6 @@ TEST_F(AutoRollLoggerTest, SetInfoLogLevel) { TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) { // If only 'log_max_size' options is specified, then every time // when rocksdb is restarted, a new empty log file will be created. - InitTestDb(); // WORKAROUND: // avoid complier's complaint of "comparison between signed // and unsigned integer expressions" because literal 0 is @@ -267,7 +248,7 @@ TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) { AutoRollLogger* logger = new AutoRollLogger(FileSystem::Default(), SystemClock::Default(), - kTestDir, "", log_size, 0, keep_log_file_num); + test_dir_, "", log_size, 0, keep_log_file_num); LogMessage(logger, kSampleMessage.c_str()); ASSERT_GT(logger->GetLogFileSize(), kZero); @@ -275,7 +256,7 @@ TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) { // reopens the log file and an empty log file will be created. 
logger = new AutoRollLogger(FileSystem::Default(), SystemClock::Default(), - kTestDir, "", log_size, 0, 10); + test_dir_, "", log_size, 0, 10); ASSERT_EQ(logger->GetLogFileSize(), kZero); delete logger; } @@ -284,11 +265,9 @@ TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) { size_t time = 2, log_max_size = 1024 * 5; size_t keep_log_file_num = 10; - InitTestDb(); - auto nsc = std::make_shared(SystemClock::Default(), true); - AutoRollLogger logger(FileSystem::Default(), nsc, kTestDir, "", log_max_size, + AutoRollLogger logger(FileSystem::Default(), nsc, test_dir_, "", log_max_size, time, keep_log_file_num); // Test the ability to roll by size @@ -307,18 +286,17 @@ TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) { DBOptions options; auto nsc = std::make_shared(SystemClock::Default(), true); - std::unique_ptr nse(new CompositeEnvWrapper(Env::Default(), nsc)); + std::unique_ptr nse(new CompositeEnvWrapper(env_, nsc)); std::shared_ptr logger; // Normal logger - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); ASSERT_TRUE(dynamic_cast(logger.get())); // Only roll by size - InitTestDb(); options.max_log_file_size = 1024; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); AutoRollLogger* auto_roll_logger = dynamic_cast(logger.get()); ASSERT_TRUE(auto_roll_logger); @@ -327,20 +305,20 @@ TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) { // Only roll by Time options.env = nse.get(); - InitTestDb(); + options.max_log_file_size = 0; options.log_file_time_to_roll = 2; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); auto_roll_logger = dynamic_cast(logger.get()); RollLogFileByTimeTest(options.env->GetFileSystem(), nsc, auto_roll_logger, options.log_file_time_to_roll, kSampleMessage + ":CreateLoggerFromOptions - time"); // roll by both Time and size - InitTestDb(); + RecreateLogDir(); options.max_log_file_size = 1024 * 5; options.log_file_time_to_roll = 2; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); auto_roll_logger = dynamic_cast(logger.get()); RollLogFileBySizeTest(auto_roll_logger, options.max_log_file_size, kSampleMessage + ":CreateLoggerFromOptions - both"); @@ -351,11 +329,11 @@ TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) { // Set keep_log_file_num { const size_t kFileNum = 3; - InitTestDb(); + RecreateLogDir(); options.max_log_file_size = 512; options.log_file_time_to_roll = 2; options.keep_log_file_num = kFileNum; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); auto_roll_logger = dynamic_cast(logger.get()); // Roll the log 4 times, and it will trim to 3 files. @@ -378,12 +356,12 @@ TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) { // db_log_dir. { const size_t kFileNum = 3; - InitTestDb(); + RecreateLogDir(); options.max_log_file_size = 512; options.log_file_time_to_roll = 2; options.keep_log_file_num = kFileNum; - options.db_log_dir = kTestDir; - ASSERT_OK(CreateLoggerFromOptions(kTestDbDir, options, &logger)); + options.db_log_dir = test_dir_; + ASSERT_OK(CreateLoggerFromOptions(test_db_dir_, options, &logger)); auto_roll_logger = dynamic_cast(logger.get()); // Roll the log 4 times, and it will trim to 3 files. 
@@ -411,10 +389,10 @@ TEST_F(AutoRollLoggerTest, AutoDeleting) { for (int attempt = 0; attempt < 2; attempt++) { // In the first attemp, db_log_dir is not set, while in the // second it is set. - std::string dbname = (attempt == 0) ? kTestDir : "/test/dummy/dir"; - std::string db_log_dir = (attempt == 0) ? "" : kTestDir; + std::string dbname = (attempt == 0) ? test_dir_ : "/test/dummy/dir"; + std::string db_log_dir = (attempt == 0) ? "" : test_dir_; - InitTestDb(); + RecreateLogDir(); const size_t kMaxFileSize = 512; { size_t log_num = 8; @@ -454,9 +432,8 @@ TEST_F(AutoRollLoggerTest, LogFlushWhileRolling) { DBOptions options; std::shared_ptr logger; - InitTestDb(); options.max_log_file_size = 1024 * 5; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); AutoRollLogger* auto_roll_logger = dynamic_cast(logger.get()); ASSERT_TRUE(auto_roll_logger); @@ -489,15 +466,13 @@ TEST_F(AutoRollLoggerTest, LogFlushWhileRolling) { #endif // OS_WIN TEST_F(AutoRollLoggerTest, InfoLogLevel) { - InitTestDb(); - size_t log_size = 8192; size_t log_lines = 0; // an extra-scope to force the AutoRollLogger to flush the log file when it // becomes out of scope. { AutoRollLogger logger(FileSystem::Default(), SystemClock::Default(), - kTestDir, "", log_size, 0, 10); + test_dir_, "", log_size, 0, 10); for (int log_level = InfoLogLevel::HEADER_LEVEL; log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) { logger.SetInfoLogLevel((InfoLogLevel)log_level); @@ -523,7 +498,7 @@ TEST_F(AutoRollLoggerTest, InfoLogLevel) { log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1; } } - std::ifstream inFile(AutoRollLoggerTest::kLogFile.c_str()); + std::ifstream inFile(log_file_.c_str()); size_t lines = std::count(std::istreambuf_iterator(inFile), std::istreambuf_iterator(), '\n'); ASSERT_EQ(log_lines, lines); @@ -531,12 +506,10 @@ TEST_F(AutoRollLoggerTest, InfoLogLevel) { } TEST_F(AutoRollLoggerTest, Close) { - InitTestDb(); - size_t log_size = 8192; size_t log_lines = 0; - AutoRollLogger logger(FileSystem::Default(), SystemClock::Default(), kTestDir, - "", log_size, 0, 10); + AutoRollLogger logger(FileSystem::Default(), SystemClock::Default(), + test_dir_, "", log_size, 0, 10); for (int log_level = InfoLogLevel::HEADER_LEVEL; log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) { logger.SetInfoLogLevel((InfoLogLevel)log_level); @@ -563,7 +536,7 @@ TEST_F(AutoRollLoggerTest, Close) { } ASSERT_EQ(logger.Close(), Status::OK()); - std::ifstream inFile(AutoRollLoggerTest::kLogFile.c_str()); + std::ifstream inFile(log_file_.c_str()); size_t lines = std::count(std::istreambuf_iterator(inFile), std::istreambuf_iterator(), '\n'); ASSERT_EQ(log_lines, lines); @@ -572,14 +545,15 @@ TEST_F(AutoRollLoggerTest, Close) { // Test the logger Header function for roll over logs // We expect the new logs creates as roll over to carry the headers specified -static std::vector GetOldFileNames(const std::string& path) { +static std::vector GetOldFileNames(Env* env, + const std::string& path) { std::vector ret; const std::string dirname = path.substr(/*start=*/0, path.find_last_of("/")); const std::string fname = path.substr(path.find_last_of("/") + 1); std::vector children; - EXPECT_OK(Env::Default()->GetChildren(dirname, &children)); + EXPECT_OK(env->GetChildren(dirname, &children)); // We know that the old log files are named [path] // Return all entities that match the pattern @@ -600,10 +574,10 @@ TEST_F(AutoRollLoggerTest, LogHeaderTest) { // test_num == 0 -> 
standard call to Header() // test_num == 1 -> call to Log() with InfoLogLevel::HEADER_LEVEL for (int test_num = 0; test_num < 2; test_num++) { - InitTestDb(); + RecreateLogDir(); AutoRollLogger logger(FileSystem::Default(), SystemClock::Default(), - kTestDir, /*db_log_dir=*/"", LOG_MAX_SIZE, + test_dir_, /*db_log_dir=*/"", LOG_MAX_SIZE, /*log_file_time_to_roll=*/0, /*keep_log_file_num=*/10); @@ -635,7 +609,7 @@ TEST_F(AutoRollLoggerTest, LogHeaderTest) { // Flush the log for the latest file LogFlush(&logger); - const auto oldfiles = GetOldFileNames(newfname); + const auto oldfiles = GetOldFileNames(env_, newfname); ASSERT_EQ(oldfiles.size(), (size_t)2); @@ -651,22 +625,14 @@ TEST_F(AutoRollLoggerTest, LogHeaderTest) { TEST_F(AutoRollLoggerTest, LogFileExistence) { ROCKSDB_NAMESPACE::DB* db; ROCKSDB_NAMESPACE::Options options; -#ifdef OS_WIN - // Replace all slashes in the path so windows CompSpec does not - // become confused - std::string testDir(kTestDir); - std::replace_if( - testDir.begin(), testDir.end(), [](char ch) { return ch == '/'; }, '\\'); - std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir; -#else - std::string deleteCmd = "rm -rf " + kTestDir; -#endif - ASSERT_EQ(system(deleteCmd.c_str()), 0); + + ASSERT_OK(DestroyDir(env_, test_dir_)); options.max_log_file_size = 100 * 1024 * 1024; options.create_if_missing = true; - ASSERT_OK(ROCKSDB_NAMESPACE::DB::Open(options, kTestDir, &db)); - ASSERT_OK(default_env->FileExists(kLogFile)); + ASSERT_OK(ROCKSDB_NAMESPACE::DB::Open(options, test_dir_, &db)); + ASSERT_OK(env_->FileExists(log_file_)); delete db; + ASSERT_OK(DestroyDB(test_dir_, options)); } TEST_F(AutoRollLoggerTest, FileCreateFailure) { @@ -680,29 +646,27 @@ TEST_F(AutoRollLoggerTest, FileCreateFailure) { } TEST_F(AutoRollLoggerTest, RenameOnlyWhenExists) { - InitTestDb(); - SpecialEnv env(Env::Default()); + SpecialEnv env(env_); Options options; options.env = &env; // Originally no LOG exists. Should not see a rename. { std::shared_ptr logger; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); ASSERT_EQ(0, env.rename_count_); } // Now a LOG exists. Create a new one should see a rename. { std::shared_ptr logger; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); ASSERT_EQ(1, env.rename_count_); } } TEST_F(AutoRollLoggerTest, RenameError) { - InitTestDb(); - SpecialEnv env(Env::Default()); + SpecialEnv env(env_); env.rename_error_ = true; Options options; options.env = &env; @@ -710,14 +674,14 @@ TEST_F(AutoRollLoggerTest, RenameError) { // Originally no LOG exists. Should not be impacted by rename error. { std::shared_ptr logger; - ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_OK(CreateLoggerFromOptions(test_dir_, options, &logger)); ASSERT_TRUE(logger != nullptr); } // Now a LOG exists. Rename error should cause failure. 
{ std::shared_ptr logger; - ASSERT_NOK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_NOK(CreateLoggerFromOptions(test_dir_, options, &logger)); ASSERT_TRUE(logger == nullptr); } } diff --git a/table/cuckoo/cuckoo_table_builder_test.cc b/table/cuckoo/cuckoo_table_builder_test.cc index be1c62117da..a5d972515f7 100644 --- a/table/cuckoo/cuckoo_table_builder_test.cc +++ b/table/cuckoo/cuckoo_table_builder_test.cc @@ -41,6 +41,12 @@ class CuckooBuilderTest : public testing::Test { file_options_ = FileOptions(options); } + ~CuckooBuilderTest() override { + if (!fname.empty()) { + EXPECT_OK(env_->DeleteFile(fname)); + } + } + void CheckFileContents(const std::vector& keys, const std::vector& values, const std::vector& expected_locations, diff --git a/table/cuckoo/cuckoo_table_reader_test.cc b/table/cuckoo/cuckoo_table_reader_test.cc index d3d1490c6ef..b3592f8e61f 100644 --- a/table/cuckoo/cuckoo_table_reader_test.cc +++ b/table/cuckoo/cuckoo_table_reader_test.cc @@ -73,6 +73,12 @@ class CuckooReaderTest : public testing::Test { file_options = FileOptions(options); } + ~CuckooReaderTest() { + if (!fname.empty()) { + env->DeleteFile(fname).PermitUncheckedError(); + } + } + void SetUp(int num) { num_items = num; hash_map.clear(); diff --git a/table/table_test.cc b/table/table_test.cc index d5fff82da4f..782f4d6cfcc 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -570,10 +570,15 @@ class DBConstructor : public Constructor { public: explicit DBConstructor(const Comparator* cmp) : Constructor(cmp), comparator_(cmp) { + dbname_ = test::PerThreadDBPath("table_testdb"); db_ = nullptr; NewDB(); } - ~DBConstructor() override { delete db_; } + ~DBConstructor() override { + delete db_; + EXPECT_OK(DestroyDB(dbname_, Options())); + } + Status FinishImpl(const Options& /*options*/, const ImmutableOptions& /*ioptions*/, const MutableCFOptions& /*moptions*/, @@ -599,18 +604,17 @@ class DBConstructor : public Constructor { DB* db() const override { return db_; } private: + std::string dbname_; void NewDB() { - std::string name = test::PerThreadDBPath("table_testdb"); - Options options; options.comparator = comparator_; - Status status = DestroyDB(name, options); + Status status = DestroyDB(dbname_, options); ASSERT_TRUE(status.ok()) << status.ToString(); options.create_if_missing = true; options.error_if_exists = true; options.write_buffer_size = 10000; // Something small to force merging - status = DB::Open(options, name, &db_); + status = DB::Open(options, dbname_, &db_); ASSERT_TRUE(status.ok()) << status.ToString(); } @@ -4478,6 +4482,7 @@ TEST_F(PrefixTest, PrefixAndWholeKeyTest) { // Trigger compaction. ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr)); delete db; + ASSERT_OK(DestroyDB(kDBPath, options)); // In the second round, turn whole_key_filtering off and expect // rocksdb still works. 
 }
diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc
index c8604bf439b..bc3ec8180e8 100644
--- a/tools/reduce_levels_test.cc
+++ b/tools/reduce_levels_test.cc
@@ -26,6 +26,8 @@ class ReduceLevelTest : public testing::Test {
     db_ = nullptr;
   }
 
+  ~ReduceLevelTest() { EXPECT_OK(DestroyDB(dbname_, Options())); }
+
   Status OpenDB(bool create_if_missing, int levels);
 
   Status Put(const std::string& k, const std::string& v) {
diff --git a/tools/trace_analyzer_test.cc b/tools/trace_analyzer_test.cc
index d7f9e4da81f..4e759a24e13 100644
--- a/tools/trace_analyzer_test.cc
+++ b/tools/trace_analyzer_test.cc
@@ -55,7 +55,7 @@ class TraceAnalyzerTest : public testing::Test {
     dbname_ = test_path_ + "/db";
   }
 
-  ~TraceAnalyzerTest() override {}
+  ~TraceAnalyzerTest() override { EXPECT_OK(DestroyDir(env_, test_path_)); }
 
   void GenerateTrace(std::string trace_path) {
     Options options;
diff --git a/utilities/backup/backup_engine_test.cc b/utilities/backup/backup_engine_test.cc
index d780a1b2b26..1cccfc04787 100644
--- a/utilities/backup/backup_engine_test.cc
+++ b/utilities/backup/backup_engine_test.cc
@@ -669,6 +669,12 @@ class BackupEngineTest : public testing::Test {
     backup_chroot_env_->DeleteFile(latest_backup_).PermitUncheckedError();
   }
 
+  ~BackupEngineTest() {
+    EXPECT_OK(
+        DestroyDir(Env::Default(), test::PerThreadDBPath("db_for_backup")));
+    EXPECT_OK(DestroyDir(Env::Default(), test::PerThreadDBPath("db_backups")));
+  }
+
   void SetEnvsFromFileSystems() {
     db_chroot_env_.reset(
         new CompositeEnvWrapper(Env::Default(), db_chroot_fs_));
diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc
index 3da753d5f3b..270d7ef092f 100644
--- a/utilities/checkpoint/checkpoint_test.cc
+++ b/utilities/checkpoint/checkpoint_test.cc
@@ -740,6 +740,7 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) {
   delete snapshotDB;
   snapshotDB = nullptr;
   delete txdb;
+  ASSERT_OK(DestroyDB(dbname, CurrentOptions()));
 }
 
 TEST_F(CheckpointTest, CheckpointInvalidDirectoryName) {
diff --git a/utilities/memory/memory_test.cc b/utilities/memory/memory_test.cc
index 0b043af0eae..b14eee805f5 100644
--- a/utilities/memory/memory_test.cc
+++ b/utilities/memory/memory_test.cc
@@ -21,9 +21,23 @@ namespace ROCKSDB_NAMESPACE {
 class MemoryTest : public testing::Test {
  public:
   MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
-    assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
+    EXPECT_OK(Env::Default()->CreateDirIfMissing(kDbDir));
+    Options opt;
+    for (int i = 0; i < kNumDBs; ++i) {
+      EXPECT_OK(DestroyDB(GetDBName(i), opt));
+    }
   }
 
+  ~MemoryTest() {
+    Options opt;
+    for (int i = 0; i < kNumDBs; ++i) {
+      EXPECT_OK(DestroyDB(GetDBName(i), opt));
+    }
+    EXPECT_OK(Env::Default()->DeleteDir(kDbDir));
+  }
+
+  static constexpr int kNumDBs = 10;
+
   std::string GetDBName(int id) { return kDbDir + "db_" + std::to_string(id); }
 
   void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
@@ -92,7 +106,6 @@ class MemoryTest : public testing::Test {
 TEST_F(MemoryTest, SharedBlockCacheTotal) {
   std::vector<DB*> dbs;
   std::vector<uint64_t> usage_by_type;
-  const int kNumDBs = 10;
   const int kKeySize = 100;
   const int kValueSize = 500;
   Options opt;
@@ -145,7 +158,6 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
   std::vector<DB*> dbs;
   std::vector<uint64_t> usage_by_type;
   std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
-  const int kNumDBs = 10;
   // These key/value sizes ensure each KV has its own memtable. Note that the
   // minimum write_buffer_size allowed is 64 KB.
   const int kKeySize = 100;
diff --git a/utilities/merge_operators/string_append/stringappend_test.cc b/utilities/merge_operators/string_append/stringappend_test.cc
index 22b6144af65..1f577c546f6 100644
--- a/utilities/merge_operators/string_append/stringappend_test.cc
+++ b/utilities/merge_operators/string_append/stringappend_test.cc
@@ -29,45 +29,6 @@
 namespace ROCKSDB_NAMESPACE {
 
-// Path to the database on file system
-const std::string kDbName = test::PerThreadDBPath("stringappend_test");
-
-namespace {
-// OpenDb opens a (possibly new) rocksdb database with a StringAppendOperator
-std::shared_ptr<DB> OpenNormalDb(const std::string& delim) {
-  DB* db;
-  Options options;
-  options.create_if_missing = true;
-  MergeOperator* mergeOperator;
-  if (delim.size() == 1) {
-    mergeOperator = new StringAppendOperator(delim[0]);
-  } else {
-    mergeOperator = new StringAppendOperator(delim);
-  }
-  options.merge_operator.reset(mergeOperator);
-  EXPECT_OK(DB::Open(options, kDbName, &db));
-  return std::shared_ptr<DB>(db);
-}
-
-#ifndef ROCKSDB_LITE  // TtlDb is not supported in Lite
-// Open a TtlDB with a non-associative StringAppendTESTOperator
-std::shared_ptr<DB> OpenTtlDb(const std::string& delim) {
-  DBWithTTL* db;
-  Options options;
-  options.create_if_missing = true;
-  MergeOperator* mergeOperator;
-  if (delim.size() == 1) {
-    mergeOperator = new StringAppendTESTOperator(delim[0]);
-  } else {
-    mergeOperator = new StringAppendTESTOperator(delim);
-  }
-  options.merge_operator.reset(mergeOperator);
-  EXPECT_OK(DBWithTTL::Open(options, kDbName, &db, 123456));
-  return std::shared_ptr<DB>(db);
-}
-#endif  // !ROCKSDB_LITE
-}  // namespace
-
 /// StringLists represents a set of string-lists, each with a key-index.
 /// Supports Append(list, string) and Get(list)
 class StringLists {
@@ -124,35 +85,48 @@ class StringLists {
 class StringAppendOperatorTest : public testing::Test,
                                  public ::testing::WithParamInterface<bool> {
  public:
-  StringAppendOperatorTest() {
+  StringAppendOperatorTest() : use_ttl_(false) {
+    dbname_ = test::PerThreadDBPath("stringappend_test");
     EXPECT_OK(
-        DestroyDB(kDbName, Options()));  // Start each test with a fresh DB
-  }
-
-  void SetUp() override {
+        DestroyDB(dbname_, Options()));  // Start each test with a fresh DB
 #ifndef ROCKSDB_LITE  // TtlDb is not supported in Lite
-    bool if_use_ttl = GetParam();
-    if (if_use_ttl) {
+    use_ttl_ = GetParam();
+#endif  // !ROCKSDB_LITE
+    if (use_ttl_) {
       fprintf(stderr, "Running tests with ttl db and generic operator.\n");
-      StringAppendOperatorTest::SetOpenDbFunction(&OpenTtlDb);
-      return;
+    } else {
+      fprintf(stderr, "Running tests with regular db and operator.\n");
     }
-#endif  // !ROCKSDB_LITE
-    fprintf(stderr, "Running tests with regular db and operator.\n");
-    StringAppendOperatorTest::SetOpenDbFunction(&OpenNormalDb);
   }
 
-  using OpenFuncPtr = std::shared_ptr<DB> (*)(const std::string&);
+  ~StringAppendOperatorTest() {
+    EXPECT_OK(DestroyDB(dbname_, Options()));  // Clean up after ourselves
+  }
 
-  // Allows user to open databases with different configurations.
-  // e.g.: Can open a DB or a TtlDB, etc.
-  static void SetOpenDbFunction(OpenFuncPtr func) { OpenDb = func; }
+  std::shared_ptr<DB> OpenDb(const std::string& delim) {
+    Options options;
+    options.create_if_missing = true;
+    if (delim.size() == 1) {
+      options.merge_operator.reset(new StringAppendTESTOperator(delim[0]));
+    } else {
+      options.merge_operator.reset(new StringAppendTESTOperator(delim));
+    }
+#ifndef ROCKSDB_LITE  // TtlDb is not supported in Lite
+    if (use_ttl_) {
+      DBWithTTL* dbttl;
+      EXPECT_OK(DBWithTTL::Open(options, dbname_, &dbttl, 123456));
+      return std::shared_ptr<DB>(dbttl);
+    }
+#endif  // ROCKSDB_LITE
+    DB* db;
+    EXPECT_OK(DB::Open(options, dbname_, &db));
+    return std::shared_ptr<DB>(db);
+  }
 
- protected:
-  static OpenFuncPtr OpenDb;
+ private:
+  bool use_ttl_;
+  std::string dbname_;
 };
 
-StringAppendOperatorTest::OpenFuncPtr StringAppendOperatorTest::OpenDb =
-    nullptr;
 
 // THE TEST CASES BEGIN HERE
diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc
index 1c3b41ff29d..be8a3ce6d40 100644
--- a/utilities/options/options_util_test.cc
+++ b/utilities/options/options_util_test.cc
@@ -757,6 +757,7 @@ TEST_F(OptionsUtilTest, WalDirInOptins) {
   delete db;
   ASSERT_OK(LoadLatestOptions(dbname_, options.env, &db_opts, &cf_descs));
   ASSERT_EQ(db_opts.wal_dir, "");
+  ASSERT_OK(DestroyDB(dbname_, options));
 }
 
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/utilities/transactions/transaction_test.h b/utilities/transactions/transaction_test.h
index 0b86453a409..760c265a889 100644
--- a/utilities/transactions/transaction_test.h
+++ b/utilities/transactions/transaction_test.h
@@ -61,6 +61,7 @@ class TransactionTestBase : public ::testing::Test {
     options.write_buffer_size = 4 * 1024;
     options.unordered_write = write_ordering == kUnorderedWrite;
     options.level0_file_num_compaction_trigger = 2;
+    options.info_log_level = INFO_LEVEL;
     options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
     special_env.skip_fsync_ = true;
     env = new FaultInjectionTestEnv(&special_env);
diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc
index 86a9511a44b..d1e34c7e69c 100644
--- a/utilities/transactions/write_prepared_transaction_test.cc
+++ b/utilities/transactions/write_prepared_transaction_test.cc
@@ -288,6 +288,7 @@ TEST(WriteBatchWithIndex, SubBatchCnt) {
 
   delete cf_handle;
   delete db;
+  ASSERT_OK(DestroyDB(dbname, options));
 }
 
 TEST(CommitEntry64b, BasicTest) {