From bc6dcc493c977f3b06ad95abf493273a693b0e12 Mon Sep 17 00:00:00 2001
From: Konstantin Knizhnik
Date: Tue, 26 Jul 2022 13:40:44 +0300
Subject: [PATCH] Revert "Update last written LSN for gin/gist index metadata (#182)" (#183)

This reverts commit 7517d1cec45224841eac327cad7e0ddc81c734ff.

Revert "Large last written lsn cache (#177)"

This reverts commit 595ac69260719d8d7b43c09ab7dfd8f232542e50.
---
 contrib/neon/pagestore_smgr.c            |  26 ++-
 src/backend/access/gin/gininsert.c       |   3 +-
 src/backend/access/gist/gistbuild.c      |  10 +-
 src/backend/access/spgist/spginsert.c    |   4 +-
 src/backend/access/transam/xlog.c        | 214 ++---------------------
 src/backend/commands/dbcommands.c        |   5 +-
 src/backend/replication/walsender.c      |   7 -
 src/backend/storage/lmgr/lwlocknames.txt |   1 -
 src/backend/utils/misc/guc.c             |  10 --
 src/include/access/xlog.h                |  13 +-
 10 files changed, 37 insertions(+), 256 deletions(-)

diff --git a/contrib/neon/pagestore_smgr.c b/contrib/neon/pagestore_smgr.c
index 1beef4c95ec..5fdfea5e487 100644
--- a/contrib/neon/pagestore_smgr.c
+++ b/contrib/neon/pagestore_smgr.c
@@ -558,7 +558,7 @@ zenith_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
 	 * Remember the LSN on this page. When we read the page again, we must
 	 * read the same or newer version of it.
 	 */
-	SetLastWrittenLSNForBlock(lsn, reln->smgr_rnode.node.relNode, blocknum);
+	SetLastWrittenPageLSN(lsn);
 }
 
 
@@ -603,7 +603,7 @@ zm_adjust_lsn(XLogRecPtr lsn)
  * Return LSN for requesting pages and number of blocks from page server
  */
 static XLogRecPtr
-zenith_get_request_lsn(bool *latest, Oid rnode, BlockNumber blkno)
+zenith_get_request_lsn(bool *latest)
 {
 	XLogRecPtr	lsn;
 
@@ -630,9 +630,9 @@ zenith_get_request_lsn(bool *latest, Oid rnode, BlockNumber blkno)
 	 * so our request cannot concern those.
 	 */
 	*latest = true;
-	lsn = GetLastWrittenLSN(rnode, blkno);
+	lsn = GetLastWrittenPageLSN();
 	Assert(lsn != InvalidXLogRecPtr);
-	elog(DEBUG1, "zenith_get_request_lsn GetLastWrittenLSN lsn %X/%X ",
+	elog(DEBUG1, "zenith_get_request_lsn GetLastWrittenPageLSN lsn %X/%X ",
 		 (uint32) ((lsn) >> 32), (uint32) (lsn));
 
 	lsn = zm_adjust_lsn(lsn);
@@ -716,7 +716,7 @@ zenith_exists(SMgrRelation reln, ForkNumber forkNum)
 			return false;
 	}
 
-	request_lsn = zenith_get_request_lsn(&latest, reln->smgr_rnode.node.relNode, REL_METADATA_PSEUDO_BLOCKNO);
+	request_lsn = zenith_get_request_lsn(&latest);
 	{
 		ZenithExistsRequest request = {
 			.req.tag = T_ZenithExistsRequest,
@@ -791,7 +791,7 @@ zenith_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
 	 *
 	 * FIXME: This is currently not just an optimization, but required for
 	 * correctness. Postgres can call smgrnblocks() on the newly-created
-	 * relation. Currently, we don't call SetLastWrittenLSN() when a new
+	 * relation. Currently, we don't call SetLastWrittenPageLSN() when a new
 	 * relation created, so if we didn't remember the size in the relsize
 	 * cache, we might call smgrnblocks() on the newly-created relation before
 	 * the creation WAL record hass been received by the page server.
@@ -904,8 +904,6 @@ zenith_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno,
 	if (IS_LOCAL_REL(reln))
 		mdextend(reln, forkNum, blkno, buffer, skipFsync);
 #endif
-
-	SetLastWrittenLSNForRelation(lsn, reln->smgr_rnode.node.relNode);
 }
 
 /*
@@ -1081,7 +1079,7 @@ zenith_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno,
 			elog(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence);
 	}
 
-	request_lsn = zenith_get_request_lsn(&latest, reln->smgr_rnode.node.relNode, blkno);
+	request_lsn = zenith_get_request_lsn(&latest);
 	zenith_read_at_lsn(reln->smgr_rnode.node, forkNum, blkno, request_lsn, latest, buffer);
 
 #ifdef DEBUG_COMPARE_LOCAL
@@ -1286,7 +1284,7 @@ zenith_nblocks(SMgrRelation reln, ForkNumber forknum)
 		return n_blocks;
 	}
 
-	request_lsn = zenith_get_request_lsn(&latest, reln->smgr_rnode.node.relNode, REL_METADATA_PSEUDO_BLOCKNO);
+	request_lsn = zenith_get_request_lsn(&latest);
 	{
 		ZenithNblocksRequest request = {
 			.req.tag = T_ZenithNblocksRequest,
@@ -1346,7 +1344,7 @@ zenith_dbsize(Oid dbNode)
 	XLogRecPtr request_lsn;
 	bool		latest;
 
-	request_lsn = zenith_get_request_lsn(&latest, InvalidOid, REL_METADATA_PSEUDO_BLOCKNO);
+	request_lsn = zenith_get_request_lsn(&latest);
 	{
 		ZenithDbSizeRequest request = {
 			.req.tag = T_ZenithDbSizeRequest,
@@ -1433,11 +1431,7 @@ zenith_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
 	 */
 	XLogFlush(lsn);
 
-	/*
-	 * Truncate may affect several chunks of relations. So we should either update last written LSN for all of them,
-	 * either update LSN for "dummy" metadata block. Second approach seems to be more efficient.
-	 */
-	SetLastWrittenLSNForRelation(lsn, reln->smgr_rnode.node.relNode);
+	SetLastWrittenPageLSN(lsn);
 
 #ifdef DEBUG_COMPARE_LOCAL
 	if (IS_LOCAL_REL(reln))
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 75ea7c846a6..dfad28d1f61 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -421,9 +421,8 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 		log_newpage_range(index, MAIN_FORKNUM, 0,
 						  RelationGetNumberOfBlocks(index),
 						  true);
-		SetLastWrittenLSNForBlockRange(XactLastRecEnd, index->rd_smgr->smgr_rnode.node.relNode, 0, RelationGetNumberOfBlocks(index));
-		SetLastWrittenLSNForRelation(XactLastRecEnd, index->rd_smgr->smgr_rnode.node.relNode);
 	}
+	SetLastWrittenPageLSN(XactLastRecEnd);
 
 	smgr_end_unlogged_build(index->rd_smgr);
 
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 7dd87e7e1e7..8fb778012d1 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -335,11 +335,9 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 		log_newpage_range(index, MAIN_FORKNUM, 0,
 						  RelationGetNumberOfBlocks(index),
 						  true);
-		SetLastWrittenLSNForBlockRange(XactLastRecEnd,
-									   index->rd_smgr->smgr_rnode.node.relNode,
-									   0, RelationGetNumberOfBlocks(index));
-		SetLastWrittenLSNForRelation(XactLastRecEnd, index->rd_smgr->smgr_rnode.node.relNode);
 	}
+	SetLastWrittenPageLSN(XactLastRecEnd);
+
 	smgr_end_unlogged_build(index->rd_smgr);
 }
 
@@ -471,9 +469,7 @@ gist_indexsortbuild(GISTBuildState *state)
 		lsn = log_newpage(&state->indexrel->rd_node, MAIN_FORKNUM, GIST_ROOT_BLKNO,
 						  pagestate->page, true);
-		SetLastWrittenLSNForBlock(lsn, state->indexrel->rd_smgr->smgr_rnode.node.relNode,
-								  GIST_ROOT_BLKNO);
-		SetLastWrittenLSNForRelation(lsn, state->indexrel->rd_smgr->smgr_rnode.node.relNode);
+		SetLastWrittenPageLSN(lsn);
 	}
 
 	pfree(pagestate->page);
 
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index b01479f802a..d85dd54e4df 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -143,10 +143,8 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 		log_newpage_range(index, MAIN_FORKNUM, 0,
 						  RelationGetNumberOfBlocks(index),
 						  true);
-		SetLastWrittenLSNForBlockRange(XactLastRecEnd, index->rd_smgr->smgr_rnode.node.relNode,
-									   0, RelationGetNumberOfBlocks(index));
-		SetLastWrittenLSNForRelation(XactLastRecEnd, index->rd_smgr->smgr_rnode.node.relNode);
 	}
+	SetLastWrittenPageLSN(XactLastRecEnd);
 
 	smgr_end_unlogged_build(index->rd_smgr);
 
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 335ac47e04d..88cb16d3ea8 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -112,7 +112,6 @@ int	wal_retrieve_retry_interval = 5000;
 int			max_slot_wal_keep_size_mb = -1;
 bool		track_wal_io_timing = false;
 uint64		predefined_sysidentifier;
-int			lastWrittenLsnCacheSize;
 
 #ifdef WAL_DEBUG
 bool		XLOG_DEBUG = false;
@@ -182,28 +181,6 @@ const struct config_enum_entry recovery_target_action_options[] = {
 	{NULL, 0, false}
 };
 
-
-/*
- * We are not taken in account dbnode, spcnode, forknum fields of
- * relation tag, because possibility of collision is assumed to be small
- * and should not affect performance. And reducing cache key size speed-up
- * hash calculation and comparison.
- */
-typedef struct LastWrittenLsnCacheKey
-{
-	Oid relid;
-	BlockNumber bucket;
-} LastWrittenLsnCacheKey;
-
-typedef struct LastWrittenLsnCacheEntry
-{
-	LastWrittenLsnCacheKey key;
-	XLogRecPtr lsn;
-	/* L2-List for LRU replacement algorithm */
-	struct LastWrittenLsnCacheEntry* next;
-	struct LastWrittenLsnCacheEntry* prev;
-} LastWrittenLsnCacheEntry;
-
 /*
  * Statistics for current checkpoint are collected in this global struct.
  * Because only the checkpointer or a stand-alone backend can perform
@@ -773,17 +750,6 @@ typedef struct XLogCtlData
 	XLogRecPtr	lastFpwDisableRecPtr;
 
 	XLogRecPtr	lastWrittenPageLSN;
-	/*
-	 * Maximal last written LSN for pages not present in lastWrittenLsnCache
-	 */
-	XLogRecPtr maxLastWrittenLsn;
-
-	/*
-	 * Double linked list to implement LRU replacement policy for last written LSN cache.
-	 * Access to this list as well as to last written LSN cache is protected by 'LastWrittenLsnLock'.
-	 */
-	LastWrittenLsnCacheEntry lastWrittenLsnLRU;
-
 	/* neon: copy of startup's RedoStartLSN for walproposer's use */
 	XLogRecPtr	RedoStartLSN;
 
@@ -795,7 +761,6 @@ typedef struct XLogCtlData
 	slock_t		info_lck;		/* locks shared variables shown above */
 } XLogCtlData;
-
 static XLogCtlData *XLogCtl = NULL;
 
 /* a private copy of XLogCtl->Insert.WALInsertLocks, for convenience */
 static WALInsertLockPadded *WALInsertLocks = NULL;
@@ -806,19 +771,6 @@ static WALInsertLockPadded *WALInsertLocks = NULL;
  */
 static ControlFileData *ControlFile = NULL;
 
-#define LAST_WRITTEN_LSN_CACHE_BUCKET 1024 /* blocks = 8Mb */
-
-
-/*
- * Cache of last written LSN for each relation chunk (hash bucket).
- * Also to provide request LSN for smgrnblocks, smgrexists there is pseudokey=InvalidBlockId which stores LSN of last
- * relation metadata update.
- * Size of the cache is limited by GUC variable lastWrittenLsnCacheSize ("lsn_cache_size"),
- * pages are replaced using LRU algorithm, based on L2-list.
- * Access to this cache is protected by 'LastWrittenLsnLock'.
- */
-static HTAB *lastWrittenLsnCache;
-
 /*
  * Calculate the amount of space left on the page after 'endptr'. Beware
  * multiple evaluation!
@@ -5179,8 +5131,11 @@ LocalProcessControlFile(bool reset)
 	ReadControlFile();
 }
 
-static Size
-XLOGCtlShmemSize(void)
+/*
+ * Initialization of shared memory for XLOG
+ */
+Size
+XLOGShmemSize(void)
 {
 	Size		size;
 
@@ -5220,16 +5175,6 @@ XLOGCtlShmemSize(void)
 	return size;
 }
 
-/*
- * Initialization of shared memory for XLOG
- */
-Size
-XLOGShmemSize(void)
-{
-	return XLOGCtlShmemSize() +
-		hash_estimate_size(lastWrittenLsnCacheSize, sizeof(LastWrittenLsnCacheEntry));
-}
-
 void
 XLOGShmemInit(void)
 {
@@ -5259,15 +5204,6 @@ XLOGShmemInit(void)
 
 	XLogCtl = (XLogCtlData *)
 		ShmemInitStruct("XLOG Ctl", XLOGShmemSize(), &foundXLog);
-	{
-		static HASHCTL info;
-		info.keysize = sizeof(LastWrittenLsnCacheKey);
-		info.entrysize = sizeof(LastWrittenLsnCacheEntry);
-		lastWrittenLsnCache = ShmemInitHash("last_written_lsn_cache",
-											lastWrittenLsnCacheSize, lastWrittenLsnCacheSize,
-											&info,
-											HASH_ELEM | HASH_BLOBS);
-	}
 	localControlFile = ControlFile;
 	ControlFile = (ControlFileData *)
 		ShmemInitStruct("Control File", sizeof(ControlFileData), &foundCFile);
@@ -8152,8 +8088,7 @@ StartupXLOG(void)
 
 	XLogCtl->LogwrtRqst.Write = EndOfLog;
 	XLogCtl->LogwrtRqst.Flush = EndOfLog;
-	XLogCtl->maxLastWrittenLsn = EndOfLog;
-	XLogCtl->lastWrittenLsnLRU.next = XLogCtl->lastWrittenLsnLRU.prev = &XLogCtl->lastWrittenLsnLRU;
+	XLogCtl->lastWrittenPageLSN = EndOfLog;
 
 	LocalSetXLogInsertAllowed();
 
@@ -8876,144 +8811,29 @@ GetInsertRecPtr(void)
 }
 
 /*
- * GetLastWrittenLSN -- Returns maximal LSN of written page.
- * It returns an upper bound for the last written LSN of a given page,
- * either from a cached last written LSN or a global maximum last written LSN.
- * If rnode is InvalidOid then we calculate maximum among all cached LSN and maxLastWrittenLsn.
- * If cache is large enough ,iterting through all hash items may be rather expensive.
- * But GetLastWrittenLSN(InvalidOid) is used only by zenith_dbsize which is not performance critical.
+ * GetLastWrittenPageLSN -- Returns maximal LSN of written page
  */
 XLogRecPtr
-GetLastWrittenLSN(Oid rnode, BlockNumber blkno)
+GetLastWrittenPageLSN(void)
 {
 	XLogRecPtr	lsn;
-	LastWrittenLsnCacheEntry* entry;
-
-	LWLockAcquire(LastWrittenLsnLock, LW_SHARED);
-
-	/* Maximal last written LSN among all non-cached pages */
-	lsn = XLogCtl->maxLastWrittenLsn;
-
-	if (rnode != InvalidOid)
-	{
-		LastWrittenLsnCacheKey key;
-		key.relid = rnode;
-		key.bucket = blkno / LAST_WRITTEN_LSN_CACHE_BUCKET;
-		entry = hash_search(lastWrittenLsnCache, &key, HASH_FIND, NULL);
-		if (entry != NULL)
-			lsn = entry->lsn;
-	}
-	else
-	{
-		HASH_SEQ_STATUS seq;
-		/* Find maximum of all cached LSNs */
-		hash_seq_init(&seq, lastWrittenLsnCache);
-		while ((entry = (LastWrittenLsnCacheEntry *) hash_seq_search(&seq)) != NULL)
-		{
-			if (entry->lsn > lsn)
-				lsn = entry->lsn;
-		}
-	}
-	LWLockRelease(LastWrittenLsnLock);
+	SpinLockAcquire(&XLogCtl->info_lck);
+	lsn = XLogCtl->lastWrittenPageLSN;
+	SpinLockRelease(&XLogCtl->info_lck);
 
 	return lsn;
 }
 
 /*
- * SetLastWrittenLSNForBlockRange -- Set maximal LSN of written page range.
- * We maintain cache of last written LSNs with limited size and LRU replacement
- * policy. To reduce cache size we store max LSN not for each page, but for
- * bucket (1024 blocks). This cache allows to use old LSN when
- * requesting pages of unchanged or appended relations.
- *
- * rnode can be InvalidOid, in this case maxLastWrittenLsn is updated.
- * SetLastWrittenLsn with InvalidOid
- * is used by createdb and dbase_redo functions.
- */
-void
-SetLastWrittenLSNForBlockRange(XLogRecPtr lsn, Oid rnode, BlockNumber from, BlockNumber till)
-{
-	if (lsn == InvalidXLogRecPtr)
-		return;
-
-	LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
-	if (rnode == InvalidOid)
-	{
-		if (lsn > XLogCtl->maxLastWrittenLsn)
-			XLogCtl->maxLastWrittenLsn = lsn;
-	}
-	else
-	{
-		LastWrittenLsnCacheEntry* entry;
-		LastWrittenLsnCacheKey key;
-		bool found;
-		BlockNumber bucket;
-
-		key.relid = rnode;
-		for (bucket = from / LAST_WRITTEN_LSN_CACHE_BUCKET;
-			 bucket <= till / LAST_WRITTEN_LSN_CACHE_BUCKET;
-			 bucket++)
-		{
-			key.bucket = bucket;
-			entry = hash_search(lastWrittenLsnCache, &key, HASH_ENTER, &found);
-			if (found)
-			{
-				if (lsn > entry->lsn)
-					entry->lsn = lsn;
-				/* Unlink from LRU list */
-				entry->next->prev = entry->prev;
-				entry->prev->next = entry->next;
-			}
-			else
-			{
-				entry->lsn = lsn;
-				if (hash_get_num_entries(lastWrittenLsnCache) > lastWrittenLsnCacheSize)
-				{
-					/* Replace least recently used entry */
-					LastWrittenLsnCacheEntry* victim = XLogCtl->lastWrittenLsnLRU.prev;
-					/* Adjust max LSN for not cached relations/chunks if needed */
-					if (victim->lsn > XLogCtl->maxLastWrittenLsn)
-						XLogCtl->maxLastWrittenLsn = victim->lsn;
-
-					victim->next->prev = victim->prev;
-					victim->prev->next = victim->next;
-					hash_search(lastWrittenLsnCache, victim, HASH_REMOVE, NULL);
-				}
-			}
-			/* Link to the head of LRU list */
-			entry->next = XLogCtl->lastWrittenLsnLRU.next;
-			entry->prev = &XLogCtl->lastWrittenLsnLRU;
-			XLogCtl->lastWrittenLsnLRU.next = entry->next->prev = entry;
-		}
-	}
-	LWLockRelease(LastWrittenLsnLock);
-}
-
-/*
- * SetLastWrittenLSNForBlock -- Set maximal LSN for block
+ * SetLastWrittenPageLSN -- Set maximal LSN of written page
  */
 void
-SetLastWrittenLSNForBlock(XLogRecPtr lsn, Oid rnode, BlockNumber blkno)
+SetLastWrittenPageLSN(XLogRecPtr lsn)
 {
-	SetLastWrittenLSNForBlockRange(lsn, rnode, blkno, blkno);
-}
-
-/*
- * SetLastWrittenLSNForRelation -- Set maximal LSN for relation metadata
- */
-void
-SetLastWrittenLSNForRelation(XLogRecPtr lsn, Oid rnode)
-{
-	SetLastWrittenLSNForBlock(lsn, rnode, REL_METADATA_PSEUDO_BLOCKNO);
-}
-
-/*
- * SetLastWrittenLSNForDatabase -- Set maximal LSN for the whole database
- */
-void
-SetLastWrittenLSNForDatabase(XLogRecPtr lsn)
-{
-	SetLastWrittenLSNForBlock(lsn, InvalidOid, 0);
+	SpinLockAcquire(&XLogCtl->info_lck);
+	if (lsn > XLogCtl->lastWrittenPageLSN)
+		XLogCtl->lastWrittenPageLSN = lsn;
+	SpinLockRelease(&XLogCtl->info_lck);
 }
 
 /*
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 0f8293072d0..509e482c355 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -674,7 +674,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
 
 			lsn = XLogInsert(RM_DBASE_ID,
 							 XLOG_DBASE_CREATE | XLR_SPECIAL_REL_UPDATE);
-			SetLastWrittenLSNForDatabase(lsn);
+			SetLastWrittenPageLSN(lsn);
 		}
 	}
 	table_endscan(scan);
@@ -2224,7 +2224,8 @@ dbase_redo(XLogReaderState *record)
 		 */
 		{
 			XLogRecPtr lsn = record->EndRecPtr;
-			SetLastWrittenLSNForDatabase(lsn);
+
+			SetLastWrittenPageLSN(lsn);
 		}
 	}
 	else if (info == XLOG_DBASE_DROP)
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 8f9de58d158..a841beebf3f 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -2058,13 +2058,6 @@ ProcessStandbyReply(XLogRecPtr writePtr,
 	if (!am_cascading_walsender)
 		SyncRepReleaseWaiters();
 
-	/*
-	 * walproposer use trunclateLsn instead of flushPtr for confirmed
-	 * received location, so we shouldn't update restart_lsn here.
-	 */
-	if (am_wal_proposer)
-		return;
-
 	/*
 	 * walproposer use trunclateLsn instead of flushPtr for confirmed
 	 * received location, so we shouldn't update restart_lsn here.
diff --git a/src/backend/storage/lmgr/lwlocknames.txt b/src/backend/storage/lmgr/lwlocknames.txt
index b4652c33ff6..6c7cf6c2956 100644
--- a/src/backend/storage/lmgr/lwlocknames.txt
+++ b/src/backend/storage/lmgr/lwlocknames.txt
@@ -53,4 +53,3 @@ XactTruncationLock					44
 # 45 was XactTruncationLock until removal of BackendRandomLock
 WrapLimitsVacuumLock				46
 NotifyQueueTailLock					47
-LastWrittenLsnLock					48
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index e2c18ad85b7..368e91531ed 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2367,16 +2367,6 @@ static struct config_int ConfigureNamesInt[] =
 		NULL, NULL, NULL
 	},
 
-	{
-		{"lsn_cache_size", PGC_POSTMASTER, UNGROUPED,
-			gettext_noop("Size of las written LSN cache used by Neon."),
-			NULL
-		},
-		&lastWrittenLsnCacheSize,
-		1024, 10, 1000000, /* 1024 is enough to hold 10GB database with 8Mb bucket */
-		NULL, NULL, NULL
-	},
-
 	{
 		{"temp_buffers", PGC_USERSET, RESOURCES_MEM,
 			gettext_noop("Sets the maximum number of temporary buffers used by each session."),
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 9cc214352f2..66fe9dfcd9e 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -31,11 +31,6 @@ extern int	sync_method;
 
 extern PGDLLIMPORT TimeLineID ThisTimeLineID;	/* current TLI */
 
-/*
- * Pseudo block number used to associate LSN with relation metadata (relation size)
- */
-#define REL_METADATA_PSEUDO_BLOCKNO InvalidBlockNumber
-
 /*
  * Prior to 8.4, all activity during recovery was carried out by the startup
  * process. This local variable continues to be used in many parts of the
@@ -137,7 +132,6 @@ extern char *PrimaryConnInfo;
 extern char *PrimarySlotName;
 extern bool wal_receiver_create_temp_slot;
 extern bool track_wal_io_timing;
-extern int lastWrittenLsnCacheSize;
 
 /* indirectly set via GUC system */
 extern TransactionId recoveryTargetXid;
@@ -357,11 +351,8 @@ extern XLogRecPtr GetFlushRecPtr(void);
 extern XLogRecPtr GetLastImportantRecPtr(void);
 extern void RemovePromoteSignalFiles(void);
 
-extern void SetLastWrittenLSNForBlock(XLogRecPtr lsn, Oid relfilenode, BlockNumber blkno);
-extern void SetLastWrittenLSNForBlockRange(XLogRecPtr lsn, Oid relfilenode, BlockNumber from, BlockNumber till);
-extern void SetLastWrittenLSNForDatabase(XLogRecPtr lsn);
-extern void SetLastWrittenLSNForRelation(XLogRecPtr lsn, Oid relfilenode);
-extern XLogRecPtr GetLastWrittenLSN(Oid relfilenode, BlockNumber blkno);
+extern void SetLastWrittenPageLSN(XLogRecPtr lsn);
+extern XLogRecPtr GetLastWrittenPageLSN(void);
 
 extern XLogRecPtr GetRedoStartLsn(void);
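
Not part of the patch itself: the revert replaces the per-relation, per-block-range LRU
cache with the single shared value that SetLastWrittenPageLSN() and GetLastWrittenPageLSN()
maintain under XLogCtl->info_lck. The sketch below is a minimal standalone model of that
restored behaviour, not code from the tree; the lsn_t type, the pthread mutex, and the
lower-case function names are illustrative stand-ins for XLogRecPtr, the info_lck spinlock,
and the two functions added back by this patch.

/*
 * Standalone model of the restored mechanism: one shared "last written page
 * LSN", advanced monotonically on every page write and read back as the
 * request LSN for page-server reads.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lsn_t;			/* stand-in for XLogRecPtr */

static lsn_t last_written_page_lsn = 0;
static pthread_mutex_t lsn_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors SetLastWrittenPageLSN(): only ever move the value forward. */
static void
set_last_written_page_lsn(lsn_t lsn)
{
	pthread_mutex_lock(&lsn_lock);
	if (lsn > last_written_page_lsn)
		last_written_page_lsn = lsn;
	pthread_mutex_unlock(&lsn_lock);
}

/* Mirrors GetLastWrittenPageLSN(): the request LSN used for any page read. */
static lsn_t
get_last_written_page_lsn(void)
{
	lsn_t		lsn;

	pthread_mutex_lock(&lsn_lock);
	lsn = last_written_page_lsn;
	pthread_mutex_unlock(&lsn_lock);
	return lsn;
}

int
main(void)
{
	set_last_written_page_lsn(0x100);	/* page A WAL-logged at 0x100 */
	set_last_written_page_lsn(0x80);	/* page B at 0x80: ignored, older */

	/* Every subsequent read request uses the single global value, 0x100. */
	printf("request LSN = %#llx\n",
		   (unsigned long long) get_last_written_page_lsn());
	return 0;
}

The trade-off is visible in main(): with one global value, every read request uses the most
recent write LSN even for relations that have not changed recently, which is exactly what
the reverted per-block-range cache was meant to avoid (per its removed comments); the revert
accepts that cost in exchange for much simpler shared state.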