From 0a3c4071145bb050b7e5e8101471a592e5639b0d Mon Sep 17 00:00:00 2001 From: Amul Sul Date: Mon, 27 Jul 2020 02:13:36 -0400 Subject: [PATCH v8 4/5] Error or Assert before START_CRIT_SECTION for WAL write Based on the following criteria, added an Assert or an ERROR check before entering a critical section for a WAL write while the system is in a WAL-prohibited state: - Added an ERROR for functions that can be reached without a valid XID, e.g. in the case of VACUUM or CREATE INDEX CONCURRENTLY. For that, added the common static inline function CheckWALPermitted(). - Added an Assert for functions that cannot be reached without a valid XID; the Assert relies on that XID validity. For that, added AssertWALPermittedHaveXID(). To enforce the rule that one of these checks precedes a critical section containing a WAL write, a new assert-only flag walpermit_checked_state is added. If the check is missing, XLogBeginInsert() will fail an assertion when called inside a critical section. If the WAL insert is not performed inside a critical section, the above check is unnecessary: we can rely on XLogBeginInsert() itself to perform the check and report an error. 
--- contrib/pg_surgery/heap_surgery.c | 10 ++++-- src/backend/access/brin/brin.c | 4 +++ src/backend/access/brin/brin_pageops.c | 21 ++++++++++-- src/backend/access/brin/brin_revmap.c | 10 +++++- src/backend/access/gin/ginbtree.c | 15 +++++++-- src/backend/access/gin/gindatapage.c | 18 +++++++++-- src/backend/access/gin/ginfast.c | 11 +++++-- src/backend/access/gin/gininsert.c | 4 +++ src/backend/access/gin/ginutil.c | 9 +++++- src/backend/access/gin/ginvacuum.c | 11 ++++++- src/backend/access/gist/gist.c | 25 ++++++++++++--- src/backend/access/gist/gistvacuum.c | 13 ++++++-- src/backend/access/hash/hash.c | 19 +++++++++-- src/backend/access/hash/hashinsert.c | 9 +++++- src/backend/access/hash/hashovfl.c | 22 ++++++++++--- src/backend/access/hash/hashpage.c | 9 ++++++ src/backend/access/heap/heapam.c | 26 ++++++++++++++- src/backend/access/heap/pruneheap.c | 12 +++++-- src/backend/access/heap/vacuumlazy.c | 18 +++++++++-- src/backend/access/heap/visibilitymap.c | 22 +++++++++++-- src/backend/access/nbtree/nbtdedup.c | 3 ++ src/backend/access/nbtree/nbtinsert.c | 10 +++++- src/backend/access/nbtree/nbtpage.c | 39 +++++++++++++++++++---- src/backend/access/spgist/spgdoinsert.c | 13 ++++++++ src/backend/access/spgist/spgvacuum.c | 22 +++++++++++-- src/backend/access/transam/multixact.c | 5 ++- src/backend/access/transam/twophase.c | 9 ++++++ src/backend/access/transam/varsup.c | 4 +++ src/backend/access/transam/walprohibit.c | 10 ++++++ src/backend/access/transam/xact.c | 6 ++++ src/backend/access/transam/xlog.c | 27 ++++++++++++---- src/backend/access/transam/xloginsert.c | 13 ++++++-- src/backend/commands/sequence.c | 16 ++++++++++ src/backend/commands/variable.c | 9 ++++-- src/backend/postmaster/checkpointer.c | 4 +++ src/backend/storage/buffer/bufmgr.c | 10 +++--- src/backend/storage/freespace/freespace.c | 10 +++++- src/backend/storage/lmgr/lock.c | 6 ++-- src/backend/utils/cache/relmapper.c | 3 ++ src/include/miscadmin.h | 27 ++++++++++++++++ 40 files changed, 
463 insertions(+), 71 deletions(-) diff --git a/contrib/pg_surgery/heap_surgery.c b/contrib/pg_surgery/heap_surgery.c index eb96b4bb36d..53d8c9cea28 100644 --- a/contrib/pg_surgery/heap_surgery.c +++ b/contrib/pg_surgery/heap_surgery.c @@ -14,6 +14,7 @@ #include "access/heapam.h" #include "access/visibilitymap.h" +#include "access/walprohibit.h" #include "catalog/pg_am_d.h" #include "catalog/pg_proc_d.h" #include "miscadmin.h" @@ -89,6 +90,7 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) OffsetNumber curr_start_ptr, next_start_ptr; bool include_this_tid[MaxHeapTuplesPerPage]; + bool needwal; if (RecoveryInProgress()) ereport(ERROR, @@ -100,6 +102,7 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) sanity_check_tid_array(ta, &ntids); rel = relation_open(relid, RowExclusiveLock); + needwal = RelationNeedsWAL(rel); /* Check target relation. */ sanity_check_relation(rel); @@ -217,6 +220,9 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) if (heap_force_opt == HEAP_FORCE_KILL && PageIsAllVisible(page)) visibilitymap_pin(rel, blkno, &vmbuf); + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) from here until all the changes are logged. */ START_CRIT_SECTION(); @@ -297,12 +303,12 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) MarkBufferDirty(buf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) log_newpage_buffer(buf, true); } /* WAL log the VM page if it was modified. 
*/ - if (did_modify_vm && RelationNeedsWAL(rel)) + if (did_modify_vm && needwal) log_newpage_buffer(vmbuf, false); END_CRIT_SECTION(); diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 1f72562c603..47142193706 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -24,6 +24,7 @@ #include "access/relscan.h" #include "access/table.h" #include "access/tableam.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "catalog/index.h" #include "catalog/pg_am.h" @@ -759,6 +760,9 @@ brinbuildempty(Relation index) ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL); LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + /* Building indexes will have an XID */ + AssertWALPermittedHaveXID(); + /* Initialize and xlog metabuffer. */ START_CRIT_SECTION(); brin_metapage_init(BufferGetPage(metabuf), BrinGetPagesPerRange(index), diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c index 87de0b855b5..8b377a679ab 100644 --- a/src/backend/access/brin/brin_pageops.c +++ b/src/backend/access/brin/brin_pageops.c @@ -14,6 +14,7 @@ #include "access/brin_pageops.h" #include "access/brin_revmap.h" #include "access/brin_xlog.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "miscadmin.h" #include "storage/bufmgr.h" @@ -65,6 +66,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, Buffer newbuf; BlockNumber newblk = InvalidBlockNumber; bool extended; + bool needwal = RelationNeedsWAL(idxrel); Assert(newsz == MAXALIGN(newsz)); @@ -176,13 +178,16 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, if (((BrinPageFlags(oldpage) & BRIN_EVACUATE_PAGE) == 0) && brin_can_do_samepage_update(oldbuf, origsz, newsz)) { + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); if (!PageIndexTupleOverwrite(oldpage, oldoff, (Item) unconstify(BrinTuple *, newtup), newsz)) elog(ERROR, "failed to replace BRIN tuple"); 
MarkBufferDirty(oldbuf); /* XLOG stuff */ - if (RelationNeedsWAL(idxrel)) + if (needwal) { xl_brin_samepage_update xlrec; XLogRecPtr recptr; @@ -240,6 +245,9 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, revmapbuf = brinLockRevmapPageForUpdate(revmap, heapBlk); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* @@ -267,7 +275,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, MarkBufferDirty(revmapbuf); /* XLOG stuff */ - if (RelationNeedsWAL(idxrel)) + if (needwal) { xl_brin_update xlrec; XLogRecPtr recptr; @@ -351,6 +359,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, Buffer revmapbuf; ItemPointerData tid; bool extended; + bool needwal; Assert(itemsz == MAXALIGN(itemsz)); @@ -405,6 +414,10 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, page = BufferGetPage(*buffer); blk = BufferGetBlockNumber(*buffer); + needwal = RelationNeedsWAL(idxrel); + if (needwal) + CheckWALPermitted(); + /* Execute the actual insertion */ START_CRIT_SECTION(); if (extended) @@ -424,7 +437,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, MarkBufferDirty(revmapbuf); /* XLOG stuff */ - if (RelationNeedsWAL(idxrel)) + if (needwal) { xl_brin_insert xlrec; XLogRecPtr recptr; @@ -881,6 +894,8 @@ brin_initialize_empty_new_buffer(Relation idxrel, Buffer buffer) "brin_initialize_empty_new_buffer: initializing blank page %u", BufferGetBlockNumber(buffer))); + CheckWALPermitted(); + START_CRIT_SECTION(); page = BufferGetPage(buffer); brin_page_init(page, BRIN_PAGETYPE_REGULAR); diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c index 35746714a7c..fd766da445d 100644 --- a/src/backend/access/brin/brin_revmap.c +++ b/src/backend/access/brin/brin_revmap.c @@ -26,6 +26,7 @@ #include "access/brin_tuple.h" #include "access/brin_xlog.h" #include "access/rmgr.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "miscadmin.h" #include "storage/bufmgr.h" @@ -340,6 
+341,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) OffsetNumber revmapOffset; OffsetNumber regOffset; ItemId lp; + bool needwal; revmap = brinRevmapInitialize(idxrel, &pagesPerRange, NULL); @@ -404,6 +406,10 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) * crashed or aborted summarization; remove them silently. */ + needwal = RelationNeedsWAL(idxrel); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); ItemPointerSetInvalid(&invalidIptr); @@ -415,7 +421,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) MarkBufferDirty(regBuf); MarkBufferDirty(revmapBuf); - if (RelationNeedsWAL(idxrel)) + if (needwal) { xl_brin_desummarize xlrec; XLogRecPtr recptr; @@ -613,6 +619,8 @@ revmap_physical_extend(BrinRevmap *revmap) return; } + AssertWALPermittedHaveXID(); + /* * Ok, we have now locked the metapage and the target block. Re-initialize * the target block as a revmap page, and update the metapage. diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index 82788a5c367..f31590dcd75 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -16,6 +16,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "miscadmin.h" #include "storage/predicate.h" @@ -332,6 +333,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, { Page page = BufferGetPage(stack->buffer); bool result; + bool needwal; GinPlaceToPageRC rc; uint16 xlflags = 0; Page childpage = NULL; @@ -377,6 +379,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, insertdata, updateblkno, &ptp_workspace, &newlpage, &newrpage); + needwal = RelationNeedsWAL(btree->index) && !btree->isBuild; if (rc == GPTP_NO_WORK) { @@ -385,10 +388,13 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, } else if (rc == GPTP_INSERT) { + if (needwal) + CheckWALPermitted(); + /* It will fit, perform the insertion */ 
START_CRIT_SECTION(); - if (RelationNeedsWAL(btree->index) && !btree->isBuild) + if (needwal) { XLogBeginInsert(); XLogRegisterBuffer(0, stack->buffer, REGBUF_STANDARD); @@ -409,7 +415,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, MarkBufferDirty(childbuf); } - if (RelationNeedsWAL(btree->index) && !btree->isBuild) + if (needwal) { XLogRecPtr recptr; ginxlogInsert xlrec; @@ -547,6 +553,9 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, } } + if (needwal) + CheckWALPermitted(); + /* * OK, we have the new contents of the left page in a temporary copy * now (newlpage), and likewise for the new contents of the @@ -587,7 +596,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, } /* write WAL record */ - if (RelationNeedsWAL(btree->index) && !btree->isBuild) + if (needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 7a2690e97f2..0abc5990100 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -16,6 +16,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "lib/ilist.h" #include "miscadmin.h" @@ -811,6 +812,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) if (removedsomething) { bool modified; + bool needwal; /* * Make sure we have a palloc'd copy of all segments, after the first @@ -835,8 +837,12 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) } } - if (RelationNeedsWAL(indexrel)) + needwal = RelationNeedsWAL(indexrel); + if (needwal) + { + CheckWALPermitted(); computeLeafRecompressWALData(leaf); + } /* Apply changes to page */ START_CRIT_SECTION(); @@ -845,7 +851,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) MarkBufferDirty(buffer); - if (RelationNeedsWAL(indexrel)) + if (needwal) { XLogRecPtr recptr; @@ -1777,6 +1783,7 @@ 
createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, int nrootitems; int rootsize; bool is_build = (buildStats != NULL); + bool needwal; /* Construct the new root page in memory first. */ tmppage = (Page) palloc(BLCKSZ); @@ -1825,12 +1832,17 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, */ PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno); + needwal = RelationNeedsWAL(index) && !is_build; + + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); PageRestoreTempPage(tmppage, page); MarkBufferDirty(buffer); - if (RelationNeedsWAL(index) && !is_build) + if (needwal) { XLogRecPtr recptr; ginxlogCreatePostingTree data; diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index 2e41b34d8d5..b8c2a993408 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -20,6 +20,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" +#include "access/walprohibit.h" #include "access/xlog.h" #include "access/xloginsert.h" #include "catalog/pg_am.h" @@ -68,6 +69,8 @@ writeListPage(Relation index, Buffer buffer, PGAlignedBlock workspace; char *ptr; + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); GinInitBuffer(buffer, GIN_LIST); @@ -548,6 +551,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, Page metapage; GinMetaPageData *metadata; BlockNumber blknoToDelete; + bool needwal = RelationNeedsWAL(index); metapage = BufferGetPage(metabuffer); metadata = GinPageGetMeta(metapage); @@ -586,8 +590,11 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, * prepare the XLogInsert machinery for that before entering the * critical section. 
*/ - if (RelationNeedsWAL(index)) + if (needwal) + { + CheckWALPermitted(); XLogEnsureRecordSpace(data.ndeleted, 0); + } START_CRIT_SECTION(); @@ -625,7 +632,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, MarkBufferDirty(buffers[i]); } - if (RelationNeedsWAL(index)) + if (needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 77433dc8a41..989d82ffcaf 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -17,6 +17,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" #include "access/tableam.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "catalog/index.h" #include "miscadmin.h" @@ -447,6 +448,9 @@ ginbuildempty(Relation index) ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL); LockBuffer(RootBuffer, BUFFER_LOCK_EXCLUSIVE); + /* Building indexes will have an XID */ + AssertWALPermittedHaveXID(); + /* Initialize and xlog metabuffer and root buffer. 
*/ START_CRIT_SECTION(); GinInitMetabuffer(MetaBuffer); diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index ef9b56fd363..b48ea1a746a 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -17,6 +17,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" #include "access/reloptions.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" @@ -659,12 +660,18 @@ ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build) Buffer metabuffer; Page metapage; GinMetaPageData *metadata; + bool needwal; metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO); LockBuffer(metabuffer, GIN_EXCLUSIVE); metapage = BufferGetPage(metabuffer); metadata = GinPageGetMeta(metapage); + needwal = RelationNeedsWAL(index) && !is_build; + + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); metadata->nTotalPages = stats->nTotalPages; @@ -684,7 +691,7 @@ ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build) MarkBufferDirty(metabuffer); - if (RelationNeedsWAL(index) && !is_build) + if (needwal) { XLogRecPtr recptr; ginxlogUpdateMeta data; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 0935a6d9e53..d91ca2b391c 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -16,6 +16,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "commands/vacuum.h" #include "miscadmin.h" @@ -136,6 +137,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn Page page, parentPage; BlockNumber rightlink; + bool needwal; /* * This function MUST be called only if someone of parent pages hold @@ -159,6 +161,10 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn */ PredicateLockPageCombine(gvs->index, deleteBlkno, 
rightlink); + needwal = RelationNeedsWAL(gvs->index); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* Unlink the page by changing left sibling's rightlink */ @@ -195,7 +201,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn MarkBufferDirty(lBuffer); MarkBufferDirty(dBuffer); - if (RelationNeedsWAL(gvs->index)) + if (needwal) { XLogRecPtr recptr; ginxlogDeletePage data; @@ -650,6 +656,9 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, if (resPage) { + if (RelationNeedsWAL(gvs.index)) + CheckWALPermitted(); + START_CRIT_SECTION(); PageRestoreTempPage(resPage, page); MarkBufferDirty(buffer); diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 25b42e38f22..4a870a062ba 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -16,6 +16,7 @@ #include "access/gist_private.h" #include "access/gistscan.h" +#include "access/walprohibit.h" #include "catalog/pg_collation.h" #include "commands/vacuum.h" #include "miscadmin.h" @@ -135,6 +136,9 @@ gistbuildempty(Relation index) buffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + /* Building indexes will have an XID */ + AssertWALPermittedHaveXID(); + /* Initialize and xlog buffer */ START_CRIT_SECTION(); GISTInitBuffer(buffer, F_LEAF); @@ -234,6 +238,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, XLogRecPtr recptr; int i; bool is_split; + bool needwal = RelationNeedsWAL(rel); /* * Refuse to modify a page that's incompletely split. This should not @@ -465,9 +470,12 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, * insertion for that. NB: The number of pages and data segments * specified here must match the calculations in gistXLogSplit()! 
*/ - if (!is_build && RelationNeedsWAL(rel)) + if (!is_build && needwal) XLogEnsureRecordSpace(npage, 1 + npage * 2); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* @@ -500,7 +508,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, recptr = GistBuildLSN; else { - if (RelationNeedsWAL(rel)) + if (needwal) recptr = gistXLogSplit(is_leaf, dist, oldrlink, oldnsn, leftchildbuf, markfollowright); @@ -526,6 +534,9 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, } else { + if (needwal) + CheckWALPermitted(); + /* * Enough space. We always get here if ntup==0. */ @@ -567,7 +578,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, recptr = GistBuildLSN; else { - if (RelationNeedsWAL(rel)) + if (needwal) { OffsetNumber ndeloffs = 0, deloffs[1]; @@ -1641,6 +1652,7 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) OffsetNumber offnum, maxoff; TransactionId latestRemovedXid = InvalidTransactionId; + bool needwal = RelationNeedsWAL(rel); Assert(GistPageIsLeaf(page)); @@ -1659,13 +1671,16 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) deletable[ndeletable++] = offnum; } - if (XLogStandbyInfoActive() && RelationNeedsWAL(rel)) + if (XLogStandbyInfoActive() && needwal) latestRemovedXid = index_compute_xid_horizon_for_tuples(rel, heapRel, buffer, deletable, ndeletable); if (ndeletable > 0) { + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); PageIndexMultiDelete(page, deletable, ndeletable); @@ -1682,7 +1697,7 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) MarkBufferDirty(buffer); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index a9c616c7724..bbb3ebb19ad 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -17,6 +17,7 @@ #include "access/genam.h" 
#include "access/gist_private.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "commands/vacuum.h" #include "lib/integerset.h" #include "miscadmin.h" @@ -260,6 +261,7 @@ gistvacuumpage(GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno) Buffer buffer; Page page; BlockNumber recurse_to; + bool needwal = RelationNeedsWAL(rel); restart: recurse_to = InvalidBlockNumber; @@ -341,6 +343,9 @@ restart: */ if (ntodelete > 0) { + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); MarkBufferDirty(buffer); @@ -348,7 +353,7 @@ restart: PageIndexMultiDelete(page, todelete, ntodelete); GistMarkTuplesDeleted(page); - if (RelationNeedsWAL(rel)) + if (needwal) { XLogRecPtr recptr; @@ -580,6 +585,7 @@ gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexTuple idxtuple; XLogRecPtr recptr; FullTransactionId txid; + bool needwal = RelationNeedsWAL(info->index); /* * Check that the leaf is still empty and deletable. @@ -634,6 +640,9 @@ gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, */ txid = ReadNextFullTransactionId(); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* mark the page as deleted */ @@ -645,7 +654,7 @@ gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, MarkBufferDirty(parentBuffer); PageIndexTupleDelete(parentPage, downlink); - if (RelationNeedsWAL(info->index)) + if (needwal) recptr = gistXLogPageDelete(leafBuffer, txid, parentBuffer, downlink); else recptr = gistGetFakeLSN(info->index); diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 7c9ccf446c8..f4903a43bb5 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -22,6 +22,7 @@ #include "access/hash_xlog.h" #include "access/relscan.h" #include "access/tableam.h" +#include "access/walprohibit.h" #include "catalog/index.h" #include "commands/progress.h" #include "commands/vacuum.h" @@ -467,6 +468,7 @@ hashbulkdelete(IndexVacuumInfo *info, 
IndexBulkDeleteResult *stats, Buffer metabuf = InvalidBuffer; HashMetaPage metap; HashMetaPage cachedmetap; + bool needwal; tuples_removed = 0; num_index_tuples = 0; @@ -573,6 +575,10 @@ loop_top: goto loop_top; } + needwal = RelationNeedsWAL(rel); + if (needwal) + CheckWALPermitted(); + /* Okay, we're really done. Update tuple count in metapage. */ START_CRIT_SECTION(); @@ -603,7 +609,7 @@ loop_top: MarkBufferDirty(metabuf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_hash_update_meta_page xlrec; XLogRecPtr recptr; @@ -690,6 +696,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, Buffer buf; Bucket new_bucket PG_USED_FOR_ASSERTS_ONLY = InvalidBucket; bool bucket_dirty = false; + bool needwal = RelationNeedsWAL(rel); blkno = bucket_blkno; buf = bucket_buf; @@ -788,6 +795,9 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, */ if (ndeletable > 0) { + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -809,7 +819,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, MarkBufferDirty(buf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_hash_delete xlrec; XLogRecPtr recptr; @@ -883,6 +893,9 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, page = BufferGetPage(bucket_buf); bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page); + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -890,7 +903,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, MarkBufferDirty(bucket_buf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 2ebe671967b..2eab69efa91 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -17,6 +17,7 @@ #include 
"access/hash.h" #include "access/hash_xlog.h" +#include "access/walprohibit.h" #include "miscadmin.h" #include "storage/buf_internals.h" #include "storage/lwlock.h" @@ -193,6 +194,8 @@ restart_insert: */ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + AssertWALPermittedHaveXID(); + /* Do the update. No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -360,6 +363,7 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) if (ndeletable > 0) { TransactionId latestRemovedXid; + bool needwal = RelationNeedsWAL(rel); latestRemovedXid = index_compute_xid_horizon_for_tuples(rel, hrel, buf, @@ -370,6 +374,9 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) */ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -393,7 +400,7 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) MarkBufferDirty(metabuf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_hash_vacuum_one_page xlrec; XLogRecPtr recptr; diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 00f0a940116..e7c5dd3e3ce 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -19,6 +19,7 @@ #include "access/hash.h" #include "access/hash_xlog.h" +#include "access/walprohibit.h" #include "miscadmin.h" #include "utils/rel.h" @@ -312,6 +313,8 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin) found: + AssertWALPermittedHaveXID(); + /* * Do the update. No ereport(ERROR) until changes are logged. 
We want to * log the changes for bitmap page and overflow page together to avoid @@ -510,6 +513,7 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, Buffer prevbuf = InvalidBuffer; Buffer nextbuf = InvalidBuffer; bool update_metap = false; + bool needwal; /* Get information from the doomed page */ _hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE); @@ -573,10 +577,15 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, /* Get write-lock on metapage to update firstfree */ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + needwal = RelationNeedsWAL(rel); + /* This operation needs to log multiple tuples, prepare WAL for that */ - if (RelationNeedsWAL(rel)) + if (needwal) XLogEnsureRecordSpace(HASH_XLOG_FREE_OVFL_BUFS, 4 + nitups); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* @@ -641,7 +650,7 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, } /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_hash_squeeze_page xlrec; XLogRecPtr recptr; @@ -922,14 +931,19 @@ readpage: if (nitups > 0) { + bool needwal = RelationNeedsWAL(rel); + Assert(nitups == ndeletable); /* * This operation needs to log multiple tuples, prepare * WAL for that. 
*/ - if (RelationNeedsWAL(rel)) + if (needwal) + { + CheckWALPermitted(); XLogEnsureRecordSpace(0, 3 + nitups); + } START_CRIT_SECTION(); @@ -947,7 +961,7 @@ readpage: MarkBufferDirty(rbuf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { XLogRecPtr recptr; xl_hash_move_page_contents xlrec; diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index a664ecf494a..55a867dd375 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -30,6 +30,7 @@ #include "access/hash.h" #include "access/hash_xlog.h" +#include "access/walprohibit.h" #include "miscadmin.h" #include "port/pg_bitutils.h" #include "storage/lmgr.h" @@ -816,6 +817,8 @@ restart_expand: goto fail; } + AssertWALPermittedHaveXID(); + /* * Since we are scribbling on the pages in the shared buffers, establish a * critical section. Any failure in this next code leaves us with a big @@ -1172,6 +1175,8 @@ _hash_splitbucket(Relation rel, if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz)) { + AssertWALPermittedHaveXID(); + /* * Change the shared buffer state in critical section, * otherwise any error could make it unrecoverable. @@ -1223,6 +1228,8 @@ _hash_splitbucket(Relation rel, /* Exit loop if no more overflow pages in old bucket */ if (!BlockNumberIsValid(oblkno)) { + AssertWALPermittedHaveXID(); + /* * Change the shared buffer state in critical section, otherwise * any error could make it unrecoverable. 
@@ -1269,6 +1276,8 @@ _hash_splitbucket(Relation rel, npage = BufferGetPage(bucket_nbuf); nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT; diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 1585861a021..4d6052224fa 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -47,6 +47,7 @@ #include "access/transam.h" #include "access/valid.h" #include "access/visibilitymap.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -1898,6 +1899,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, */ CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber); + AssertWALPermittedHaveXID(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -2172,6 +2175,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, &vmbuffer, NULL); page = BufferGetPage(buffer); + AssertWALPermittedHaveXID(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -2690,6 +2695,8 @@ l1: xid, LockTupleExclusive, true, &new_xmax, &new_infomask, &new_infomask2); + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* @@ -3442,6 +3449,8 @@ l2: Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple)); + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* Clear obsolete visibility flags ... 
*/ @@ -3615,6 +3624,8 @@ l2: bms_overlap(modified_attrs, id_attrs), &old_key_copied); + AssertWALPermittedHaveXID(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -4548,6 +4559,8 @@ failed: GetCurrentTransactionId(), mode, false, &xid, &new_infomask, &new_infomask2); + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* @@ -5339,6 +5352,8 @@ l4: VISIBILITYMAP_ALL_FROZEN)) cleared_all_frozen = true; + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* ... and set them */ @@ -5497,6 +5512,8 @@ heap_finish_speculative(Relation relation, ItemPointer tid) StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber, "invalid speculative token constant"); + AssertWALPermittedHaveXID(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -5605,6 +5622,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid) elog(ERROR, "attempted to kill a non-speculative tuple"); Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data)); + AssertWALPermittedHaveXID(); + /* * No need to check for serializable conflicts here. There is never a * need for a combocid, either. No need to extract replica identity, or @@ -5721,6 +5740,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple) HeapTupleHeader htup; uint32 oldlen; uint32 newlen; + bool needwal; /* * For now, we don't allow parallel updates. 
Unlike a regular update, @@ -5751,6 +5771,10 @@ heap_inplace_update(Relation relation, HeapTuple tuple) if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff) elog(ERROR, "wrong tuple length"); + needwal = RelationNeedsWAL(relation); + if (needwal) + CheckWALPermitted(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -5761,7 +5785,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple) MarkBufferDirty(buffer); /* XLOG stuff */ - if (RelationNeedsWAL(relation)) + if (needwal) { xl_heap_inplace xlrec; XLogRecPtr recptr; diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index bc510e2e9b3..9dcae7d2153 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -18,6 +18,7 @@ #include "access/heapam_xlog.h" #include "access/htup_details.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "access/xlog.h" #include "catalog/catalog.h" #include "miscadmin.h" @@ -94,11 +95,11 @@ heap_page_prune_opt(Relation relation, Buffer buffer) Size minfree; /* - * We can't write WAL in recovery mode, so there's no point trying to + * We can't write WAL during read-only mode, so there's no point trying to * clean the page. The primary will likely issue a cleaning WAL record soon * anyway, so this is no particular loss. 
*/ - if (RecoveryInProgress()) + if (!XLogInsertAllowed()) return; /* @@ -232,6 +233,7 @@ heap_page_prune(Relation relation, Buffer buffer, OffsetNumber offnum, maxoff; PruneState prstate; + bool needwal; /* * Our strategy is to scan the page and make lists of items to change, @@ -286,6 +288,10 @@ heap_page_prune(Relation relation, Buffer buffer, if (off_loc) *off_loc = InvalidOffsetNumber; + needwal = RelationNeedsWAL(relation); + if (needwal) + CheckWALPermitted(); + /* Any error while applying the changes is critical */ START_CRIT_SECTION(); @@ -319,7 +325,7 @@ heap_page_prune(Relation relation, Buffer buffer, /* * Emit a WAL XLOG_HEAP2_CLEAN record showing what we did */ - if (RelationNeedsWAL(relation)) + if (needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 4f2f38168dc..1869df5f03f 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -59,6 +59,7 @@ #include "access/parallel.h" #include "access/transam.h" #include "access/visibilitymap.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "catalog/storage.h" @@ -759,6 +760,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, BlockNumber nblocks, blkno; HeapTupleData tuple; + bool needwal = RelationNeedsWAL(onerel); TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid; TransactionId relminmxid = onerel->rd_rel->relminmxid; BlockNumber empty_pages, @@ -1201,6 +1203,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, */ if (!PageIsAllVisible(page)) { + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* mark buffer dirty before writing a WAL record */ @@ -1216,7 +1221,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, * page has been previously WAL-logged, and if not, do that * now. 
*/ - if (RelationNeedsWAL(onerel) && + if (needwal && PageGetLSN(page) == InvalidXLogRecPtr) log_newpage_buffer(buf, true); @@ -1482,6 +1487,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, */ if (nfrozen > 0) { + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); MarkBufferDirty(buf); @@ -1499,7 +1507,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, } /* Now WAL-log freezing if necessary */ - if (RelationNeedsWAL(onerel)) + if (needwal) { XLogRecPtr recptr; @@ -1932,6 +1940,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, TransactionId visibility_cutoff_xid; bool all_frozen; LVSavedErrInfo saved_err_info; + bool needwal = RelationNeedsWAL(onerel); pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno); @@ -1939,6 +1948,9 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, update_vacuum_error_info(vacrelstats, &saved_err_info, VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno, InvalidOffsetNumber); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); for (; tupindex < dead_tuples->num_tuples; tupindex++) @@ -1964,7 +1976,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, MarkBufferDirty(buffer); /* XLOG stuff */ - if (RelationNeedsWAL(onerel)) + if (needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index b1072183bcd..44244363968 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -88,6 +88,7 @@ #include "access/heapam_xlog.h" #include "access/visibilitymap.h" +#include "access/walprohibit.h" #include "access/xlog.h" #include "miscadmin.h" #include "port/pg_bitutils.h" @@ -249,6 +250,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); Page page; uint8 *map; + bool needwal = RelationNeedsWAL(rel); #ifdef 
TRACE_VISIBILITYMAP elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk); @@ -270,6 +272,16 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, map = (uint8 *) PageGetContents(page); LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE); + /* + * Can reach here from VACUUM or from startup process, so need not have an + * XID. + * + * Recovery in the startup process never has WAL prohibit state; skip the + * permission check if we reach here in the startup process. + */ + if (needwal) + InRecovery ? AssertWALPermitted() : CheckWALPermitted(); + if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS)) { START_CRIT_SECTION(); @@ -277,7 +289,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, map[mapByte] |= (flags << mapOffset); MarkBufferDirty(vmBuf); - if (RelationNeedsWAL(rel)) + if (needwal) { if (XLogRecPtrIsInvalid(recptr)) { @@ -476,6 +488,7 @@ visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) Buffer mapBuffer; Page page; char *map; + bool needwal; newnblocks = truncBlock + 1; @@ -489,8 +502,13 @@ visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) page = BufferGetPage(mapBuffer); map = PageGetContents(page); + needwal = (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded()); + LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE); + if (needwal) + CheckWALPermitted(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -518,7 +536,7 @@ visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) * during recovery.
*/ MarkBufferDirty(mapBuffer); - if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded()) + if (needwal) log_newpage_buffer(mapBuffer, false); END_CRIT_SECTION(); diff --git a/src/backend/access/nbtree/nbtdedup.c b/src/backend/access/nbtree/nbtdedup.c index f6be865b17e..b519a1268e8 100644 --- a/src/backend/access/nbtree/nbtdedup.c +++ b/src/backend/access/nbtree/nbtdedup.c @@ -16,6 +16,7 @@ #include "access/nbtree.h" #include "access/nbtxlog.h" +#include "access/walprohibit.h" #include "miscadmin.h" #include "utils/rel.h" @@ -271,6 +272,8 @@ _bt_dedup_one_page(Relation rel, Buffer buf, Relation heapRel, nopaque->btpo_flags &= ~BTP_HAS_GARBAGE; } + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); PageRestoreTempPage(newpage, page); diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index d36f7557c87..2c3d8aaecbd 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -19,6 +19,7 @@ #include "access/nbtxlog.h" #include "access/tableam.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "miscadmin.h" #include "storage/lmgr.h" @@ -1246,6 +1247,8 @@ _bt_insertonpg(Relation rel, } } + AssertWALPermittedHaveXID(); + /* Do the update. No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -1898,13 +1901,16 @@ _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf, Buffer cbuf, ropaque->btpo_flags |= BTP_SPLIT_END; } + AssertWALPermittedHaveXID(); + /* * Right sibling is locked, new siblings are prepared, but original page * is not updated yet. * * NO EREPORT(ERROR) till right sibling is updated. We can get away with * not starting the critical section till here because we haven't been - * scribbling on the original page yet; see comments above. + * scribbling on the original page yet; see the comments above for grabbing + * the right sibling. 
*/ START_CRIT_SECTION(); @@ -2467,6 +2473,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) right_item = CopyIndexTuple(item); BTreeTupleSetDownLink(right_item, rbkno); + AssertWALPermittedHaveXID(); + /* NO EREPORT(ERROR) from here till newroot op is logged */ START_CRIT_SECTION(); diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 7f392480ac0..8c3fc251a29 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -26,6 +26,7 @@ #include "access/nbtxlog.h" #include "access/tableam.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "access/xlog.h" #include "access/xloginsert.h" #include "miscadmin.h" @@ -179,6 +180,7 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, BTMetaPageData *metad; bool needsRewrite = false; XLogRecPtr recptr; + bool needwal; /* read the metapage and check if it needs rewrite */ metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); @@ -202,6 +204,10 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, _bt_unlockbuf(rel, metabuf); _bt_lockbuf(rel, metabuf, BT_WRITE); + needwal = RelationNeedsWAL(rel); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* upgrade meta-page if needed */ @@ -214,7 +220,7 @@ _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, MarkBufferDirty(metabuf); /* write wal record if needed */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_btree_metadata md; @@ -332,6 +338,7 @@ _bt_getroot(Relation rel, int access) if (metad->btm_root == P_NONE) { Page metapg; + bool needwal; /* If access = BT_READ, caller doesn't want us to create root yet */ if (access == BT_READ) @@ -377,6 +384,10 @@ _bt_getroot(Relation rel, int access) /* Get raw page pointer for metapage */ metapg = BufferGetPage(metabuf); + needwal = RelationNeedsWAL(rel); + if (needwal) + CheckWALPermitted(); + /* NO ELOG(ERROR) till meta is updated */ START_CRIT_SECTION(); @@ 
-395,7 +406,7 @@ _bt_getroot(Relation rel, int access) MarkBufferDirty(metabuf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_btree_newroot xlrec; XLogRecPtr recptr; @@ -1131,6 +1142,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, char *updatedbuf = NULL; Size updatedbuflen = 0; OffsetNumber updatedoffsets[MaxIndexTuplesPerPage]; + bool needwal = RelationNeedsWAL(rel); /* Shouldn't be called unless there's something to do */ Assert(ndeletable > 0 || nupdatable > 0); @@ -1145,7 +1157,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, } /* XLOG stuff -- allocate and fill buffer before critical section */ - if (nupdatable > 0 && RelationNeedsWAL(rel)) + if (nupdatable > 0 && needwal) { Size offset = 0; @@ -1175,6 +1187,9 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, } } + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -1235,7 +1250,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, MarkBufferDirty(buf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { XLogRecPtr recptr; xl_btree_vacuum xlrec_vacuum; @@ -1302,6 +1317,8 @@ _bt_delitems_delete(Relation rel, Buffer buf, latestRemovedXid = _bt_xid_horizon(rel, heapRel, page, deletable, ndeletable); + AssertWALPermittedHaveXID(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -1832,6 +1849,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) OffsetNumber nextoffset; IndexTuple itup; IndexTupleData trunctuple; + bool needwal; page = BufferGetPage(leafbuf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -1920,6 +1938,10 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) */ PredicateLockPageCombine(rel, leafblkno, leafrightsib); + needwal = RelationNeedsWAL(rel); + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -1971,7 +1993,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, 
BTStack stack) MarkBufferDirty(leafbuf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_btree_mark_page_halfdead xlrec; XLogRecPtr recptr; @@ -2064,6 +2086,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno, int targetlevel; IndexTuple leafhikey; BlockNumber nextchild; + bool needwal; page = BufferGetPage(leafbuf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -2277,6 +2300,10 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno, * Here we begin doing the deletion. */ + needwal = RelationNeedsWAL(rel); + if (needwal) + CheckWALPermitted(); + /* No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -2356,7 +2383,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno, MarkBufferDirty(leafbuf); /* XLOG stuff */ - if (RelationNeedsWAL(rel)) + if (needwal) { xl_btree_unlink_page xlrec; xl_btree_metadata xlmeta; diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 934d65b89f2..3c5a15c5d32 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -18,6 +18,7 @@ #include "access/genam.h" #include "access/spgist_private.h" #include "access/spgxlog.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "miscadmin.h" #include "storage/bufmgr.h" @@ -214,6 +215,8 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, xlrec.offnumParent = InvalidOffsetNumber; xlrec.nodeI = 0; + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); if (current->offnum == InvalidOffsetNumber || @@ -458,6 +461,8 @@ moveLeafs(Relation index, SpGistState *state, leafdata = leafptr = palloc(size); + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* copy all the old tuples to new page, unless they're dead */ @@ -1109,6 +1114,8 @@ doPickSplit(Relation index, SpGistState *state, leafdata = leafptr = (char *) palloc(totalLeafSizes); + 
AssertWALPermittedHaveXID(); + /* Here we begin making the changes to the target pages */ START_CRIT_SECTION(); @@ -1517,6 +1524,8 @@ spgAddNodeAction(Relation index, SpGistState *state, if (PageGetExactFreeSpace(current->page) >= newInnerTuple->size - innerTuple->size) { + AssertWALPermittedHaveXID(); + /* * We can replace the inner tuple by new version in-place */ @@ -1603,6 +1612,8 @@ spgAddNodeAction(Relation index, SpGistState *state, else xlrec.parentBlk = 2; + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* insert new ... */ @@ -1788,6 +1799,8 @@ spgSplitNodeAction(Relation index, SpGistState *state, &xlrec.newPage); } + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); /* diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index e1c58933f97..3308832b85b 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -19,6 +19,7 @@ #include "access/spgist_private.h" #include "access/spgxlog.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "access/xloginsert.h" #include "catalog/storage_xlog.h" #include "commands/vacuum.h" @@ -139,6 +140,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, int nDeletable; OffsetNumber i, max = PageGetMaxOffsetNumber(page); + bool needwal; memset(predecessor, 0, sizeof(predecessor)); memset(deletable, 0, sizeof(deletable)); @@ -323,6 +325,10 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, if (nDeletable != xlrec.nDead + xlrec.nPlaceholder + xlrec.nMove) elog(ERROR, "inconsistent counts of deletable tuples"); + needwal = RelationNeedsWAL(index); + if (needwal) + CheckWALPermitted(); + /* Do the updates */ START_CRIT_SECTION(); @@ -371,7 +377,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, MarkBufferDirty(buffer); - if (RelationNeedsWAL(index)) + if (needwal) { XLogRecPtr recptr; @@ -411,6 +417,7 @@ vacuumLeafRoot(spgBulkDeleteState *bds, Relation 
index, Buffer buffer) OffsetNumber toDelete[MaxIndexTuplesPerPage]; OffsetNumber i, max = PageGetMaxOffsetNumber(page); + bool needwal; xlrec.nDelete = 0; @@ -447,6 +454,10 @@ vacuumLeafRoot(spgBulkDeleteState *bds, Relation index, Buffer buffer) if (xlrec.nDelete == 0) return; /* nothing more to do */ + needwal = RelationNeedsWAL(index); + if (needwal) + CheckWALPermitted(); + /* Do the update */ START_CRIT_SECTION(); @@ -455,7 +466,7 @@ vacuumLeafRoot(spgBulkDeleteState *bds, Relation index, Buffer buffer) MarkBufferDirty(buffer); - if (RelationNeedsWAL(index)) + if (needwal) { XLogRecPtr recptr; @@ -502,6 +513,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) OffsetNumber itemnos[MaxIndexTuplesPerPage]; spgxlogVacuumRedirect xlrec; GlobalVisState *vistest; + bool needwal; xlrec.nToPlaceholder = 0; xlrec.newestRedirectXid = InvalidTransactionId; @@ -509,6 +521,10 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) /* XXX: providing heap relation would allow more pruning */ vistest = GlobalVisTestFor(NULL); + needwal = RelationNeedsWAL(index); + if (needwal) + CheckWALPermitted(); + START_CRIT_SECTION(); /* @@ -584,7 +600,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) if (hasUpdate) MarkBufferDirty(buffer); - if (hasUpdate && RelationNeedsWAL(index)) + if (hasUpdate && needwal) { XLogRecPtr recptr; diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index b8bedca04a4..0a88740764f 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -73,6 +73,7 @@ #include "access/transam.h" #include "access/twophase.h" #include "access/twophase_rmgr.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -1143,6 +1144,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) ExtendMultiXactMember(nextOffset, nmembers); + CheckWALPermitted(); + /* * Critical section from here until 
caller has written the data into the * just-reserved SLRU space; we don't want to error out with a partly @@ -2938,7 +2941,7 @@ TruncateMultiXact(MultiXactId newOldestMulti, Oid newOldestMultiDB) mxtruncinfo trunc; MultiXactId earliest; - Assert(!RecoveryInProgress()); + Assert(XLogInsertAllowed()); Assert(MultiXactState->finishedStartup); /* diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index ef4f9981e35..ff2bc8cc74b 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -82,6 +82,7 @@ #include "access/transam.h" #include "access/twophase.h" #include "access/twophase_rmgr.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -1106,6 +1107,8 @@ EndPrepare(GlobalTransaction gxact) */ XLogEnsureRecordSpace(0, records.num_chunks); + AssertWALPermittedHaveXID(); + START_CRIT_SECTION(); MyProc->delayChkpt = true; @@ -2196,6 +2199,9 @@ RecordTransactionCommitPrepared(TransactionId xid, replorigin = (replorigin_session_origin != InvalidRepOriginId && replorigin_session_origin != DoNotReplicateId); + /* COMMIT PREPARED need not have an XID */ + CheckWALPermitted(); + START_CRIT_SECTION(); /* See notes in RecordTransactionCommit */ @@ -2286,6 +2292,9 @@ RecordTransactionAbortPrepared(TransactionId xid, elog(PANIC, "cannot abort transaction %u, it was already committed", xid); + /* ROLLBACK PREPARED need not have an XID */ + CheckWALPermitted(); + START_CRIT_SECTION(); /* diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index a4944faa32e..0c7a2362f25 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -17,6 +17,7 @@ #include "access/commit_ts.h" #include "access/subtrans.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "commands/dbcommands.h" @@ -75,6 +76,9 @@ 
GetNewTransactionId(bool isSubXact) if (RecoveryInProgress()) elog(ERROR, "cannot assign TransactionIds during recovery"); + /* do not assign transaction id in read-only mode */ + CheckWALPermitted(); + LWLockAcquire(XidGenLock, LW_EXCLUSIVE); full_xid = ShmemVariableCache->nextXid; diff --git a/src/backend/access/transam/walprohibit.c b/src/backend/access/transam/walprohibit.c index 75f3924cc97..f273f75b41f 100644 --- a/src/backend/access/transam/walprohibit.c +++ b/src/backend/access/transam/walprohibit.c @@ -24,6 +24,16 @@ #include "utils/fmgroids.h" #include "utils/fmgrprotos.h" +/* + * Assert flag to enforce WAL insert permission check rule before starting a + * critical section for the WAL writes. For this, either of CheckWALPermitted, + * AssertWALPermittedHaveXID, or AssertWALPermitted must be called before + * starting the critical section. + */ +#ifdef USE_ASSERT_CHECKING +WALPermitCheckState walpermit_checked_state = WALPERMIT_UNCHECKED; +#endif + /* * Shared-memory WAL prohibit state */ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 188c299bed9..abda095e735 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -26,6 +26,7 @@ #include "access/subtrans.h" #include "access/transam.h" #include "access/twophase.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -1317,6 +1318,8 @@ RecordTransactionCommit(void) /* Tell bufmgr and smgr to prepare for commit */ BufmgrCommit(); + AssertWALPermittedHaveXID(); + /* * Mark ourselves as within our "commit critical section". This * forces any concurrent checkpoint to wait until we've updated @@ -1677,6 +1680,9 @@ RecordTransactionAbort(bool isSubXact) elog(PANIC, "cannot abort transaction %u, it was already committed", xid); + /* We'll be reaching here with valid XID only. 
*/ + AssertWALPermittedHaveXID(); + /* Fetch the data we need for the abort record */ nrels = smgrGetPendingDeletes(false, &rels); nchildren = xactGetCommittedChildren(&children); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index bfba75bbe80..253409ca065 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -1025,7 +1025,7 @@ XLogInsertRecord(XLogRecData *rdata, /* cross-check on whether we should be here or not */ if (!XLogInsertAllowed()) - elog(ERROR, "cannot make new WAL entries during recovery"); + elog(ERROR, "cannot make new WAL entries now"); /*---------- * @@ -2860,9 +2860,11 @@ XLogFlush(XLogRecPtr record) * trying to flush the WAL, we should update minRecoveryPoint instead. We * test XLogInsertAllowed(), not InRecovery, because we need checkpointer * to act this way too, and because when it tries to write the - * end-of-recovery checkpoint, it should indeed flush. + * end-of-recovery checkpoint, it should indeed flush. Also, WAL prohibit + * state should not restrict WAL flushing. Otherwise, the dirty buffer + * cannot be evicted until WAL has been flushed up to the buffer's LSN. */ - if (!XLogInsertAllowed()) + if (!XLogInsertAllowed() && !IsWALProhibited()) { UpdateMinRecoveryPoint(record, false); return; } @@ -8826,6 +8828,8 @@ CreateCheckPoint(int flags) /* sanity check */ if (RecoveryInProgress() && (flags & CHECKPOINT_END_OF_RECOVERY) == 0) elog(ERROR, "can't create a checkpoint during recovery"); + if (!XLogInsertAllowed() && !RecoveryInProgress()) + elog(ERROR, "cannot create a checkpoint while system is read only"); /* * Initialize InitXLogInsert working areas before entering the critical @@ -8855,6 +8859,8 @@ CreateCheckPoint(int flags) MemSet(&CheckpointStats, 0, sizeof(CheckpointStats)); CheckpointStats.ckpt_start_t = GetCurrentTimestamp(); + AssertWALPermitted(); + /* * Use a critical section to force system panic if we have trouble.
*/ @@ -9083,6 +9089,8 @@ CreateCheckPoint(int flags) if (!shutdown && XLogStandbyInfoActive()) LogStandbySnapshot(); + AssertWALPermitted(); + START_CRIT_SECTION(); /* @@ -9240,6 +9248,8 @@ CreateEndOfRecoveryRecord(void) LocalSetXLogInsertAllowed(); + AssertWALPermitted(); + START_CRIT_SECTION(); XLogBeginInsert(); @@ -9886,7 +9896,7 @@ void UpdateFullPageWrites(void) { XLogCtlInsert *Insert = &XLogCtl->Insert; - bool recoveryInProgress; + bool WALInsertAllowed; /* * Do nothing if full_page_writes has not been changed. @@ -9900,10 +9910,10 @@ UpdateFullPageWrites(void) /* * Perform this outside critical section so that the WAL insert - * initialization done by RecoveryInProgress() doesn't trigger an + * initialization done by XLogInsertAllowed() doesn't trigger an * assertion failure. */ - recoveryInProgress = RecoveryInProgress(); + WALInsertAllowed = XLogInsertAllowed(); START_CRIT_SECTION(); @@ -9925,8 +9935,11 @@ UpdateFullPageWrites(void) * Write an XLOG_FPW_CHANGE record. This allows us to keep track of * full_page_writes during archive recovery, if required. */ - if (XLogStandbyInfoActive() && !recoveryInProgress) + if (XLogStandbyInfoActive() && WALInsertAllowed) { + /* Assured that WAL permission has been checked */ + AssertWALPermitted(); + XLogBeginInsert(); XLogRegisterData((char *) (&fullPageWrites), sizeof(bool)); diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index 1f0e4e01e69..710806143d4 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -19,6 +19,7 @@ #include "postgres.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xlog_internal.h" @@ -126,9 +127,14 @@ XLogBeginInsert(void) Assert(mainrdata_last == (XLogRecData *) &mainrdata_head); Assert(mainrdata_len == 0); + /* + * WAL permission must have been checked before entering the critical section.
+ * Otherwise, WAL prohibited error will force system panic. + */ + Assert(walpermit_checked_state != WALPERMIT_UNCHECKED || !CritSectionCount); + /* cross-check on whether we should be here or not */ - if (!XLogInsertAllowed()) - elog(ERROR, "cannot make new WAL entries during recovery"); + CheckWALPermitted(); if (begininsert_called) elog(ERROR, "XLogBeginInsert was already called"); @@ -210,6 +216,9 @@ XLogResetInsertion(void) mainrdata_last = (XLogRecData *) &mainrdata_head; curinsert_flags = 0; begininsert_called = false; + + /* Reset walpermit_checked flag */ + RESET_WALPERMIT_CHECKED_STATE(); } /* diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 6aab73bfd44..8dacf48db24 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -20,6 +20,7 @@ #include "access/relation.h" #include "access/table.h" #include "access/transam.h" +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -378,8 +379,13 @@ fill_seq_with_data(Relation rel, HeapTuple tuple) /* check the comment above nextval_internal()'s equivalent call. */ if (RelationNeedsWAL(rel)) + { GetTopTransactionId(); + /* Cannot have valid XID without WAL permission */ + AssertWALPermittedHaveXID(); + } + START_CRIT_SECTION(); MarkBufferDirty(buf); @@ -766,8 +772,13 @@ nextval_internal(Oid relid, bool check_permissions) * (Have to do that here, so we're outside the critical section) */ if (logit && RelationNeedsWAL(seqrel)) + { GetTopTransactionId(); + /* Cannot have valid XID without WAL permission */ + AssertWALPermittedHaveXID(); + } + /* ready to change the on-disk (or really, in-buffer) tuple */ START_CRIT_SECTION(); @@ -977,8 +988,13 @@ do_setval(Oid relid, int64 next, bool iscalled) /* check the comment above nextval_internal()'s equivalent call. 
*/ if (RelationNeedsWAL(seqrel)) + { GetTopTransactionId(); + /* Cannot have valid XID without WAL permission */ + AssertWALPermittedHaveXID(); + } + /* ready to change the on-disk (or really, in-buffer) tuple */ START_CRIT_SECTION(); diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 484f7ea2c0e..acc34d2ad7c 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -501,11 +501,14 @@ check_transaction_read_only(bool *newval, void **extra, GucSource source) GUC_check_errmsg("transaction read-write mode must be set before any query"); return false; } - /* Can't go to r/w mode while recovery is still active */ - if (RecoveryInProgress()) + /* + * Can't go to r/w mode while recovery is still active or while in WAL + * prohibit state + */ + if (!XLogInsertAllowed()) { GUC_check_errcode(ERRCODE_FEATURE_NOT_SUPPORTED); - GUC_check_errmsg("cannot set transaction read-write mode during recovery"); + GUC_check_errmsg("cannot set transaction read-write mode while system is read only"); return false; } } diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index e2ff484d367..a12e026ae02 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -942,6 +942,10 @@ RequestCheckpoint(int flags) int old_failed, old_started; + /* The checkpoint is allowed in recovery but not in WAL prohibit state */ + if (!RecoveryInProgress()) + CheckWALPermitted(); + /* * If in a standalone backend, just do it ourselves. */ diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index a2a963bd5b4..186cc47be1d 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -3638,13 +3638,15 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std) { /* * If we must not write WAL, due to a relfilenode-specific - * condition or being in recovery, don't dirty the page. 
We can - * set the hint, just not dirty the page as a result so the hint - * is lost when we evict the page or shutdown. + * condition or in general, don't dirty the page. We can + * set the hint, but must not dirty the page as a result, lest + * we trigger WAL generation. Unless the page is dirtied again + * later, the hint will be lost when the page is evicted, or at + * shutdown. * * See src/backend/storage/page/README for longer discussion. */ - if (RecoveryInProgress() || + if (!XLogInsertAllowed() || RelFileNodeSkippingWAL(bufHdr->tag.rnode)) return; diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index 6a96126b0c2..b05b0fe5f41 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -24,6 +24,7 @@ #include "postgres.h" #include "access/htup_details.h" +#include "access/walprohibit.h" #include "access/xlogutils.h" #include "miscadmin.h" #include "storage/freespace.h" @@ -285,12 +286,19 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks) */ if (first_removed_slot > 0) { + bool needwal; + buf = fsm_readbuf(rel, first_removed_address, false); if (!BufferIsValid(buf)) return InvalidBlockNumber; /* nothing to do; the FSM was already * smaller */ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); + needwal = (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded()); + + if (needwal) + CheckWALPermitted(); + /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); @@ -305,7 +313,7 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks) * during recovery. 
*/ MarkBufferDirty(buf); - if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded()) + if (needwal) log_newpage_buffer(buf, false); END_CRIT_SECTION(); diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index d86566f4554..f949a290745 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -794,15 +794,15 @@ LockAcquireExtended(const LOCKTAG *locktag, if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes) elog(ERROR, "unrecognized lock mode: %d", lockmode); - if (RecoveryInProgress() && !InRecovery && + if (!XLogInsertAllowed() && !InRecovery && (locktag->locktag_type == LOCKTAG_OBJECT || locktag->locktag_type == LOCKTAG_RELATION) && lockmode > RowExclusiveLock) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot acquire lock mode %s on database objects while recovery is in progress", + errmsg("cannot acquire lock mode %s on database objects while system is read only", lockMethodTable->lockModeNames[lockmode]), - errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery."))); + errhint("Only RowExclusiveLock or less can be acquired on database objects while system is read only."))); #ifdef LOCK_DEBUG if (LOCK_DEBUG_ENABLED(locktag)) diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 671fbb0ed5c..90d7599a57c 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -43,6 +43,7 @@ #include <sys/stat.h> #include <unistd.h> +#include "access/walprohibit.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -841,6 +842,8 @@ write_relmap_file(bool shared, RelMapFile *newmap, xl_relmap_update xlrec; XLogRecPtr lsn; + AssertWALPermittedHaveXID(); + /* now errors are fatal ...
*/ START_CRIT_SECTION(); diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 72e33523984..f3ff120601e 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -94,12 +94,37 @@ extern PGDLLIMPORT volatile uint32 CritSectionCount; /* in tcop/postgres.c */ extern void ProcessInterrupts(void); +#ifdef USE_ASSERT_CHECKING +typedef enum +{ + WALPERMIT_UNCHECKED, + WALPERMIT_CHECKED, + WALPERMIT_CHECKED_AND_USED +} WALPermitCheckState; + +/* in access/walprohibit.c */ +extern WALPermitCheckState walpermit_checked_state; + +/* + * Reset walpermit_checked flag when no longer in the critical section. + * Otherwise, marked checked and used. + */ +#define RESET_WALPERMIT_CHECKED_STATE() \ +do { \ + walpermit_checked_state = CritSectionCount ? \ + WALPERMIT_CHECKED_AND_USED : WALPERMIT_UNCHECKED; \ +} while(0) +#else +#define RESET_WALPERMIT_CHECKED_STATE() ((void) 0) +#endif + #ifndef WIN32 #define CHECK_FOR_INTERRUPTS() \ do { \ if (unlikely(InterruptPending)) \ ProcessInterrupts(); \ + RESET_WALPERMIT_CHECKED_STATE(); \ } while(0) #else /* WIN32 */ @@ -109,6 +134,7 @@ do { \ pgwin32_dispatch_queued_signals(); \ if (unlikely(InterruptPending)) \ ProcessInterrupts(); \ + RESET_WALPERMIT_CHECKED_STATE(); \ } while(0) #endif /* WIN32 */ @@ -135,6 +161,7 @@ do { \ do { \ Assert(CritSectionCount > 0); \ CritSectionCount--; \ + RESET_WALPERMIT_CHECKED_STATE(); \ } while(0) -- 2.22.0