diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index 0ea11b2..8dd4cd1 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -33,6 +33,7 @@ hash_xlog_init_meta_page(XLogReaderState *record)
 	XLogRecPtr	lsn = record->EndRecPtr;
 	Page		page;
 	Buffer		metabuf;
+	ForkNumber	forknum;
 
 	xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);
 
@@ -44,6 +45,15 @@ hash_xlog_init_meta_page(XLogReaderState *record)
 	page = (Page) BufferGetPage(metabuf);
 	PageSetLSN(page, lsn);
 	MarkBufferDirty(metabuf);
+
+	/*
+	 * Force the on-disk state of init forks to always be in sync with the
+	 * state in shared buffers. See XLogReadBufferForRedoExtended.
+	 */
+	XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
+	if (forknum == INIT_FORKNUM)
+		FlushOneBuffer(metabuf);
+
 	/* all done */
 	UnlockReleaseBuffer(metabuf);
 }
@@ -60,6 +70,7 @@ hash_xlog_init_bitmap_page(XLogReaderState *record)
 	Page		page;
 	HashMetaPage metap;
 	uint32		num_buckets;
+	ForkNumber	forknum;
 
 	xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) XLogRecGetData(record);
 
@@ -70,6 +81,14 @@ hash_xlog_init_bitmap_page(XLogReaderState *record)
 	_hash_initbitmapbuffer(bitmapbuf, xlrec->bmsize, true);
 	PageSetLSN(BufferGetPage(bitmapbuf), lsn);
 	MarkBufferDirty(bitmapbuf);
+
+	/*
+	 * Force the on-disk state of init forks to always be in sync with the
+	 * state in shared buffers. See XLogReadBufferForRedoExtended.
+	 */
+	XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
+	if (forknum == INIT_FORKNUM)
+		FlushOneBuffer(bitmapbuf);
 	UnlockReleaseBuffer(bitmapbuf);
 
 	/* add the new bitmap page to the metapage's list of bitmaps */
@@ -90,6 +109,10 @@ hash_xlog_init_bitmap_page(XLogReaderState *record)
 
 		PageSetLSN(page, lsn);
 		MarkBufferDirty(metabuf);
+
+		XLogRecGetBlockTag(record, 1, NULL, &forknum, NULL);
+		if (forknum == INIT_FORKNUM)
+			FlushOneBuffer(metabuf);
 	}
 	if (BufferIsValid(metabuf))
 		UnlockReleaseBuffer(metabuf);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 4544889..b4bf8ab 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -345,6 +345,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 	int32		ffactor;
 	uint32		num_buckets;
 	uint32		i;
+	bool		use_wal;
 
 	/* safety check */
 	if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
@@ -352,6 +353,14 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 			 RelationGetRelationName(rel));
 
 	/*
+	 * WAL log creation of pages if the relation is persistent, or this is the
+	 * init fork of an unlogged relation.
+	 */
+	use_wal = RelationNeedsWAL(rel) ||
+		(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
+		 forkNum == INIT_FORKNUM);
+
+	/*
 	 * Determine the target fill factor (in tuples per bucket) for this index.
 	 * The idea is to make the fill factor correspond to pages about as full
 	 * as the user-settable fillfactor parameter says.  We can compute it
@@ -384,7 +393,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 	metap = HashPageGetMeta(pg);
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(rel))
+	if (use_wal)
 	{
 		xl_hash_init_meta_page xlrec;
 		XLogRecPtr	recptr;
@@ -427,11 +436,12 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 		_hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
 		MarkBufferDirty(buf);
 
-		log_newpage(&rel->rd_node,
-					forkNum,
-					blkno,
-					BufferGetPage(buf),
-					true);
+		if (use_wal)
+			log_newpage(&rel->rd_node,
+						forkNum,
+						blkno,
+						BufferGetPage(buf),
+						true);
 		_hash_relbuf(rel, buf);
 	}
 
@@ -459,7 +469,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 	MarkBufferDirty(metabuf);
 
 	/* XLOG stuff */
-	if (RelationNeedsWAL(rel))
+	if (use_wal)
 	{
 		xl_hash_init_bitmap_page xlrec;
 		XLogRecPtr	recptr;
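
For context, the behavioral core of the patch is the use_wal predicate in _hash_init(): pages are WAL-logged when the relation is permanent, and additionally when writing the INIT_FORKNUM of an unlogged relation, since that init fork is what crash recovery uses to reset the index. The standalone C sketch below models only that decision table; the ForkNumber enum and relpersistence codes are stand-ins mirroring the PostgreSQL definitions, and RelationNeedsWAL() is reduced to the persistence check it essentially performs here, so this is an illustration rather than server code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the PostgreSQL definitions, for illustration only. */
typedef enum ForkNumber
{
	MAIN_FORKNUM = 0,
	FSM_FORKNUM,
	VISIBILITYMAP_FORKNUM,
	INIT_FORKNUM
} ForkNumber;

#define RELPERSISTENCE_PERMANENT	'p'
#define RELPERSISTENCE_UNLOGGED		'u'
#define RELPERSISTENCE_TEMP			't'

/*
 * Mirrors the patch's use_wal computation: log everything for permanent
 * relations, and only the init fork for unlogged ones, because the init
 * fork must survive a crash so recovery can rebuild the empty index.
 */
static bool
needs_wal(char relpersistence, ForkNumber forknum)
{
	return relpersistence == RELPERSISTENCE_PERMANENT ||
		(relpersistence == RELPERSISTENCE_UNLOGGED &&
		 forknum == INIT_FORKNUM);
}

int
main(void)
{
	/* An unlogged index's main fork skips WAL; its init fork does not. */
	printf("unlogged/main:  %d\n", needs_wal(RELPERSISTENCE_UNLOGGED, MAIN_FORKNUM));	/* 0 */
	printf("unlogged/init:  %d\n", needs_wal(RELPERSISTENCE_UNLOGGED, INIT_FORKNUM));	/* 1 */
	printf("permanent/main: %d\n", needs_wal(RELPERSISTENCE_PERMANENT, MAIN_FORKNUM));	/* 1 */
	printf("temp/main:      %d\n", needs_wal(RELPERSISTENCE_TEMP, MAIN_FORKNUM));		/* 0 */
	return 0;
}

The FlushOneBuffer() calls added to the redo routines are the replay-side counterpart of the same rule: per the patch's own comment, init-fork pages are flushed immediately during recovery so the on-disk state of the init fork never lags the version in shared buffers.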