From 5c87ea2183ce0f2bfae4e6aa11d2583fabbf3dcd Mon Sep 17 00:00:00 2001
From: Hari Babu
Date: Thu, 29 Mar 2018 16:59:20 +1100
Subject: [PATCH 08/16] Remove HeapScanDesc usage outside heap

HeapScanDesc is divided into two scan descriptors: TableScanDesc and
HeapPageScanDesc. TableScanDesc holds the common members that should be
available across all storage routines, while HeapPageScanDesc is available
only to table AM routines that support heap storage with the page format.

HeapPageScanDesc is used internally by the heapam storage routine and is
also exposed to Bitmap Heap and Sample scans, since they depend on the heap
page format. While generating Bitmap Heap and Sample scan plans, the
planner now checks whether the storage routine supports returning a
HeapPageScanDesc; based on that, it decides whether those two plan types
can be generated.
---
 contrib/amcheck/verify_nbtree.c | 2 +-
 contrib/pgrowlocks/pgrowlocks.c | 4 +-
 contrib/pgstattuple/pgstattuple.c | 10 +-
 contrib/tsm_system_rows/tsm_system_rows.c | 18 +-
 contrib/tsm_system_time/tsm_system_time.c | 8 +-
 src/backend/access/heap/heapam.c | 424 +++++++++++++++--------------
 src/backend/access/heap/heapam_handler.c | 53 ++++
 src/backend/access/index/genam.c | 4 +-
 src/backend/access/nbtree/nbtsort.c | 2 +-
 src/backend/access/table/tableam.c | 56 +++-
 src/backend/access/tablesample/system.c | 2 +-
 src/backend/bootstrap/bootstrap.c | 4 +-
 src/backend/catalog/aclchk.c | 4 +-
 src/backend/catalog/index.c | 10 +-
 src/backend/catalog/pg_conversion.c | 2 +-
 src/backend/catalog/pg_db_role_setting.c | 2 +-
 src/backend/catalog/pg_publication.c | 2 +-
 src/backend/catalog/pg_subscription.c | 2 +-
 src/backend/commands/cluster.c | 4 +-
 src/backend/commands/copy.c | 2 +-
 src/backend/commands/dbcommands.c | 6 +-
 src/backend/commands/indexcmds.c | 2 +-
 src/backend/commands/tablecmds.c | 10 +-
 src/backend/commands/tablespace.c | 10 +-
 src/backend/commands/typecmds.c | 4 +-
 src/backend/commands/vacuum.c | 4 +-
 src/backend/executor/nodeBitmapHeapscan.c | 70 +++--
 src/backend/executor/nodeSamplescan.c | 49 ++--
 src/backend/executor/nodeSeqscan.c | 5 +-
 src/backend/optimizer/util/plancat.c | 4 +-
 src/backend/partitioning/partbounds.c | 2 +-
 src/backend/postmaster/autovacuum.c | 4 +-
 src/backend/postmaster/pgstat.c | 2 +-
 src/backend/replication/logical/launcher.c | 2 +-
 src/backend/rewrite/rewriteDefine.c | 2 +-
 src/backend/utils/init/postinit.c | 2 +-
 src/include/access/heapam.h | 22 +-
 src/include/access/relscan.h | 47 ++--
 src/include/access/tableam.h | 30 +-
 src/include/access/tableam_common.h | 1 -
 src/include/access/tableamapi.h | 26 +-
 src/include/catalog/index.h | 4 +-
 src/include/nodes/execnodes.h | 4 +-
 43 files changed, 523 insertions(+), 404 deletions(-)

diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index 70dbdcbd30..e30513ddd9 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -478,7 +478,7 @@ bt_check_every_level(Relation rel, Relation heaprel, bool readonly, if (state->heapallindexed) { IndexInfo *indexinfo = BuildIndexInfo(state->rel); - HeapScanDesc scan; + TableScanDesc scan; /* Report on extra downlink checks performed in readonly case */ if (state->readonly) diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c index 6d47a446ea..cba2e63f13 100644 --- a/contrib/pgrowlocks/pgrowlocks.c +++ b/contrib/pgrowlocks/pgrowlocks.c @@ -56,7 +56,7 @@ PG_FUNCTION_INFO_V1(pgrowlocks); typedef struct { Relation rel; - HeapScanDesc scan; + TableScanDesc scan; int
ncolumns; } MyData; @@ -71,7 +71,7 @@ Datum pgrowlocks(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; TupleDesc tupdesc; AttInMetadata *attinmeta; diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index d9b08796c8..bd022e031a 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -317,7 +317,8 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo) static Datum pgstat_heap(Relation rel, FunctionCallInfo fcinfo) { - HeapScanDesc scan; + TableScanDesc scan; + HeapPageScanDesc pagescan; HeapTuple tuple; BlockNumber nblocks; BlockNumber block = 0; /* next block to count free space in */ @@ -331,7 +332,8 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) scan = table_beginscan_strat(rel, SnapshotAny, 0, NULL, true, false); InitDirtySnapshot(SnapshotDirty); - nblocks = scan->rs_nblocks; /* # blocks to be scanned */ + pagescan = tableam_get_heappagescandesc(scan); + nblocks = pagescan->rs_nblocks; /* # blocks to be scanned */ /* scan the relation */ while ((tuple = table_scan_getnext(scan, ForwardScanDirection)) != NULL) @@ -367,7 +369,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) CHECK_FOR_INTERRUPTS(); buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, - RBM_NORMAL, scan->rs_strategy); + RBM_NORMAL, pagescan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); @@ -380,7 +382,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) CHECK_FOR_INTERRUPTS(); buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, - RBM_NORMAL, scan->rs_strategy); + RBM_NORMAL, pagescan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c index 83f841f0c2..a2a1141d6f 100644 --- a/contrib/tsm_system_rows/tsm_system_rows.c +++ b/contrib/tsm_system_rows/tsm_system_rows.c @@ -71,7 +71,7 @@ static BlockNumber system_rows_nextsampleblock(SampleScanState *node); static OffsetNumber system_rows_nextsampletuple(SampleScanState *node, BlockNumber blockno, OffsetNumber maxoffset); -static bool SampleOffsetVisible(OffsetNumber tupoffset, HeapScanDesc scan); +static bool SampleOffsetVisible(OffsetNumber tupoffset, HeapPageScanDesc scan); static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate); @@ -209,7 +209,7 @@ static BlockNumber system_rows_nextsampleblock(SampleScanState *node) { SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state; - HeapScanDesc scan = node->ss.ss_currentScanDesc; + HeapPageScanDesc pagescan = node->pagescan; /* First call within scan? 
*/ if (sampler->doneblocks == 0) @@ -221,14 +221,14 @@ system_rows_nextsampleblock(SampleScanState *node) SamplerRandomState randstate; /* If relation is empty, there's nothing to scan */ - if (scan->rs_nblocks == 0) + if (pagescan->rs_nblocks == 0) return InvalidBlockNumber; /* We only need an RNG during this setup step */ sampler_random_init_state(sampler->seed, randstate); /* Compute nblocks/firstblock/step only once per query */ - sampler->nblocks = scan->rs_nblocks; + sampler->nblocks = pagescan->rs_nblocks; /* Choose random starting block within the relation */ /* (Actually this is the predecessor of the first block visited) */ @@ -258,7 +258,7 @@ system_rows_nextsampleblock(SampleScanState *node) { /* Advance lb, using uint64 arithmetic to forestall overflow */ sampler->lb = ((uint64) sampler->lb + sampler->step) % sampler->nblocks; - } while (sampler->lb >= scan->rs_nblocks); + } while (sampler->lb >= pagescan->rs_nblocks); return sampler->lb; } @@ -278,7 +278,7 @@ system_rows_nextsampletuple(SampleScanState *node, OffsetNumber maxoffset) { SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state; - HeapScanDesc scan = node->ss.ss_currentScanDesc; + HeapPageScanDesc pagescan = node->pagescan; OffsetNumber tupoffset = sampler->lt; /* Quit if we've returned all needed tuples */ @@ -291,7 +291,7 @@ system_rows_nextsampletuple(SampleScanState *node, */ /* We rely on the data accumulated in pagemode access */ - Assert(scan->rs_pageatatime); + Assert(pagescan->rs_pageatatime); for (;;) { /* Advance to next possible offset on page */ @@ -308,7 +308,7 @@ system_rows_nextsampletuple(SampleScanState *node, } /* Found a candidate? */ - if (SampleOffsetVisible(tupoffset, scan)) + if (SampleOffsetVisible(tupoffset, pagescan)) { sampler->donetuples++; break; @@ -327,7 +327,7 @@ system_rows_nextsampletuple(SampleScanState *node, * so just look at the info it left in rs_vistuples[]. */ static bool -SampleOffsetVisible(OffsetNumber tupoffset, HeapScanDesc scan) +SampleOffsetVisible(OffsetNumber tupoffset, HeapPageScanDesc scan) { int start = 0, end = scan->rs_ntuples - 1; diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c index f0c220aa4a..f9925bb8b8 100644 --- a/contrib/tsm_system_time/tsm_system_time.c +++ b/contrib/tsm_system_time/tsm_system_time.c @@ -219,7 +219,7 @@ static BlockNumber system_time_nextsampleblock(SampleScanState *node) { SystemTimeSamplerData *sampler = (SystemTimeSamplerData *) node->tsm_state; - HeapScanDesc scan = node->ss.ss_currentScanDesc; + HeapPageScanDesc pagescan = node->pagescan; instr_time cur_time; /* First call within scan? 
*/ @@ -232,14 +232,14 @@ system_time_nextsampleblock(SampleScanState *node) SamplerRandomState randstate; /* If relation is empty, there's nothing to scan */ - if (scan->rs_nblocks == 0) + if (pagescan->rs_nblocks == 0) return InvalidBlockNumber; /* We only need an RNG during this setup step */ sampler_random_init_state(sampler->seed, randstate); /* Compute nblocks/firstblock/step only once per query */ - sampler->nblocks = scan->rs_nblocks; + sampler->nblocks = pagescan->rs_nblocks; /* Choose random starting block within the relation */ /* (Actually this is the predecessor of the first block visited) */ @@ -275,7 +275,7 @@ system_time_nextsampleblock(SampleScanState *node) { /* Advance lb, using uint64 arithmetic to forestall overflow */ sampler->lb = ((uint64) sampler->lb + sampler->step) % sampler->nblocks; - } while (sampler->lb >= scan->rs_nblocks); + } while (sampler->lb >= pagescan->rs_nblocks); return sampler->lb; } diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index c8fee6a4cb..c605d8a2b8 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -224,9 +224,9 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * lock that ensures the interesting tuple(s) won't change.) */ if (scan->rs_parallel != NULL) - scan->rs_nblocks = scan->rs_parallel->phs_nblocks; + scan->rs_pagescan.rs_nblocks = scan->rs_parallel->phs_nblocks; else - scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd); + scan->rs_pagescan.rs_nblocks = RelationGetNumberOfBlocks(scan->rs_scan.rs_rd); /* * If the table is large relative to NBuffers, use a bulk-read access @@ -240,8 +240,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * Note that heap_parallelscan_initialize has a very similar test; if you * change this, consider changing that one, too. */ - if (!RelationUsesLocalBuffers(scan->rs_rd) && - scan->rs_nblocks > NBuffers / 4) + if (!RelationUsesLocalBuffers(scan->rs_scan.rs_rd) && + scan->rs_pagescan.rs_nblocks > NBuffers / 4) { allow_strat = scan->rs_allow_strat; allow_sync = scan->rs_allow_sync; @@ -252,20 +252,20 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) if (allow_strat) { /* During a rescan, keep the previous strategy object. */ - if (scan->rs_strategy == NULL) - scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD); + if (scan->rs_pagescan.rs_strategy == NULL) + scan->rs_pagescan.rs_strategy = GetAccessStrategy(BAS_BULKREAD); } else { - if (scan->rs_strategy != NULL) - FreeAccessStrategy(scan->rs_strategy); - scan->rs_strategy = NULL; + if (scan->rs_pagescan.rs_strategy != NULL) + FreeAccessStrategy(scan->rs_pagescan.rs_strategy); + scan->rs_pagescan.rs_strategy = NULL; } if (scan->rs_parallel != NULL) { /* For parallel scan, believe whatever ParallelHeapScanDesc says. */ - scan->rs_syncscan = scan->rs_parallel->phs_syncscan; + scan->rs_pagescan.rs_syncscan = scan->rs_parallel->phs_syncscan; } else if (keep_startblock) { @@ -274,25 +274,25 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * so that rewinding a cursor doesn't generate surprising results. * Reset the active syncscan setting, though. 
*/ - scan->rs_syncscan = (allow_sync && synchronize_seqscans); + scan->rs_pagescan.rs_syncscan = (allow_sync && synchronize_seqscans); } else if (allow_sync && synchronize_seqscans) { - scan->rs_syncscan = true; - scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks); + scan->rs_pagescan.rs_syncscan = true; + scan->rs_pagescan.rs_startblock = ss_get_location(scan->rs_scan.rs_rd, scan->rs_pagescan.rs_nblocks); } else { - scan->rs_syncscan = false; - scan->rs_startblock = 0; + scan->rs_pagescan.rs_syncscan = false; + scan->rs_pagescan.rs_startblock = 0; } - scan->rs_numblocks = InvalidBlockNumber; - scan->rs_inited = false; + scan->rs_pagescan.rs_numblocks = InvalidBlockNumber; + scan->rs_scan.rs_inited = false; scan->rs_ctup.t_data = NULL; ItemPointerSetInvalid(&scan->rs_ctup.t_self); - scan->rs_cbuf = InvalidBuffer; - scan->rs_cblock = InvalidBlockNumber; + scan->rs_scan.rs_cbuf = InvalidBuffer; + scan->rs_scan.rs_cblock = InvalidBlockNumber; /* page-at-a-time fields are always invalid when not rs_inited */ @@ -300,7 +300,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * copy the scan key, if appropriate */ if (key != NULL) - memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData)); + memcpy(scan->rs_scan.rs_key, key, scan->rs_scan.rs_nkeys * sizeof(ScanKeyData)); /* * Currently, we don't have a stats counter for bitmap heap scans (but the @@ -308,7 +308,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * update stats for tuple fetches there) */ if (!scan->rs_bitmapscan && !scan->rs_samplescan) - pgstat_count_heap_scan(scan->rs_rd); + pgstat_count_heap_scan(scan->rs_scan.rs_rd); } /* @@ -318,16 +318,19 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * numBlks is number of pages to scan (InvalidBlockNumber means "all") */ void -heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks) +heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks) { - Assert(!scan->rs_inited); /* else too late to change */ - Assert(!scan->rs_syncscan); /* else rs_startblock is significant */ + HeapScanDesc scan = (HeapScanDesc) sscan; + + Assert(!scan->rs_scan.rs_inited); /* else too late to change */ + Assert(!scan->rs_pagescan.rs_syncscan); /* else rs_startblock is + * significant */ /* Check startBlk is valid (but allow case of zero blocks...) */ - Assert(startBlk == 0 || startBlk < scan->rs_nblocks); + Assert(startBlk == 0 || startBlk < scan->rs_pagescan.rs_nblocks); - scan->rs_startblock = startBlk; - scan->rs_numblocks = numBlks; + scan->rs_pagescan.rs_startblock = startBlk; + scan->rs_pagescan.rs_numblocks = numBlks; } /* @@ -338,8 +341,9 @@ heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks) * which tuples on the page are visible. 
*/ void -heapgetpage(HeapScanDesc scan, BlockNumber page) +heapgetpage(TableScanDesc sscan, BlockNumber page) { + HeapScanDesc scan = (HeapScanDesc) sscan; Buffer buffer; Snapshot snapshot; Page dp; @@ -349,13 +353,13 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) ItemId lpp; bool all_visible; - Assert(page < scan->rs_nblocks); + Assert(page < scan->rs_pagescan.rs_nblocks); /* release previous scan buffer, if any */ - if (BufferIsValid(scan->rs_cbuf)) + if (BufferIsValid(scan->rs_scan.rs_cbuf)) { - ReleaseBuffer(scan->rs_cbuf); - scan->rs_cbuf = InvalidBuffer; + ReleaseBuffer(scan->rs_scan.rs_cbuf); + scan->rs_scan.rs_cbuf = InvalidBuffer; } /* @@ -366,20 +370,20 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) CHECK_FOR_INTERRUPTS(); /* read page using selected strategy */ - scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page, - RBM_NORMAL, scan->rs_strategy); - scan->rs_cblock = page; + scan->rs_scan.rs_cbuf = ReadBufferExtended(scan->rs_scan.rs_rd, MAIN_FORKNUM, page, + RBM_NORMAL, scan->rs_pagescan.rs_strategy); + scan->rs_scan.rs_cblock = page; - if (!scan->rs_pageatatime) + if (!scan->rs_pagescan.rs_pageatatime) return; - buffer = scan->rs_cbuf; - snapshot = scan->rs_snapshot; + buffer = scan->rs_scan.rs_cbuf; + snapshot = scan->rs_scan.rs_snapshot; /* * Prune and repair fragmentation for the whole page, if possible. */ - heap_page_prune_opt(scan->rs_rd, buffer); + heap_page_prune_opt(scan->rs_scan.rs_rd, buffer); /* * We must hold share lock on the buffer content while examining tuple @@ -389,7 +393,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) LockBuffer(buffer, BUFFER_LOCK_SHARE); dp = BufferGetPage(buffer); - TestForOldSnapshot(snapshot, scan->rs_rd, dp); + TestForOldSnapshot(snapshot, scan->rs_scan.rs_rd, dp); lines = PageGetMaxOffsetNumber(dp); ntup = 0; @@ -424,7 +428,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) HeapTupleData loctup; bool valid; - loctup.t_tableOid = RelationGetRelid(scan->rs_rd); + loctup.t_tableOid = RelationGetRelid(scan->rs_scan.rs_rd); loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp); loctup.t_len = ItemIdGetLength(lpp); ItemPointerSet(&(loctup.t_self), page, lineoff); @@ -432,20 +436,20 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) if (all_visible) valid = true; else - valid = HeapTupleSatisfiesVisibility(scan->rs_rd->rd_tableamroutine, &loctup, snapshot, buffer); + valid = HeapTupleSatisfiesVisibility(scan->rs_scan.rs_rd->rd_tableamroutine, &loctup, snapshot, buffer); - CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup, + CheckForSerializableConflictOut(valid, scan->rs_scan.rs_rd, &loctup, buffer, snapshot); if (valid) - scan->rs_vistuples[ntup++] = lineoff; + scan->rs_pagescan.rs_vistuples[ntup++] = lineoff; } } LockBuffer(buffer, BUFFER_LOCK_UNLOCK); Assert(ntup <= MaxHeapTuplesPerPage); - scan->rs_ntuples = ntup; + scan->rs_pagescan.rs_ntuples = ntup; } /* ---------------- @@ -478,7 +482,7 @@ heapgettup(HeapScanDesc scan, ScanKey key) { HeapTuple tuple = &(scan->rs_ctup); - Snapshot snapshot = scan->rs_snapshot; + Snapshot snapshot = scan->rs_scan.rs_snapshot; bool backward = ScanDirectionIsBackward(dir); BlockNumber page; bool finished; @@ -493,14 +497,14 @@ heapgettup(HeapScanDesc scan, */ if (ScanDirectionIsForward(dir)) { - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) + if (scan->rs_pagescan.rs_nblocks == 0 || scan->rs_pagescan.rs_numblocks == 0) { - 
Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } @@ -513,29 +517,29 @@ heapgettup(HeapScanDesc scan, /* Other processes might have already finished the scan. */ if (page == InvalidBlockNumber) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } } else - page = scan->rs_startblock; /* first page */ - heapgetpage(scan, page); + page = scan->rs_pagescan.rs_startblock; /* first page */ + heapgetpage((TableScanDesc) scan, page); lineoff = FirstOffsetNumber; /* first offnum */ - scan->rs_inited = true; + scan->rs_scan.rs_inited = true; } else { /* continue from previously returned page/tuple */ - page = scan->rs_cblock; /* current page */ + page = scan->rs_scan.rs_cblock; /* current page */ lineoff = /* next offnum */ OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self))); } - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_SHARE); - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(snapshot, scan->rs_rd, dp); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(snapshot, scan->rs_scan.rs_rd, dp); lines = PageGetMaxOffsetNumber(dp); /* page and lineoff now reference the physically next tid */ @@ -546,14 +550,14 @@ heapgettup(HeapScanDesc scan, /* backward parallel scan not supported */ Assert(scan->rs_parallel == NULL); - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) + if (scan->rs_pagescan.rs_nblocks == 0 || scan->rs_pagescan.rs_numblocks == 0) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } @@ -564,30 +568,30 @@ heapgettup(HeapScanDesc scan, * time, and much more likely that we'll just bollix things for * forward scanners. 
*/ - scan->rs_syncscan = false; + scan->rs_pagescan.rs_syncscan = false; /* start from last page of the scan */ - if (scan->rs_startblock > 0) - page = scan->rs_startblock - 1; + if (scan->rs_pagescan.rs_startblock > 0) + page = scan->rs_pagescan.rs_startblock - 1; else - page = scan->rs_nblocks - 1; - heapgetpage(scan, page); + page = scan->rs_pagescan.rs_nblocks - 1; + heapgetpage((TableScanDesc) scan, page); } else { /* continue from previously returned page/tuple */ - page = scan->rs_cblock; /* current page */ + page = scan->rs_scan.rs_cblock; /* current page */ } - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_SHARE); - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(snapshot, scan->rs_rd, dp); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(snapshot, scan->rs_scan.rs_rd, dp); lines = PageGetMaxOffsetNumber(dp); - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { lineoff = lines; /* final offnum */ - scan->rs_inited = true; + scan->rs_scan.rs_inited = true; } else { @@ -603,20 +607,20 @@ heapgettup(HeapScanDesc scan, /* * ``no movement'' scan direction: refetch prior tuple */ - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } page = ItemPointerGetBlockNumber(&(tuple->t_self)); - if (page != scan->rs_cblock) - heapgetpage(scan, page); + if (page != scan->rs_scan.rs_cblock) + heapgetpage((TableScanDesc) scan, page); /* Since the tuple was previously fetched, needn't lock page here */ - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(snapshot, scan->rs_rd, dp); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(snapshot, scan->rs_scan.rs_rd, dp); lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self)); lpp = PageGetItemId(dp, lineoff); Assert(ItemIdIsNormal(lpp)); @@ -647,21 +651,21 @@ heapgettup(HeapScanDesc scan, /* * if current tuple qualifies, return it. */ - valid = HeapTupleSatisfiesVisibility(scan->rs_rd->rd_tableamroutine, + valid = HeapTupleSatisfiesVisibility(scan->rs_scan.rs_rd->rd_tableamroutine, tuple, snapshot, - scan->rs_cbuf); + scan->rs_scan.rs_cbuf); - CheckForSerializableConflictOut(valid, scan->rs_rd, tuple, - scan->rs_cbuf, snapshot); + CheckForSerializableConflictOut(valid, scan->rs_scan.rs_rd, tuple, + scan->rs_scan.rs_cbuf, snapshot); if (valid && key != NULL) - HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd), + HeapKeyTest(tuple, RelationGetDescr(scan->rs_scan.rs_rd), nkeys, key, valid); if (valid) { - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_UNLOCK); return; } } @@ -686,17 +690,17 @@ heapgettup(HeapScanDesc scan, * if we get here, it means we've exhausted the items on this page and * it's time to move to the next. */ - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_UNLOCK); /* * advance to next/prior page and detect end of scan */ if (backward) { - finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); + finished = (page == scan->rs_pagescan.rs_startblock) || + (scan->rs_pagescan.rs_numblocks != InvalidBlockNumber ? 
--scan->rs_pagescan.rs_numblocks == 0 : false); if (page == 0) - page = scan->rs_nblocks; + page = scan->rs_pagescan.rs_nblocks; page--; } else if (scan->rs_parallel != NULL) @@ -707,10 +711,10 @@ heapgettup(HeapScanDesc scan, else { page++; - if (page >= scan->rs_nblocks) + if (page >= scan->rs_pagescan.rs_nblocks) page = 0; - finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); + finished = (page == scan->rs_pagescan.rs_startblock) || + (scan->rs_pagescan.rs_numblocks != InvalidBlockNumber ? --scan->rs_pagescan.rs_numblocks == 0 : false); /* * Report our new scan position for synchronization purposes. We @@ -724,8 +728,8 @@ heapgettup(HeapScanDesc scan, * a little bit backwards on every invocation, which is confusing. * We don't guarantee any specific ordering in general, though. */ - if (scan->rs_syncscan) - ss_report_location(scan->rs_rd, page); + if (scan->rs_pagescan.rs_syncscan) + ss_report_location(scan->rs_scan.rs_rd, page); } /* @@ -733,21 +737,21 @@ heapgettup(HeapScanDesc scan, */ if (finished) { - if (BufferIsValid(scan->rs_cbuf)) - ReleaseBuffer(scan->rs_cbuf); - scan->rs_cbuf = InvalidBuffer; - scan->rs_cblock = InvalidBlockNumber; + if (BufferIsValid(scan->rs_scan.rs_cbuf)) + ReleaseBuffer(scan->rs_scan.rs_cbuf); + scan->rs_scan.rs_cbuf = InvalidBuffer; + scan->rs_scan.rs_cblock = InvalidBlockNumber; tuple->t_data = NULL; - scan->rs_inited = false; + scan->rs_scan.rs_inited = false; return; } - heapgetpage(scan, page); + heapgetpage((TableScanDesc) scan, page); - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_SHARE); - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(snapshot, scan->rs_rd, dp); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(snapshot, scan->rs_scan.rs_rd, dp); lines = PageGetMaxOffsetNumber((Page) dp); linesleft = lines; if (backward) @@ -798,14 +802,14 @@ heapgettup_pagemode(HeapScanDesc scan, */ if (ScanDirectionIsForward(dir)) { - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) + if (scan->rs_pagescan.rs_nblocks == 0 || scan->rs_pagescan.rs_numblocks == 0) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } @@ -818,28 +822,28 @@ heapgettup_pagemode(HeapScanDesc scan, /* Other processes might have already finished the scan. 
*/ if (page == InvalidBlockNumber) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } } else - page = scan->rs_startblock; /* first page */ - heapgetpage(scan, page); + page = scan->rs_pagescan.rs_startblock; /* first page */ + heapgetpage((TableScanDesc) scan, page); lineindex = 0; - scan->rs_inited = true; + scan->rs_scan.rs_inited = true; } else { /* continue from previously returned page/tuple */ - page = scan->rs_cblock; /* current page */ - lineindex = scan->rs_cindex + 1; + page = scan->rs_scan.rs_cblock; /* current page */ + lineindex = scan->rs_pagescan.rs_cindex + 1; } - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(scan->rs_snapshot, scan->rs_rd, dp); - lines = scan->rs_ntuples; + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_SHARE); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(scan->rs_scan.rs_snapshot, scan->rs_scan.rs_rd, dp); + lines = scan->rs_pagescan.rs_ntuples; /* page and lineindex now reference the next visible tid */ linesleft = lines - lineindex; @@ -849,14 +853,14 @@ heapgettup_pagemode(HeapScanDesc scan, /* backward parallel scan not supported */ Assert(scan->rs_parallel == NULL); - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) + if (scan->rs_pagescan.rs_nblocks == 0 || scan->rs_pagescan.rs_numblocks == 0) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } @@ -867,33 +871,33 @@ heapgettup_pagemode(HeapScanDesc scan, * time, and much more likely that we'll just bollix things for * forward scanners. 
*/ - scan->rs_syncscan = false; + scan->rs_pagescan.rs_syncscan = false; /* start from last page of the scan */ - if (scan->rs_startblock > 0) - page = scan->rs_startblock - 1; + if (scan->rs_pagescan.rs_startblock > 0) + page = scan->rs_pagescan.rs_startblock - 1; else - page = scan->rs_nblocks - 1; - heapgetpage(scan, page); + page = scan->rs_pagescan.rs_nblocks - 1; + heapgetpage((TableScanDesc) scan, page); } else { /* continue from previously returned page/tuple */ - page = scan->rs_cblock; /* current page */ + page = scan->rs_scan.rs_cblock; /* current page */ } - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(scan->rs_snapshot, scan->rs_rd, dp); - lines = scan->rs_ntuples; + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_SHARE); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(scan->rs_scan.rs_snapshot, scan->rs_scan.rs_rd, dp); + lines = scan->rs_pagescan.rs_ntuples; - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { lineindex = lines - 1; - scan->rs_inited = true; + scan->rs_scan.rs_inited = true; } else { - lineindex = scan->rs_cindex - 1; + lineindex = scan->rs_pagescan.rs_cindex - 1; } /* page and lineindex now reference the previous visible tid */ @@ -904,20 +908,20 @@ heapgettup_pagemode(HeapScanDesc scan, /* * ``no movement'' scan direction: refetch prior tuple */ - if (!scan->rs_inited) + if (!scan->rs_scan.rs_inited) { - Assert(!BufferIsValid(scan->rs_cbuf)); + Assert(!BufferIsValid(scan->rs_scan.rs_cbuf)); tuple->t_data = NULL; return; } page = ItemPointerGetBlockNumber(&(tuple->t_self)); - if (page != scan->rs_cblock) - heapgetpage(scan, page); + if (page != scan->rs_scan.rs_cblock) + heapgetpage((TableScanDesc) scan, page); /* Since the tuple was previously fetched, needn't lock page here */ - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(scan->rs_snapshot, scan->rs_rd, dp); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(scan->rs_scan.rs_snapshot, scan->rs_scan.rs_rd, dp); lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self)); lpp = PageGetItemId(dp, lineoff); Assert(ItemIdIsNormal(lpp)); @@ -926,8 +930,8 @@ heapgettup_pagemode(HeapScanDesc scan, tuple->t_len = ItemIdGetLength(lpp); /* check that rs_cindex is in sync */ - Assert(scan->rs_cindex < scan->rs_ntuples); - Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]); + Assert(scan->rs_pagescan.rs_cindex < scan->rs_pagescan.rs_ntuples); + Assert(lineoff == scan->rs_pagescan.rs_vistuples[scan->rs_pagescan.rs_cindex]); return; } @@ -940,7 +944,7 @@ heapgettup_pagemode(HeapScanDesc scan, { while (linesleft > 0) { - lineoff = scan->rs_vistuples[lineindex]; + lineoff = scan->rs_pagescan.rs_vistuples[lineindex]; lpp = PageGetItemId(dp, lineoff); Assert(ItemIdIsNormal(lpp)); @@ -951,7 +955,7 @@ heapgettup_pagemode(HeapScanDesc scan, /* * if current tuple qualifies, return it. */ - if (HeapTupleSatisfiesVisibility(scan->rs_rd->rd_tableamroutine, tuple, scan->rs_snapshot, scan->rs_cbuf)) + if (HeapTupleSatisfiesVisibility(scan->rs_scan.rs_rd->rd_tableamroutine, tuple, scan->rs_scan.rs_snapshot, scan->rs_scan.rs_cbuf)) { /* * if current tuple qualifies, return it. 
@@ -960,19 +964,19 @@ heapgettup_pagemode(HeapScanDesc scan, { bool valid; - HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd), + HeapKeyTest(tuple, RelationGetDescr(scan->rs_scan.rs_rd), nkeys, key, valid); if (valid) { - scan->rs_cindex = lineindex; - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + scan->rs_pagescan.rs_cindex = lineindex; + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_UNLOCK); return; } } else { - scan->rs_cindex = lineindex; - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + scan->rs_pagescan.rs_cindex = lineindex; + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_UNLOCK); return; } } @@ -991,7 +995,7 @@ heapgettup_pagemode(HeapScanDesc scan, * if we get here, it means we've exhausted the items on this page and * it's time to move to the next. */ - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_UNLOCK); /* * if we get here, it means we've exhausted the items on this page and @@ -999,10 +1003,10 @@ heapgettup_pagemode(HeapScanDesc scan, */ if (backward) { - finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); + finished = (page == scan->rs_pagescan.rs_startblock) || + (scan->rs_pagescan.rs_numblocks != InvalidBlockNumber ? --scan->rs_pagescan.rs_numblocks == 0 : false); if (page == 0) - page = scan->rs_nblocks; + page = scan->rs_pagescan.rs_nblocks; page--; } else if (scan->rs_parallel != NULL) @@ -1013,10 +1017,10 @@ heapgettup_pagemode(HeapScanDesc scan, else { page++; - if (page >= scan->rs_nblocks) + if (page >= scan->rs_pagescan.rs_nblocks) page = 0; - finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); + finished = (page == scan->rs_pagescan.rs_startblock) || + (scan->rs_pagescan.rs_numblocks != InvalidBlockNumber ? --scan->rs_pagescan.rs_numblocks == 0 : false); /* * Report our new scan position for synchronization purposes. We @@ -1030,8 +1034,8 @@ heapgettup_pagemode(HeapScanDesc scan, * a little bit backwards on every invocation, which is confusing. * We don't guarantee any specific ordering in general, though. 
*/ - if (scan->rs_syncscan) - ss_report_location(scan->rs_rd, page); + if (scan->rs_pagescan.rs_syncscan) + ss_report_location(scan->rs_scan.rs_rd, page); } /* @@ -1039,21 +1043,21 @@ heapgettup_pagemode(HeapScanDesc scan, */ if (finished) { - if (BufferIsValid(scan->rs_cbuf)) - ReleaseBuffer(scan->rs_cbuf); - scan->rs_cbuf = InvalidBuffer; - scan->rs_cblock = InvalidBlockNumber; + if (BufferIsValid(scan->rs_scan.rs_cbuf)) + ReleaseBuffer(scan->rs_scan.rs_cbuf); + scan->rs_scan.rs_cbuf = InvalidBuffer; + scan->rs_scan.rs_cblock = InvalidBlockNumber; tuple->t_data = NULL; - scan->rs_inited = false; + scan->rs_scan.rs_inited = false; return; } - heapgetpage(scan, page); + heapgetpage((TableScanDesc) scan, page); - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - dp = BufferGetPage(scan->rs_cbuf); - TestForOldSnapshot(scan->rs_snapshot, scan->rs_rd, dp); - lines = scan->rs_ntuples; + LockBuffer(scan->rs_scan.rs_cbuf, BUFFER_LOCK_SHARE); + dp = BufferGetPage(scan->rs_scan.rs_cbuf); + TestForOldSnapshot(scan->rs_scan.rs_snapshot, scan->rs_scan.rs_rd, dp); + lines = scan->rs_pagescan.rs_ntuples; linesleft = lines; if (backward) lineindex = lines - 1; @@ -1383,7 +1387,7 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode, return r; } -HeapScanDesc +TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, @@ -1410,12 +1414,12 @@ heap_beginscan(Relation relation, Snapshot snapshot, */ scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData)); - scan->rs_rd = relation; - scan->rs_snapshot = snapshot; - scan->rs_nkeys = nkeys; + scan->rs_scan.rs_rd = relation; + scan->rs_scan.rs_snapshot = snapshot; + scan->rs_scan.rs_nkeys = nkeys; scan->rs_bitmapscan = is_bitmapscan; scan->rs_samplescan = is_samplescan; - scan->rs_strategy = NULL; /* set in initscan */ + scan->rs_pagescan.rs_strategy = NULL; /* set in initscan */ scan->rs_allow_strat = allow_strat; scan->rs_allow_sync = allow_sync; scan->rs_temp_snap = temp_snap; @@ -1424,7 +1428,7 @@ heap_beginscan(Relation relation, Snapshot snapshot, /* * we can use page-at-a-time mode if it's an MVCC-safe snapshot */ - scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot); + scan->rs_pagescan.rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot); /* * For a seqscan in a serializable transaction, acquire a predicate lock @@ -1448,13 +1452,13 @@ heap_beginscan(Relation relation, Snapshot snapshot, * initscan() and we don't want to allocate memory again */ if (nkeys > 0) - scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys); + scan->rs_scan.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys); else - scan->rs_key = NULL; + scan->rs_scan.rs_key = NULL; initscan(scan, key, false); - return scan; + return (TableScanDesc) scan; } /* ---------------- @@ -1462,21 +1466,23 @@ heap_beginscan(Relation relation, Snapshot snapshot, * ---------------- */ void -heap_rescan(HeapScanDesc scan, ScanKey key, bool set_params, +heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode) { + HeapScanDesc scan = (HeapScanDesc) sscan; + if (set_params) { scan->rs_allow_strat = allow_strat; scan->rs_allow_sync = allow_sync; - scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_snapshot); + scan->rs_pagescan.rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_scan.rs_snapshot); } /* * unpin scan buffers */ - if (BufferIsValid(scan->rs_cbuf)) - ReleaseBuffer(scan->rs_cbuf); + if 
(BufferIsValid(scan->rs_scan.rs_cbuf)) + ReleaseBuffer(scan->rs_scan.rs_cbuf); /* * reinitialize scan descriptor @@ -1507,29 +1513,31 @@ heap_rescan(HeapScanDesc scan, ScanKey key, bool set_params, * ---------------- */ void -heap_endscan(HeapScanDesc scan) +heap_endscan(TableScanDesc sscan) { + HeapScanDesc scan = (HeapScanDesc) sscan; + /* Note: no locking manipulations needed */ /* * unpin scan buffers */ - if (BufferIsValid(scan->rs_cbuf)) - ReleaseBuffer(scan->rs_cbuf); + if (BufferIsValid(scan->rs_scan.rs_cbuf)) + ReleaseBuffer(scan->rs_scan.rs_cbuf); /* * decrement relation reference count and free scan descriptor storage */ - RelationDecrementReferenceCount(scan->rs_rd); + RelationDecrementReferenceCount(scan->rs_scan.rs_rd); - if (scan->rs_key) - pfree(scan->rs_key); + if (scan->rs_scan.rs_key) + pfree(scan->rs_scan.rs_key); - if (scan->rs_strategy != NULL) - FreeAccessStrategy(scan->rs_strategy); + if (scan->rs_pagescan.rs_strategy != NULL) + FreeAccessStrategy(scan->rs_pagescan.rs_strategy); if (scan->rs_temp_snap) - UnregisterSnapshot(scan->rs_snapshot); + UnregisterSnapshot(scan->rs_scan.rs_snapshot); pfree(scan); } @@ -1634,7 +1642,7 @@ retry: else { SpinLockRelease(¶llel_scan->phs_mutex); - sync_startpage = ss_get_location(scan->rs_rd, scan->rs_nblocks); + sync_startpage = ss_get_location(scan->rs_scan.rs_rd, scan->rs_pagescan.rs_nblocks); goto retry; } } @@ -1676,10 +1684,10 @@ heap_parallelscan_nextpage(HeapScanDesc scan) * starting block number, modulo nblocks. */ nallocated = pg_atomic_fetch_add_u64(¶llel_scan->phs_nallocated, 1); - if (nallocated >= scan->rs_nblocks) + if (nallocated >= scan->rs_pagescan.rs_nblocks) page = InvalidBlockNumber; /* all blocks have been allocated */ else - page = (nallocated + parallel_scan->phs_startblock) % scan->rs_nblocks; + page = (nallocated + parallel_scan->phs_startblock) % scan->rs_pagescan.rs_nblocks; /* * Report scan location. Normally, we report the current page number. @@ -1688,12 +1696,12 @@ heap_parallelscan_nextpage(HeapScanDesc scan) * doesn't slew backwards. We only report the position at the end of the * scan once, though: subsequent callers will report nothing. 
*/ - if (scan->rs_syncscan) + if (scan->rs_pagescan.rs_syncscan) { if (page != InvalidBlockNumber) - ss_report_location(scan->rs_rd, page); - else if (nallocated == scan->rs_nblocks) - ss_report_location(scan->rs_rd, parallel_scan->phs_startblock); + ss_report_location(scan->rs_scan.rs_rd, page); + else if (nallocated == scan->rs_pagescan.rs_nblocks) + ss_report_location(scan->rs_scan.rs_rd, parallel_scan->phs_startblock); } return page; @@ -1706,12 +1714,14 @@ heap_parallelscan_nextpage(HeapScanDesc scan) * ---------------- */ void -heap_update_snapshot(HeapScanDesc scan, Snapshot snapshot) +heap_update_snapshot(TableScanDesc sscan, Snapshot snapshot) { + HeapScanDesc scan = (HeapScanDesc) sscan; + Assert(IsMVCCSnapshot(snapshot)); RegisterSnapshot(snapshot); - scan->rs_snapshot = snapshot; + scan->rs_scan.rs_snapshot = snapshot; scan->rs_temp_snap = true; } @@ -1739,17 +1749,19 @@ heap_update_snapshot(HeapScanDesc scan, Snapshot snapshot) #endif /* !defined(HEAPDEBUGALL) */ TableTuple -heap_getnext(HeapScanDesc scan, ScanDirection direction) +heap_getnext(TableScanDesc sscan, ScanDirection direction) { + HeapScanDesc scan = (HeapScanDesc) sscan; + /* Note: no locking manipulations needed */ HEAPDEBUG_1; /* heap_getnext( info ) */ - if (scan->rs_pageatatime) + if (scan->rs_pagescan.rs_pageatatime) heapgettup_pagemode(scan, direction, - scan->rs_nkeys, scan->rs_key); + scan->rs_scan.rs_nkeys, scan->rs_scan.rs_key); else - heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key); + heapgettup(scan, direction, scan->rs_scan.rs_nkeys, scan->rs_scan.rs_key); if (scan->rs_ctup.t_data == NULL) { @@ -1763,7 +1775,7 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction) */ HEAPDEBUG_3; /* heap_getnext returning tuple */ - pgstat_count_heap_getnext(scan->rs_rd); + pgstat_count_heap_getnext(scan->rs_scan.rs_rd); return heap_copytuple(&(scan->rs_ctup)); } @@ -1771,7 +1783,7 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction) #ifdef HEAPAMSLOTDEBUGALL #define HEAPAMSLOTDEBUG_1 \ elog(DEBUG2, "heapam_getnext([%s,nkeys=%d],dir=%d) called", \ - RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction) + RelationGetRelationName(scan->rs_scan.rs_rd), scan->rs_scan.rs_nkeys, (int) direction) #define HEAPAMSLOTDEBUG_2 \ elog(DEBUG2, "heapam_getnext returning EOS") #define HEAPAMSLOTDEBUG_3 \ @@ -1783,7 +1795,7 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction) #endif TupleTableSlot * -heap_getnextslot(HeapScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) +heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -1791,11 +1803,11 @@ heap_getnextslot(HeapScanDesc sscan, ScanDirection direction, TupleTableSlot *sl HEAPAMSLOTDEBUG_1; /* heap_getnext( info ) */ - if (scan->rs_pageatatime) + if (scan->rs_pagescan.rs_pageatatime) heapgettup_pagemode(scan, direction, - scan->rs_nkeys, scan->rs_key); + scan->rs_scan.rs_nkeys, scan->rs_scan.rs_key); else - heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key); + heapgettup(scan, direction, scan->rs_scan.rs_nkeys, scan->rs_scan.rs_key); if (scan->rs_ctup.t_data == NULL) { @@ -1810,7 +1822,7 @@ heap_getnextslot(HeapScanDesc sscan, ScanDirection direction, TupleTableSlot *sl */ HEAPAMSLOTDEBUG_3; /* heap_getnext returning tuple */ - pgstat_count_heap_getnext(scan->rs_rd); + pgstat_count_heap_getnext(scan->rs_scan.rs_rd); return ExecStoreTuple(heap_copytuple(&(scan->rs_ctup)), slot, InvalidBuffer, true); } diff --git 
a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index 010fef208e..e52ff51190 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -21,7 +21,9 @@ #include "postgres.h" #include "access/heapam.h" +#include "access/relscan.h" #include "access/tableamapi.h" +#include "pgstat.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/rel.h" @@ -298,6 +300,44 @@ heapam_form_tuple_by_datum(Datum data, Oid tableoid) return heap_form_tuple_by_datum(data, tableoid); } +static ParallelHeapScanDesc +heapam_get_parallelheapscandesc(TableScanDesc sscan) +{ + HeapScanDesc scan = (HeapScanDesc) sscan; + + return scan->rs_parallel; +} + +static HeapPageScanDesc +heapam_get_heappagescandesc(TableScanDesc sscan) +{ + HeapScanDesc scan = (HeapScanDesc) sscan; + + return &scan->rs_pagescan; +} + +static TableTuple +heapam_fetch_tuple_from_offset(TableScanDesc sscan, BlockNumber blkno, OffsetNumber offset) +{ + HeapScanDesc scan = (HeapScanDesc) sscan; + Page dp; + ItemId lp; + + dp = (Page) BufferGetPage(scan->rs_scan.rs_cbuf); + lp = PageGetItemId(dp, offset); + Assert(ItemIdIsNormal(lp)); + + scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); + scan->rs_ctup.t_len = ItemIdGetLength(lp); + scan->rs_ctup.t_tableOid = scan->rs_scan.rs_rd->rd_id; + ItemPointerSet(&scan->rs_ctup.t_self, blkno, offset); + + pgstat_count_heap_fetch(scan->rs_scan.rs_rd); + + return &(scan->rs_ctup); +} + + Datum heap_tableam_handler(PG_FUNCTION_ARGS) { @@ -318,6 +358,19 @@ heap_tableam_handler(PG_FUNCTION_ARGS) amroutine->scan_rescan = heap_rescan; amroutine->scan_update_snapshot = heap_update_snapshot; amroutine->hot_search_buffer = heap_hot_search_buffer; + amroutine->scan_fetch_tuple_from_offset = heapam_fetch_tuple_from_offset; + + /* + * The following routine needs to be provided when the storage supports + * parallel sequential scan + */ + amroutine->scan_get_parallelheapscandesc = heapam_get_parallelheapscandesc; + + /* + * The following routine needs to be provided when the storage supports + * BitmapHeap and Sample Scans + */ + amroutine->scan_get_heappagescandesc = heapam_get_heappagescandesc; amroutine->tuple_fetch = heapam_fetch; amroutine->tuple_insert = heapam_heap_insert; diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 4b709a65ac..736701bfe9 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -481,10 +481,10 @@ systable_recheck_tuple(SysScanDesc sysscan, HeapTuple tup) } else { - HeapScanDesc scan = sysscan->scan; + TableScanDesc scan = sysscan->scan; Assert(IsMVCCSnapshot(scan->rs_snapshot)); - Assert(tup == &scan->rs_ctup); + /* hari Assert(tup == &scan->rs_ctup); */ Assert(BufferIsValid(scan->rs_cbuf)); /* must hold a buffer lock to call HeapTupleSatisfiesVisibility */ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 106373fb7d..1187900373 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -1632,7 +1632,7 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2, { SortCoordinate coordinate; BTBuildState buildstate; - HeapScanDesc scan; + TableScanDesc scan; double reltuples; IndexInfo *indexInfo; diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index ace187ba24..ec5fe5f45a 100644 --- a/src/backend/access/table/tableam.c +++
b/src/backend/access/table/tableam.c @@ -56,7 +56,7 @@ table_lock_tuple(Relation relation, ItemPointer tid, TableTuple * stuple, * Caller must hold a suitable lock on the correct relation. * ---------------- */ -HeapScanDesc +TableScanDesc table_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan) { Snapshot snapshot; @@ -79,6 +79,25 @@ table_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan) true, true, true, false, false, !parallel_scan->phs_snapshot_any); } +ParallelHeapScanDesc +tableam_get_parallelheapscandesc(TableScanDesc sscan) +{ + return sscan->rs_rd->rd_tableamroutine->scan_get_parallelheapscandesc(sscan); +} + +HeapPageScanDesc +tableam_get_heappagescandesc(TableScanDesc sscan) +{ + /* + * The planner should have already validated that the current storage + * supports page scans. This function will be called only from Bitmap + * Heap and Sample scans. + */ + Assert(sscan->rs_rd->rd_tableamroutine->scan_get_heappagescandesc != NULL); + + return sscan->rs_rd->rd_tableamroutine->scan_get_heappagescandesc(sscan); +} + /* * heap_setscanlimits - restrict range of a heapscan * @@ -86,7 +105,7 @@ table_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan) * numBlks is number of pages to scan (InvalidBlockNumber means "all") */ void -table_setscanlimits(HeapScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks) +table_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks) { sscan->rs_rd->rd_tableamroutine->scansetlimits(sscan, startBlk, numBlks); } @@ -105,18 +124,18 @@ table_setscanlimits(HeapScanDesc sscan, BlockNumber startBlk, BlockNumber numBlk * block zero). Both of these default to true with plain heap_beginscan. * * heap_beginscan_bm is an alternative entry point for setting up a - * HeapScanDesc for a bitmap heap scan. Although that scan technology is + * TableScanDesc for a bitmap heap scan. Although that scan technology is * really quite unlike a standard seqscan, there is just enough commonality * to make it worth using the same data structure. * * heap_beginscan_sampling is an alternative entry point for setting up a - * HeapScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth + * TableScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth * using the same data structure although the behavior is rather different. * In addition to the options offered by heap_beginscan_strat, this call * also allows control of whether page-mode visibility checking is used.
* ---------------- */ -HeapScanDesc +TableScanDesc table_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key) { @@ -124,7 +143,7 @@ table_beginscan(Relation relation, Snapshot snapshot, true, true, true, false, false, false); } -HeapScanDesc +TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, ScanKey key) { Oid relid = RelationGetRelid(relation); @@ -134,7 +153,7 @@ table_beginscan_catalog(Relation relation, int nkeys, ScanKey key) true, true, true, false, false, true); } -HeapScanDesc +TableScanDesc table_beginscan_strat(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync) @@ -144,7 +163,7 @@ table_beginscan_strat(Relation relation, Snapshot snapshot, false, false, false); } -HeapScanDesc +TableScanDesc table_beginscan_bm(Relation relation, Snapshot snapshot, int nkeys, ScanKey key) { @@ -152,7 +171,7 @@ table_beginscan_bm(Relation relation, Snapshot snapshot, false, false, true, true, false, false); } -HeapScanDesc +TableScanDesc table_beginscan_sampling(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode) @@ -167,7 +186,7 @@ table_beginscan_sampling(Relation relation, Snapshot snapshot, * ---------------- */ void -table_rescan(HeapScanDesc scan, +table_rescan(TableScanDesc scan, ScanKey key) { scan->rs_rd->rd_tableamroutine->scan_rescan(scan, key, false, false, false, false); @@ -183,7 +202,7 @@ table_rescan(HeapScanDesc scan, * ---------------- */ void -table_rescan_set_params(HeapScanDesc scan, ScanKey key, +table_rescan_set_params(TableScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode) { scan->rs_rd->rd_tableamroutine->scan_rescan(scan, key, true, @@ -198,7 +217,7 @@ table_rescan_set_params(HeapScanDesc scan, ScanKey key, * ---------------- */ void -table_endscan(HeapScanDesc scan) +table_endscan(TableScanDesc scan) { scan->rs_rd->rd_tableamroutine->scan_end(scan); } @@ -211,23 +230,30 @@ table_endscan(HeapScanDesc scan) * ---------------- */ void -table_scan_update_snapshot(HeapScanDesc scan, Snapshot snapshot) +table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot) { scan->rs_rd->rd_tableamroutine->scan_update_snapshot(scan, snapshot); } TableTuple -table_scan_getnext(HeapScanDesc sscan, ScanDirection direction) +table_scan_getnext(TableScanDesc sscan, ScanDirection direction) { return sscan->rs_rd->rd_tableamroutine->scan_getnext(sscan, direction); } TupleTableSlot * -table_scan_getnextslot(HeapScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) +table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) { return sscan->rs_rd->rd_tableamroutine->scan_getnextslot(sscan, direction, slot); } +TableTuple +table_tuple_fetch_from_offset(TableScanDesc sscan, BlockNumber blkno, OffsetNumber offset) +{ + return sscan->rs_rd->rd_tableamroutine->scan_fetch_tuple_from_offset(sscan, blkno, offset); +} + + /* * Insert a tuple from a slot into table AM routine */ diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c index f888e04f40..8a9e7056eb 100644 --- a/src/backend/access/tablesample/system.c +++ b/src/backend/access/tablesample/system.c @@ -183,7 +183,7 @@ static BlockNumber system_nextsampleblock(SampleScanState *node) { SystemSamplerData *sampler = (SystemSamplerData *) node->tsm_state; - HeapScanDesc scan = node->ss.ss_currentScanDesc; + HeapPageScanDesc scan = node->pagescan; BlockNumber nextblock = sampler->nextblock; 
uint32 hashinput[2]; diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index dde8c1f0a7..988eb4f6b3 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -579,7 +579,7 @@ boot_openrel(char *relname) int i; struct typmap **app; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tup; if (strlen(relname) >= NAMEDATALEN) @@ -895,7 +895,7 @@ gettype(char *type) { int i; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tup; struct typmap **app; diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 08ef93a8eb..afae614f91 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -823,7 +823,7 @@ objectsInSchemaToOids(ObjectType objtype, List *nspnames) ScanKeyData key[2]; int keycount; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; keycount = 0; @@ -877,7 +877,7 @@ getRelationsInNamespace(Oid namespaceId, char relkind) List *relations = NIL; ScanKeyData key[2]; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; ScanKeyInit(&key[0], diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 0f31815638..2691095b87 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -2124,7 +2124,7 @@ index_update_stats(Relation rel, ReindexIsProcessingHeap(RelationRelationId)) { /* don't assume syscache will work */ - HeapScanDesc pg_class_scan; + TableScanDesc pg_class_scan; ScanKeyData key[1]; ScanKeyInit(&key[0], @@ -2422,7 +2422,7 @@ IndexBuildHeapScan(Relation heapRelation, bool allow_sync, IndexBuildCallback callback, void *callback_state, - HeapScanDesc scan) + TableScanDesc scan) { return IndexBuildHeapRangeScan(heapRelation, indexRelation, indexInfo, allow_sync, @@ -2451,7 +2451,7 @@ IndexBuildHeapRangeScan(Relation heapRelation, BlockNumber numblocks, IndexBuildCallback callback, void *callback_state, - HeapScanDesc scan) + TableScanDesc scan) { bool is_system_catalog; bool checking_uniqueness; @@ -2959,7 +2959,7 @@ IndexCheckExclusion(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo) { - HeapScanDesc scan; + TableScanDesc scan; HeapTuple heapTuple; Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; @@ -3273,7 +3273,7 @@ validate_index_heapscan(Relation heapRelation, Snapshot snapshot, v_i_state *state) { - HeapScanDesc scan; + TableScanDesc scan; HeapTuple heapTuple; Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c index 86f426ef32..8ae4b6629b 100644 --- a/src/backend/catalog/pg_conversion.c +++ b/src/backend/catalog/pg_conversion.c @@ -150,7 +150,7 @@ RemoveConversionById(Oid conversionOid) { Relation rel; HeapTuple tuple; - HeapScanDesc scan; + TableScanDesc scan; ScanKeyData scanKeyData; ScanKeyInit(&scanKeyData, diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c index 7450bf0278..06cde51d4b 100644 --- a/src/backend/catalog/pg_db_role_setting.c +++ b/src/backend/catalog/pg_db_role_setting.c @@ -171,7 +171,7 @@ void DropSetting(Oid databaseid, Oid roleid) { Relation relsetting; - HeapScanDesc scan; + TableScanDesc scan; ScanKeyData keys[2]; HeapTuple tup; int numkeys = 0; diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index e565a14418..cf55518137 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -323,7 +323,7 
@@ GetAllTablesPublicationRelations(void) { Relation classRel; ScanKeyData key[1]; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; List *result = NIL; diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c index 4e42b10c47..9150439861 100644 --- a/src/backend/catalog/pg_subscription.c +++ b/src/backend/catalog/pg_subscription.c @@ -392,7 +392,7 @@ void RemoveSubscriptionRel(Oid subid, Oid relid) { Relation rel; - HeapScanDesc scan; + TableScanDesc scan; ScanKeyData skey[2]; HeapTuple tup; int nkeys = 0; diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index fcaec72026..2148b44ee3 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -763,7 +763,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, Datum *values; bool *isnull; IndexScanDesc indexScan; - HeapScanDesc heapScan; + TableScanDesc heapScan; bool use_wal; bool is_system_catalog; TransactionId OldestXmin; @@ -1693,7 +1693,7 @@ static List * get_tables_to_cluster(MemoryContext cluster_context) { Relation indRelation; - HeapScanDesc scan; + TableScanDesc scan; ScanKeyData entry; HeapTuple indexTuple; Form_pg_index index; diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 0fbfcf1c78..f9af5acda6 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -2046,7 +2046,7 @@ CopyTo(CopyState cstate) { Datum *values; bool *nulls; - HeapScanDesc scandesc; + TableScanDesc scandesc; HeapTuple tuple; values = (Datum *) palloc(num_phys_attrs * sizeof(Datum)); diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 1ccc123b61..b9f33f1e29 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -99,7 +99,7 @@ static int errdetail_busy_db(int notherbackends, int npreparedxacts); Oid createdb(ParseState *pstate, const CreatedbStmt *stmt) { - HeapScanDesc scan; + TableScanDesc scan; Relation rel; Oid src_dboid; Oid src_owner; @@ -1872,7 +1872,7 @@ static void remove_dbtablespaces(Oid db_id) { Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; rel = heap_open(TableSpaceRelationId, AccessShareLock); @@ -1939,7 +1939,7 @@ check_db_file_conflict(Oid db_id) { bool result = false; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; rel = heap_open(TableSpaceRelationId, AccessShareLock); diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index b830dc9992..8533f37212 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -2292,7 +2292,7 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, { Oid objectOid; Relation relationRelation; - HeapScanDesc scan; + TableScanDesc scan; ScanKeyData scan_keys[1]; HeapTuple tuple; MemoryContext private_context; diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 398d5d27e3..bd9280b20a 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -4654,7 +4654,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) bool *isnull; TupleTableSlot *oldslot; TupleTableSlot *newslot; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; MemoryContext oldCxt; List *dropped_attrs = NIL; @@ -5230,7 +5230,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be { Relation classRel; ScanKeyData key[1]; - HeapScanDesc scan; + TableScanDesc scan; 
HeapTuple tuple; List *result = NIL; @@ -8435,7 +8435,7 @@ validateCheckConstraint(Relation rel, HeapTuple constrtup) Expr *origexpr; ExprState *exprstate; TupleDesc tupdesc; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; ExprContext *econtext; MemoryContext oldcxt; @@ -8518,7 +8518,7 @@ validateForeignKeyConstraint(char *conname, Oid pkindOid, Oid constraintOid) { - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; Trigger trig; Snapshot snapshot; @@ -11071,7 +11071,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) ListCell *l; ScanKeyData key[1]; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; Oid orig_tablespaceoid; Oid new_tablespaceoid; diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index dd721c63a9..0e7e2d65d2 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -404,7 +404,7 @@ DropTableSpace(DropTableSpaceStmt *stmt) { #ifdef HAVE_SYMLINK char *tablespacename = stmt->tablespacename; - HeapScanDesc scandesc; + TableScanDesc scandesc; Relation rel; HeapTuple tuple; ScanKeyData entry[1]; @@ -915,7 +915,7 @@ RenameTableSpace(const char *oldname, const char *newname) Oid tspId; Relation rel; ScanKeyData entry[1]; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tup; HeapTuple newtuple; Form_pg_tablespace newform; @@ -990,7 +990,7 @@ AlterTableSpaceOptions(AlterTableSpaceOptionsStmt *stmt) { Relation rel; ScanKeyData entry[1]; - HeapScanDesc scandesc; + TableScanDesc scandesc; HeapTuple tup; Oid tablespaceoid; Datum datum; @@ -1384,7 +1384,7 @@ get_tablespace_oid(const char *tablespacename, bool missing_ok) { Oid result; Relation rel; - HeapScanDesc scandesc; + TableScanDesc scandesc; HeapTuple tuple; ScanKeyData entry[1]; @@ -1430,7 +1430,7 @@ get_tablespace_name(Oid spc_oid) { char *result; Relation rel; - HeapScanDesc scandesc; + TableScanDesc scandesc; HeapTuple tuple; ScanKeyData entry[1]; diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 6bd67639d1..661ebda56b 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -2378,7 +2378,7 @@ AlterDomainNotNull(List *names, bool notNull) RelToCheck *rtc = (RelToCheck *) lfirst(rt); Relation testrel = rtc->rel; TupleDesc tupdesc = RelationGetDescr(testrel); - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; Snapshot snapshot; @@ -2774,7 +2774,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin) RelToCheck *rtc = (RelToCheck *) lfirst(rt); Relation testrel = rtc->rel; TupleDesc tupdesc = RelationGetDescr(testrel); - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; Snapshot snapshot; diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 5f2069902a..e3da71f9bc 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -529,7 +529,7 @@ get_all_vacuum_rels(void) { List *vacrels = NIL; Relation pgclass; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; pgclass = heap_open(RelationRelationId, AccessShareLock); @@ -1160,7 +1160,7 @@ vac_truncate_clog(TransactionId frozenXID, { TransactionId nextXID = ReadNewTransactionId(); Relation relation; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tuple; Oid oldestxid_datoid; Oid minmulti_datoid; diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 45c9baf6c8..7e29030dfd 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -55,14 
+55,14 @@ static TupleTableSlot *BitmapHeapNext(BitmapHeapScanState *node); -static void bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres); +static void bitgetpage(BitmapHeapScanState *node, TBMIterateResult *tbmres); static inline void BitmapDoneInitializingSharedState( ParallelBitmapHeapState *pstate); static inline void BitmapAdjustPrefetchIterator(BitmapHeapScanState *node, TBMIterateResult *tbmres); static inline void BitmapAdjustPrefetchTarget(BitmapHeapScanState *node); static inline void BitmapPrefetch(BitmapHeapScanState *node, - HeapScanDesc scan); + TableScanDesc scan); static bool BitmapShouldInitializeSharedState( ParallelBitmapHeapState *pstate); @@ -77,7 +77,8 @@ static TupleTableSlot * BitmapHeapNext(BitmapHeapScanState *node) { ExprContext *econtext; - HeapScanDesc scan; + TableScanDesc scan; + HeapPageScanDesc pagescan; TIDBitmap *tbm; TBMIterator *tbmiterator = NULL; TBMSharedIterator *shared_tbmiterator = NULL; @@ -93,6 +94,7 @@ BitmapHeapNext(BitmapHeapScanState *node) econtext = node->ss.ps.ps_ExprContext; slot = node->ss.ss_ScanTupleSlot; scan = node->ss.ss_currentScanDesc; + pagescan = node->pagescan; tbm = node->tbm; if (pstate == NULL) tbmiterator = node->tbmiterator; @@ -192,8 +194,7 @@ BitmapHeapNext(BitmapHeapScanState *node) for (;;) { - Page dp; - ItemId lp; + TableTuple tuple; CHECK_FOR_INTERRUPTS(); @@ -220,7 +221,7 @@ BitmapHeapNext(BitmapHeapScanState *node) * least AccessShareLock on the table before performing any of the * indexscans, but let's be safe.) */ - if (tbmres->blockno >= scan->rs_nblocks) + if (tbmres->blockno >= pagescan->rs_nblocks) { node->tbmres = tbmres = NULL; continue; @@ -243,14 +244,14 @@ BitmapHeapNext(BitmapHeapScanState *node) * The number of tuples on this page is put into * scan->rs_ntuples; note we don't fill scan->rs_vistuples. */ - scan->rs_ntuples = tbmres->ntuples; + pagescan->rs_ntuples = tbmres->ntuples; } else { /* * Fetch the current heap page and identify candidate tuples. */ - bitgetpage(scan, tbmres); + bitgetpage(node, tbmres); } if (tbmres->ntuples >= 0) @@ -261,7 +262,7 @@ BitmapHeapNext(BitmapHeapScanState *node) /* * Set rs_cindex to first slot to examine */ - scan->rs_cindex = 0; + pagescan->rs_cindex = 0; /* Adjust the prefetch target */ BitmapAdjustPrefetchTarget(node); @@ -271,7 +272,7 @@ BitmapHeapNext(BitmapHeapScanState *node) /* * Continuing in previously obtained page; advance rs_cindex */ - scan->rs_cindex++; + pagescan->rs_cindex++; #ifdef USE_PREFETCH @@ -298,7 +299,7 @@ BitmapHeapNext(BitmapHeapScanState *node) /* * Out of range? If so, nothing more to look at on this page */ - if (scan->rs_cindex < 0 || scan->rs_cindex >= scan->rs_ntuples) + if (pagescan->rs_cindex < 0 || pagescan->rs_cindex >= pagescan->rs_ntuples) { node->tbmres = tbmres = NULL; continue; @@ -325,23 +326,14 @@ BitmapHeapNext(BitmapHeapScanState *node) /* * Okay to fetch the tuple. */ - targoffset = scan->rs_vistuples[scan->rs_cindex]; - dp = (Page) BufferGetPage(scan->rs_cbuf); - lp = PageGetItemId(dp, targoffset); - Assert(ItemIdIsNormal(lp)); - - scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); - scan->rs_ctup.t_len = ItemIdGetLength(lp); - scan->rs_ctup.t_tableOid = scan->rs_rd->rd_id; - ItemPointerSet(&scan->rs_ctup.t_self, tbmres->blockno, targoffset); - - pgstat_count_heap_fetch(scan->rs_rd); + targoffset = pagescan->rs_vistuples[pagescan->rs_cindex]; + tuple = table_tuple_fetch_from_offset(scan, tbmres->blockno, targoffset); /* * Set up the result slot to point to this tuple. 
Note that the * slot acquires a pin on the buffer. */ - ExecStoreTuple(&scan->rs_ctup, + ExecStoreTuple(tuple, slot, scan->rs_cbuf, false); @@ -381,8 +373,10 @@ BitmapHeapNext(BitmapHeapScanState *node) * interesting according to the bitmap, and visible according to the snapshot. */ static void -bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) +bitgetpage(BitmapHeapScanState *node, TBMIterateResult *tbmres) { + TableScanDesc scan = node->ss.ss_currentScanDesc; + HeapPageScanDesc pagescan = node->pagescan; BlockNumber page = tbmres->blockno; Buffer buffer; Snapshot snapshot; @@ -391,7 +385,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) /* * Acquire pin on the target heap page, trading in any pin we held before. */ - Assert(page < scan->rs_nblocks); + Assert(page < pagescan->rs_nblocks); scan->rs_cbuf = ReleaseAndReadBuffer(scan->rs_cbuf, scan->rs_rd, @@ -434,7 +428,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) ItemPointerSet(&tid, page, offnum); if (table_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot, &heapTuple, NULL, true)) - scan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid); + pagescan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid); } } else @@ -450,23 +444,21 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { ItemId lp; - HeapTupleData loctup; + TableTuple loctup; bool valid; lp = PageGetItemId(dp, offnum); if (!ItemIdIsNormal(lp)) continue; - loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); - loctup.t_len = ItemIdGetLength(lp); - loctup.t_tableOid = scan->rs_rd->rd_id; - ItemPointerSet(&loctup.t_self, page, offnum); - valid = HeapTupleSatisfiesVisibility(scan->rs_rd->rd_tableamroutine, &loctup, snapshot, buffer); + + loctup = table_tuple_fetch_from_offset(scan, page, offnum); + valid = HeapTupleSatisfiesVisibility(scan->rs_rd->rd_tableamroutine, loctup, snapshot, buffer); if (valid) { - scan->rs_vistuples[ntup++] = offnum; - PredicateLockTuple(scan->rs_rd, &loctup, snapshot); + pagescan->rs_vistuples[ntup++] = offnum; + PredicateLockTuple(scan->rs_rd, loctup, snapshot); } - CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup, + CheckForSerializableConflictOut(valid, scan->rs_rd, loctup, buffer, snapshot); } } @@ -474,7 +466,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) LockBuffer(buffer, BUFFER_LOCK_UNLOCK); Assert(ntup <= MaxHeapTuplesPerPage); - scan->rs_ntuples = ntup; + pagescan->rs_ntuples = ntup; } /* @@ -600,7 +592,7 @@ BitmapAdjustPrefetchTarget(BitmapHeapScanState *node) * BitmapPrefetch - Prefetch, if prefetch_pages are behind prefetch_target */ static inline void -BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan) +BitmapPrefetch(BitmapHeapScanState *node, TableScanDesc scan) { #ifdef USE_PREFETCH ParallelBitmapHeapState *pstate = node->pstate; @@ -788,7 +780,7 @@ void ExecEndBitmapHeapScan(BitmapHeapScanState *node) { Relation relation; - HeapScanDesc scanDesc; + TableScanDesc scanDesc; /* * extract information from the node @@ -969,6 +961,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) 0, NULL); + scanstate->pagescan = tableam_get_heappagescandesc(scanstate->ss.ss_currentScanDesc); + /* * all done. 
*/ diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c index b3eda4e751..581494df04 100644 --- a/src/backend/executor/nodeSamplescan.c +++ b/src/backend/executor/nodeSamplescan.c @@ -30,9 +30,8 @@ static TupleTableSlot *SampleNext(SampleScanState *node); static void tablesample_init(SampleScanState *scanstate); static TableTuple tablesample_getnext(SampleScanState *scanstate); static bool SampleTupleVisible(TableTuple tuple, OffsetNumber tupoffset, - HeapScanDesc scan); + SampleScanState *scanstate); -/* hari */ /* ---------------------------------------------------------------- * Scan Support @@ -335,6 +334,7 @@ tablesample_init(SampleScanState *scanstate) scanstate->use_bulkread, allow_sync, scanstate->use_pagemode); + scanstate->pagescan = tableam_get_heappagescandesc(scanstate->ss.ss_currentScanDesc); } else { @@ -360,10 +360,11 @@ static TableTuple tablesample_getnext(SampleScanState *scanstate) { TsmRoutine *tsm = scanstate->tsmroutine; - HeapScanDesc scan = scanstate->ss.ss_currentScanDesc; - HeapTuple tuple = &(scan->rs_ctup); + TableScanDesc scan = scanstate->ss.ss_currentScanDesc; + HeapPageScanDesc pagescan = scanstate->pagescan; + TableTuple tuple; Snapshot snapshot = scan->rs_snapshot; - bool pagemode = scan->rs_pageatatime; + bool pagemode = pagescan->rs_pageatatime; BlockNumber blockno; Page page; bool all_visible; @@ -374,10 +375,9 @@ tablesample_getnext(SampleScanState *scanstate) /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0) + if (pagescan->rs_nblocks == 0) { Assert(!BufferIsValid(scan->rs_cbuf)); - tuple->t_data = NULL; return NULL; } if (tsm->NextSampleBlock) @@ -385,13 +385,12 @@ tablesample_getnext(SampleScanState *scanstate) blockno = tsm->NextSampleBlock(scanstate); if (!BlockNumberIsValid(blockno)) { - tuple->t_data = NULL; return NULL; } } else - blockno = scan->rs_startblock; - Assert(blockno < scan->rs_nblocks); + blockno = pagescan->rs_startblock; + Assert(blockno < pagescan->rs_nblocks); heapgetpage(scan, blockno); scan->rs_inited = true; } @@ -434,14 +433,12 @@ tablesample_getnext(SampleScanState *scanstate) if (!ItemIdIsNormal(itemid)) continue; - tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid); - tuple->t_len = ItemIdGetLength(itemid); - ItemPointerSet(&(tuple->t_self), blockno, tupoffset); + tuple = table_tuple_fetch_from_offset(scan, blockno, tupoffset); if (all_visible) visible = true; else - visible = SampleTupleVisible(tuple, tupoffset, scan); + visible = SampleTupleVisible(tuple, tupoffset, scanstate); /* in pagemode, heapgetpage did this for us */ if (!pagemode) @@ -472,14 +469,14 @@ tablesample_getnext(SampleScanState *scanstate) if (tsm->NextSampleBlock) { blockno = tsm->NextSampleBlock(scanstate); - Assert(!scan->rs_syncscan); + Assert(!pagescan->rs_syncscan); finished = !BlockNumberIsValid(blockno); } else { /* Without NextSampleBlock, just do a plain forward seqscan. */ blockno++; - if (blockno >= scan->rs_nblocks) + if (blockno >= pagescan->rs_nblocks) blockno = 0; /* @@ -492,10 +489,10 @@ tablesample_getnext(SampleScanState *scanstate) * a little bit backwards on every invocation, which is confusing. * We don't guarantee any specific ordering in general, though. 
*/ - if (scan->rs_syncscan) + if (pagescan->rs_syncscan) ss_report_location(scan->rs_rd, blockno); - finished = (blockno == scan->rs_startblock); + finished = (blockno == pagescan->rs_startblock); } /* @@ -507,12 +504,11 @@ tablesample_getnext(SampleScanState *scanstate) ReleaseBuffer(scan->rs_cbuf); scan->rs_cbuf = InvalidBuffer; scan->rs_cblock = InvalidBlockNumber; - tuple->t_data = NULL; scan->rs_inited = false; return NULL; } - Assert(blockno < scan->rs_nblocks); + Assert(blockno < pagescan->rs_nblocks); heapgetpage(scan, blockno); /* Re-establish state for new page */ @@ -527,16 +523,19 @@ tablesample_getnext(SampleScanState *scanstate) /* Count successfully-fetched tuples as heap fetches */ pgstat_count_heap_getnext(scan->rs_rd); - return &(scan->rs_ctup); + return tuple; } /* * Check visibility of the tuple. */ static bool -SampleTupleVisible(TableTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan) //hari +SampleTupleVisible(TableTuple tuple, OffsetNumber tupoffset, SampleScanState *scanstate) { - if (scan->rs_pageatatime) + TableScanDesc scan = scanstate->ss.ss_currentScanDesc; + HeapPageScanDesc pagescan = scanstate->pagescan; + + if (pagescan->rs_pageatatime) { /* * In pageatatime mode, heapgetpage() already did visibility checks, @@ -548,12 +547,12 @@ SampleTupleVisible(TableTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan) * gain to justify the restriction. */ int start = 0, - end = scan->rs_ntuples - 1; + end = pagescan->rs_ntuples - 1; while (start <= end) { int mid = (start + end) / 2; - OffsetNumber curoffset = scan->rs_vistuples[mid]; + OffsetNumber curoffset = pagescan->rs_vistuples[mid]; if (tupoffset == curoffset) return true; diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 758dbeb9c7..b2b0a30343 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -295,9 +295,10 @@ void ExecSeqScanReInitializeDSM(SeqScanState *node, ParallelContext *pcxt) { - HeapScanDesc scan = node->ss.ss_currentScanDesc; + ParallelHeapScanDesc pscan; - heap_parallelscan_reinitialize(scan->rs_parallel); + pscan = tableam_get_parallelheapscandesc(node->ss.ss_currentScanDesc); + heap_parallelscan_reinitialize(pscan); } /* ---------------------------------------------------------------- diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 8369e3ad62..f3cd64cf62 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -21,6 +21,7 @@ #include "access/heapam.h" #include "access/htup_details.h" #include "access/nbtree.h" +#include "access/tableamapi.h" #include "access/sysattr.h" #include "access/transam.h" #include "access/xlog.h" @@ -272,7 +273,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, info->amsearchnulls = amroutine->amsearchnulls; info->amcanparallel = amroutine->amcanparallel; info->amhasgettuple = (amroutine->amgettuple != NULL); - info->amhasgetbitmap = (amroutine->amgetbitmap != NULL); + info->amhasgetbitmap = ((amroutine->amgetbitmap != NULL) + && (relation->rd_tableamroutine->scan_get_heappagescandesc != NULL)); info->amcostestimate = amroutine->amcostestimate; Assert(info->amcostestimate != NULL); diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index 8728d90b1c..4beb41f48f 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -646,7 +646,7 @@ check_default_partition_contents(Relation parent, Relation 
default_rel, Snapshot snapshot; TupleDesc tupdesc; ExprContext *econtext; - HeapScanDesc scan; + TableScanDesc scan; MemoryContext oldCxt; TupleTableSlot *tupslot; diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 6a875c6d82..a38ba57a04 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -1878,7 +1878,7 @@ get_database_list(void) { List *dblist = NIL; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tup; MemoryContext resultcxt; @@ -1944,7 +1944,7 @@ do_autovacuum(void) { Relation classRel; HeapTuple tuple; - HeapScanDesc relScan; + TableScanDesc relScan; Form_pg_database dbForm; List *table_oids = NIL; List *orphan_oids = NIL; diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index a0ca9ee127..dc292aa0e0 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -1207,7 +1207,7 @@ pgstat_collect_oids(Oid catalogid) HTAB *htab; HASHCTL hash_ctl; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tup; Snapshot snapshot; diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index d98a3e891f..8a8438286c 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -107,7 +107,7 @@ get_subscription_list(void) { List *res = NIL; Relation rel; - HeapScanDesc scan; + TableScanDesc scan; HeapTuple tup; MemoryContext resultcxt; diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index 0992fb7fd8..2a2d890c0b 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -420,7 +420,7 @@ DefineQueryRewrite(const char *rulename, if (event_relation->rd_rel->relkind != RELKIND_VIEW && event_relation->rd_rel->relkind != RELKIND_MATVIEW) { - HeapScanDesc scanDesc; + TableScanDesc scanDesc; Snapshot snapshot; if (event_relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 99841b669b..0c5551730f 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -1214,7 +1214,7 @@ static bool ThereIsAtLeastOneRole(void) { Relation pg_authid_rel; - HeapScanDesc scan; + TableScanDesc scan; bool result; pg_authid_rel = heap_open(AuthIdRelationId, AccessShareLock); diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 429b065634..6aca0af7d4 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -98,6 +98,8 @@ extern Relation heap_openrv_extended(const RangeVar *relation, #define heap_close(r,l) relation_close(r,l) /* struct definitions appear in relscan.h */ +typedef struct HeapPageScanDescData *HeapPageScanDesc; +typedef struct TableScanDescData *TableScanDesc; typedef struct HeapScanDescData *HeapScanDesc; typedef struct ParallelHeapScanDescData *ParallelHeapScanDesc; @@ -107,7 +109,7 @@ typedef struct ParallelHeapScanDescData *ParallelHeapScanDesc; */ #define HeapScanIsValid(scan) PointerIsValid(scan) -extern HeapScanDesc heap_beginscan(Relation relation, Snapshot snapshot, +extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, @@ -116,22 +118,22 @@ extern HeapScanDesc heap_beginscan(Relation relation, Snapshot snapshot, bool is_bitmapscan, bool is_samplescan, bool temp_snap); -extern void heap_setscanlimits(HeapScanDesc scan, 
BlockNumber startBlk, +extern void heap_setscanlimits(TableScanDesc scan, BlockNumber startBlk, BlockNumber endBlk); -extern void heapgetpage(HeapScanDesc scan, BlockNumber page); -extern void heap_rescan(HeapScanDesc scan, ScanKey key, bool set_params, +extern void heapgetpage(TableScanDesc scan, BlockNumber page); +extern void heap_rescan(TableScanDesc scan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode); -extern void heap_rescan_set_params(HeapScanDesc scan, ScanKey key, +extern void heap_rescan_set_params(TableScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode); -extern void heap_endscan(HeapScanDesc scan); -extern TableTuple heap_getnext(HeapScanDesc scan, ScanDirection direction); -extern TupleTableSlot *heap_getnextslot(HeapScanDesc sscan, ScanDirection direction, +extern void heap_endscan(TableScanDesc scan); +extern TableTuple heap_getnext(TableScanDesc scan, ScanDirection direction); +extern TupleTableSlot *heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot); extern Size heap_parallelscan_estimate(Snapshot snapshot); extern void heap_parallelscan_initialize(ParallelHeapScanDesc target, Relation relation, Snapshot snapshot); extern void heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan); -extern HeapScanDesc heap_beginscan_parallel(Relation, ParallelHeapScanDesc); +extern TableScanDesc heap_beginscan_parallel(Relation, ParallelHeapScanDesc); extern bool heap_fetch(Relation relation, ItemPointer tid, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, @@ -181,7 +183,7 @@ extern void simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup); extern void heap_sync(Relation relation); -extern void heap_update_snapshot(HeapScanDesc scan, Snapshot snapshot); +extern void heap_update_snapshot(TableScanDesc scan, Snapshot snapshot); /* in heap/pruneheap.c */ extern void heap_page_prune_opt(Relation relation, Buffer buffer); diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index e5289b8aa7..6a756abbe2 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -16,6 +16,7 @@ #include "access/genam.h" #include "access/heapam.h" +#include "access/tableam.h" #include "access/htup_details.h" #include "access/itup.h" #include "access/tupdesc.h" @@ -43,40 +44,54 @@ typedef struct ParallelHeapScanDescData char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]; } ParallelHeapScanDescData; -typedef struct HeapScanDescData +typedef struct TableScanDescData { /* scan parameters */ Relation rs_rd; /* heap relation descriptor */ Snapshot rs_snapshot; /* snapshot to see */ int rs_nkeys; /* number of scan keys */ ScanKey rs_key; /* array of scan key descriptors */ - bool rs_bitmapscan; /* true if this is really a bitmap scan */ - bool rs_samplescan; /* true if this is really a sample scan */ + + /* scan current state */ + bool rs_inited; /* false = scan not init'd yet */ + BlockNumber rs_cblock; /* current block # in scan, if any */ + Buffer rs_cbuf; /* current buffer in scan, if any */ +} TableScanDescData; + +typedef struct HeapPageScanDescData +{ bool rs_pageatatime; /* verify visibility page-at-a-time? */ - bool rs_allow_strat; /* allow or disallow use of access strategy */ - bool rs_allow_sync; /* allow or disallow use of syncscan */ - bool rs_temp_snap; /* unregister snapshot at scan end? 
*/ /* state set up at initscan time */ BlockNumber rs_nblocks; /* total number of blocks in rel */ BlockNumber rs_startblock; /* block # to start at */ BlockNumber rs_numblocks; /* max number of blocks to scan */ + /* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */ BufferAccessStrategy rs_strategy; /* access strategy for reads */ bool rs_syncscan; /* report location to syncscan logic? */ - /* scan current state */ - bool rs_inited; /* false = scan not init'd yet */ - HeapTupleData rs_ctup; /* current tuple in scan, if any */ - BlockNumber rs_cblock; /* current block # in scan, if any */ - Buffer rs_cbuf; /* current buffer in scan, if any */ - /* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */ - ParallelHeapScanDesc rs_parallel; /* parallel scan information */ - /* these fields only used in page-at-a-time mode and for bitmap scans */ int rs_cindex; /* current tuple's index in vistuples */ int rs_ntuples; /* number of visible tuples on page */ OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */ +} HeapPageScanDescData; + +typedef struct HeapScanDescData +{ + /* scan parameters */ + TableScanDescData rs_scan; /* */ + HeapPageScanDescData rs_pagescan; + bool rs_bitmapscan; /* true if this is really a bitmap scan */ + bool rs_samplescan; /* true if this is really a sample scan */ + bool rs_allow_strat; /* allow or disallow use of access strategy */ + bool rs_allow_sync; /* allow or disallow use of syncscan */ + bool rs_temp_snap; /* unregister snapshot at scan end? */ + + HeapTupleData rs_ctup; /* current tuple in scan, if any */ + + /* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */ + ParallelHeapScanDesc rs_parallel; /* parallel scan information */ } HeapScanDescData; /* @@ -150,12 +165,12 @@ typedef struct ParallelIndexScanDescData char ps_snapshot_data[FLEXIBLE_ARRAY_MEMBER]; } ParallelIndexScanDescData; -/* Struct for heap-or-index scans of system tables */ +/* Struct for storage-or-index scans of system tables */ typedef struct SysScanDescData { Relation heap_rel; /* catalog being scanned */ Relation irel; /* NULL if doing heap scan */ - HeapScanDesc scan; /* only valid in heap-scan case */ + TableScanDesc scan; /* only valid in storage-scan case */ IndexScanDesc iscan; /* only valid in index-scan case */ Snapshot snapshot; /* snapshot to unregister at end of scan */ } SysScanDescData; diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index f3982c3c13..afe8eeb4fe 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -42,29 +42,31 @@ typedef List *(*InsertIndexTuples) (TupleTableSlot *slot, EState *estate, bool n /* Function pointer to let the index tuple delete from storage am */ typedef void (*DeleteIndexTuples) (Relation rel, ItemPointer tid, TransactionId old_xmin); -extern HeapScanDesc table_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan); - -extern void table_setscanlimits(HeapScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks); -extern HeapScanDesc table_beginscan(Relation relation, Snapshot snapshot, +extern TableScanDesc table_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan); +extern ParallelHeapScanDesc tableam_get_parallelheapscandesc(TableScanDesc sscan); +extern HeapPageScanDesc tableam_get_heappagescandesc(TableScanDesc sscan); +extern void table_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks); +extern TableScanDesc table_beginscan(Relation relation, Snapshot 
snapshot, int nkeys, ScanKey key); -extern HeapScanDesc table_beginscan_catalog(Relation relation, int nkeys, ScanKey key); -extern HeapScanDesc table_beginscan_strat(Relation relation, Snapshot snapshot, +extern TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, ScanKey key); +extern TableScanDesc table_beginscan_strat(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync); -extern HeapScanDesc table_beginscan_bm(Relation relation, Snapshot snapshot, +extern TableScanDesc table_beginscan_bm(Relation relation, Snapshot snapshot, int nkeys, ScanKey key); -extern HeapScanDesc table_beginscan_sampling(Relation relation, Snapshot snapshot, +extern TableScanDesc table_beginscan_sampling(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode); -extern void table_endscan(HeapScanDesc scan); -extern void table_rescan(HeapScanDesc scan, ScanKey key); -extern void table_rescan_set_params(HeapScanDesc scan, ScanKey key, +extern void table_endscan(TableScanDesc scan); +extern void table_rescan(TableScanDesc scan, ScanKey key); +extern void table_rescan_set_params(TableScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode); -extern void table_scan_update_snapshot(HeapScanDesc scan, Snapshot snapshot); +extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot); -extern TableTuple table_scan_getnext(HeapScanDesc sscan, ScanDirection direction); -extern TupleTableSlot *table_scan_getnextslot(HeapScanDesc sscan, ScanDirection direction, TupleTableSlot *slot); +extern TableTuple table_scan_getnext(TableScanDesc sscan, ScanDirection direction); +extern TupleTableSlot *table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot); +extern TableTuple table_tuple_fetch_from_offset(TableScanDesc sscan, BlockNumber blkno, OffsetNumber offset); extern void storage_get_latest_tid(Relation relation, Snapshot snapshot, diff --git a/src/include/access/tableam_common.h b/src/include/access/tableam_common.h index 36b72e9767..e5cc461bd8 100644 --- a/src/include/access/tableam_common.h +++ b/src/include/access/tableam_common.h @@ -28,7 +28,6 @@ /* A physical tuple coming from a table AM scan */ typedef void *TableTuple; -typedef void *TableScanDesc; /* Result codes for HeapTupleSatisfiesVacuum */ typedef enum diff --git a/src/include/access/tableamapi.h b/src/include/access/tableamapi.h index 2ab3ba62d1..29f83e0ab2 100644 --- a/src/include/access/tableamapi.h +++ b/src/include/access/tableamapi.h @@ -83,7 +83,7 @@ typedef TableTuple(*TupleFromDatum_function) (Datum data, Oid tableoid); typedef void (*RelationSync_function) (Relation relation); -typedef HeapScanDesc (*ScanBegin_function) (Relation relation, +typedef TableScanDesc (*ScanBegin_function) (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, @@ -93,22 +93,29 @@ typedef HeapScanDesc (*ScanBegin_function) (Relation relation, bool is_bitmapscan, bool is_samplescan, bool temp_snap); -typedef void (*ScanSetlimits_function) (HeapScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks); + +typedef ParallelHeapScanDesc (*ScanGetParallelheapscandesc_function) (TableScanDesc scan); +typedef HeapPageScanDesc(*ScanGetHeappagescandesc_function) (TableScanDesc scan); + +typedef void (*ScanSetlimits_function) (TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks); /* must return a TupleTableSlot? 
*/ -typedef TableTuple(*ScanGetnext_function) (HeapScanDesc scan, +typedef TableTuple(*ScanGetnext_function) (TableScanDesc scan, ScanDirection direction); -typedef TupleTableSlot *(*ScanGetnextSlot_function) (HeapScanDesc scan, +typedef TupleTableSlot *(*ScanGetnextSlot_function) (TableScanDesc scan, ScanDirection direction, TupleTableSlot *slot); -typedef void (*ScanEnd_function) (HeapScanDesc scan); +typedef TableTuple(*ScanFetchTupleFromOffset_function) (TableScanDesc scan, + BlockNumber blkno, OffsetNumber offset); + +typedef void (*ScanEnd_function) (TableScanDesc scan); -typedef void (*ScanGetpage_function) (HeapScanDesc scan, BlockNumber page); -typedef void (*ScanRescan_function) (HeapScanDesc scan, ScanKey key, bool set_params, +typedef void (*ScanGetpage_function) (TableScanDesc scan, BlockNumber page); +typedef void (*ScanRescan_function) (TableScanDesc scan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode); -typedef void (*ScanUpdateSnapshot_function) (HeapScanDesc scan, Snapshot snapshot); +typedef void (*ScanUpdateSnapshot_function) (TableScanDesc scan, Snapshot snapshot); typedef bool (*HotSearchBuffer_function) (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, @@ -150,9 +157,12 @@ typedef struct TableAmRoutine /* Operations on relation scans */ ScanBegin_function scan_begin; + ScanGetParallelheapscandesc_function scan_get_parallelheapscandesc; + ScanGetHeappagescandesc_function scan_get_heappagescandesc; ScanSetlimits_function scansetlimits; ScanGetnext_function scan_getnext; ScanGetnextSlot_function scan_getnextslot; + ScanFetchTupleFromOffset_function scan_fetch_tuple_from_offset; ScanEnd_function scan_end; ScanGetpage_function scan_getpage; ScanRescan_function scan_rescan; diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h index f20c5f789b..6cab64df10 100644 --- a/src/include/catalog/index.h +++ b/src/include/catalog/index.h @@ -116,7 +116,7 @@ extern double IndexBuildHeapScan(Relation heapRelation, bool allow_sync, IndexBuildCallback callback, void *callback_state, - HeapScanDesc scan); + TableScanDesc scan); extern double IndexBuildHeapRangeScan(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, @@ -126,7 +126,7 @@ extern double IndexBuildHeapRangeScan(Relation heapRelation, BlockNumber end_blockno, IndexBuildCallback callback, void *callback_state, - HeapScanDesc scan); + TableScanDesc scan); extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 1c6778b3b8..023b7327ed 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1191,7 +1191,7 @@ typedef struct ScanState { PlanState ps; /* its first field is NodeTag */ Relation ss_currentRelation; - HeapScanDesc ss_currentScanDesc; + TableScanDesc ss_currentScanDesc; TupleTableSlot *ss_ScanTupleSlot; } ScanState; @@ -1212,6 +1212,7 @@ typedef struct SeqScanState typedef struct SampleScanState { ScanState ss; + HeapPageScanDesc pagescan; List *args; /* expr states for TABLESAMPLE params */ ExprState *repeatable; /* expr state for REPEATABLE expr */ /* use struct pointer to avoid including tsmapi.h here */ @@ -1438,6 +1439,7 @@ typedef struct ParallelBitmapHeapState typedef struct BitmapHeapScanState { ScanState ss; /* its first field is NodeTag */ + HeapPageScanDesc pagescan; ExprState *bitmapqualorig; TIDBitmap *tbm; TBMIterator *tbmiterator; -- 2.16.1.windows.4
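
For illustration, a minimal caller-side sketch of the API split this patch introduces. It is not part of the patch; the snapshot handling is assumed, and it uses only functions declared in the tableam.h and heapam.h hunks above: table_beginscan(), table_scan_getnext(), table_endscan(), and tableam_get_heappagescandesc() for the block-level state that now lives in HeapPageScanDescData rather than in the common scan descriptor.

#include "postgres.h"

#include "access/heapam.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "utils/snapmgr.h"

/*
 * example_count_tuples -- hypothetical helper, not part of the patch.
 *
 * Scans "rel" through the new TableScanDesc API and returns the number of
 * tuples visible to the active snapshot.  The HeapPageScanDesc is fetched
 * only for the block count, which is no longer a member of the scan
 * descriptor shared by all table AMs.
 */
static uint64
example_count_tuples(Relation rel)
{
	TableScanDesc scan;
	HeapPageScanDesc pagescan;
	TableTuple	tuple;
	uint64		ntuples = 0;

	scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);

	/* page-level state; available here because heap uses the page format */
	pagescan = tableam_get_heappagescandesc(scan);
	elog(DEBUG1, "scanning %u blocks", pagescan->rs_nblocks);

	while ((tuple = table_scan_getnext(scan, ForwardScanDirection)) != NULL)
		ntuples++;

	table_endscan(scan);

	return ntuples;
}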
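Next, a sketch of the page-level consumption pattern that bitgetpage() and tablesample_getnext() now follow: tuples are obtained by (block, offset) through table_tuple_fetch_from_offset() instead of building a HeapTupleData from PageGetItem(). The helper name is hypothetical, the include list is omitted (it assumes the same headers as nodeBitmapHeapscan.c), and the four-argument HeapTupleSatisfiesVisibility() form is the one used elsewhere in this patch series.

/*
 * example_collect_visible -- hypothetical helper, not part of the patch.
 *
 * Records the offsets of visible tuples on one pinned and share-locked page
 * in pagescan->rs_vistuples, mirroring what bitgetpage() does above but
 * without touching HeapTupleHeader directly.
 */
static int
example_collect_visible(TableScanDesc scan, HeapPageScanDesc pagescan,
						BlockNumber blkno, Buffer buffer, Snapshot snapshot)
{
	Page		page = BufferGetPage(buffer);
	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
	OffsetNumber offnum;
	int			ntup = 0;

	for (offnum = FirstOffsetNumber;
		 offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		lp = PageGetItemId(page, offnum);
		TableTuple	loctup;

		if (!ItemIdIsNormal(lp))
			continue;

		/* the AM materializes the tuple for us from (block, offset) */
		loctup = table_tuple_fetch_from_offset(scan, blkno, offnum);

		if (HeapTupleSatisfiesVisibility(scan->rs_rd->rd_tableamroutine,
										 loctup, snapshot, buffer))
			pagescan->rs_vistuples[ntup++] = offnum;
	}

	Assert(ntup <= MaxHeapTuplesPerPage);
	return ntup;
}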
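On the AM side, a heap-like access method could satisfy the new scan_get_heappagescandesc callback by handing back its embedded page-scan state; it would be wired into the scan_get_heappagescandesc member of TableAmRoutine. The sketch below is an assumption about how such a handler might look (the function name is a placeholder, not the one in heapam_handler.c); it relies only on the struct layout in the relscan.h hunk, where TableScanDescData is the first member of HeapScanDescData. An AM that cannot supply a HeapPageScanDesc leaves the callback NULL, and the plancat.c change above then keeps the planner from generating Bitmap Heap or Sample scans for it.

/*
 * example_get_heappagescandesc -- placeholder name for an AM callback.
 *
 * Because TableScanDescData is embedded as the first member (rs_scan) of
 * HeapScanDescData, a scan descriptor created by this AM can simply be
 * downcast and its embedded page-scan state returned.
 */
static HeapPageScanDesc
example_get_heappagescandesc(TableScanDesc sscan)
{
	HeapScanDesc scan = (HeapScanDesc) sscan;

	return &scan->rs_pagescan;
}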