From 617256d0d74fa9604febddaa14b43ee45a6d7407 Mon Sep 17 00:00:00 2001
From: Claudio Freire
Date: Tue, 27 Feb 2018 12:51:46 -0300
Subject: [PATCH 4/4] Index vacuum: Vacuum FSM after each bulkdelete call

If any pages have been deleted during bulkdelete, vacuum
the FSM to expose those pages to concurrent activity.
Try to avoid redundant FSM vacuum at vacuumcleanup.
---
 src/backend/access/nbtree/nbtree.c    | 22 ++++++++++++++++++++--
 src/backend/access/spgist/spgvacuum.c | 18 ++++++++++++++++--
 2 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 8158508..d673b88 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -798,6 +798,12 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 		cycleid = _bt_start_vacuum(rel);
 
 		btvacuumscan(info, stats, callback, callback_state, cycleid);
+
+		if (stats->pages_deleted > 0)
+		{
+			/* vacuum the FSM to expose deleted pages, if any */
+			IndexFreeSpaceMapVacuum(info->index);
+		}
 	}
 	PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 	_bt_end_vacuum(rel);
@@ -813,6 +819,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 IndexBulkDeleteResult *
 btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
+	bool		needs_fsm_vacuum;
+
 	/* No-op in ANALYZE ONLY mode */
 	if (info->analyze_only)
 		return stats;
@@ -825,15 +833,25 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 	 *
 	 * Since we aren't going to actually delete any leaf items, there's no
 	 * need to go through all the vacuum-cycle-ID pushups.
+	 *
+	 * If there was a btbulkdelete call, it will vacuum the FSM too if it
+	 * deleted any pages, so we can skip our FSM vacuum in that case only.
 	 */
 	if (stats == NULL)
 	{
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
 		btvacuumscan(info, stats, NULL, NULL, 0);
+
+		needs_fsm_vacuum = true;
 	}
+	else
+		needs_fsm_vacuum = (stats->pages_deleted == 0);
 
-	/* Finally, vacuum the FSM */
-	IndexFreeSpaceMapVacuum(info->index);
+	if (needs_fsm_vacuum)
+	{
+		/* Finally, vacuum the FSM */
+		IndexFreeSpaceMapVacuum(info->index);
+	}
 
 	/*
 	 * It's quite possible for us to be fooled by concurrent page splits into
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 72839cb..e9ed3fb 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -898,6 +898,12 @@ spgbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 
 	spgvacuumscan(&bds);
 
+	if (stats->pages_deleted > 0)
+	{
+		/* vacuum the FSM to expose deleted pages, if any */
+		IndexFreeSpaceMapVacuum(info->index);
+	}
+
 	return stats;
 }
 
@@ -918,6 +924,7 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
 	Relation	index = info->index;
 	spgBulkDeleteState bds;
+	bool		needs_fsm_vacuum;
 
 	/* No-op in ANALYZE ONLY mode */
 	if (info->analyze_only)
@@ -938,10 +945,17 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 		bds.callback_state = NULL;
 
 		spgvacuumscan(&bds);
+
+		needs_fsm_vacuum = true;
 	}
+	else
+		needs_fsm_vacuum = stats->pages_deleted == 0;
 
-	/* Finally, vacuum the FSM */
-	IndexFreeSpaceMapVacuum(index);
+	if (needs_fsm_vacuum)
+	{
+		/* Finally, vacuum the FSM */
+		IndexFreeSpaceMapVacuum(index);
+	}
 
 	/*
 	 * It's quite possible for us to be fooled by concurrent tuple moves into
-- 
1.8.4.5