Skip to content

Commit 02b5940

Browse files
Consolidate nbtree VACUUM metapage routines.
Simplify _bt_vacuum_needs_cleanup() function's signature (it only needs a single 'rel' argument now), and move it next to its sibling function in nbtpage.c. I believe that _bt_vacuum_needs_cleanup() was originally located in nbtree.c due to an include dependency issue. That's no longer an issue. Follow-up to commit 9f3665f.
1 parent ac44595 commit 02b5940

File tree

3 files changed

+61
-69
lines changed

3 files changed

+61
-69
lines changed

src/backend/access/nbtree/nbtpage.c

Lines changed: 59 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -168,15 +168,67 @@ _bt_getmeta(Relation rel, Buffer metabuf)
168168
}
169169

170170
/*
171-
* _bt_set_cleanup_info() -- Update metapage for btvacuumcleanup().
171+
* _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup
172172
*
173-
* This routine is called at the end of each VACUUM's btvacuumcleanup()
174-
* call. Its purpose is to maintain the metapage fields that are used by
175-
* _bt_vacuum_needs_cleanup() to decide whether or not a btvacuumscan()
176-
* call should go ahead for an entire VACUUM operation.
173+
* Called by btvacuumcleanup when btbulkdelete was never called because no
174+
* index tuples needed to be deleted.
175+
*/
176+
bool
177+
_bt_vacuum_needs_cleanup(Relation rel)
178+
{
179+
Buffer metabuf;
180+
Page metapg;
181+
BTMetaPageData *metad;
182+
uint32 btm_version;
183+
BlockNumber prev_num_delpages;
184+
185+
/*
186+
* Copy details from metapage to local variables quickly.
187+
*
188+
* Note that we deliberately avoid using cached version of metapage here.
189+
*/
190+
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
191+
metapg = BufferGetPage(metabuf);
192+
metad = BTPageGetMeta(metapg);
193+
btm_version = metad->btm_version;
194+
195+
if (btm_version < BTREE_NOVAC_VERSION)
196+
{
197+
/*
198+
* Metapage needs to be dynamically upgraded to store fields that are
199+
* only present when btm_version >= BTREE_NOVAC_VERSION
200+
*/
201+
_bt_relbuf(rel, metabuf);
202+
return true;
203+
}
204+
205+
prev_num_delpages = metad->btm_last_cleanup_num_delpages;
206+
_bt_relbuf(rel, metabuf);
207+
208+
/*
209+
* Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
210+
* total size of the index. We can reasonably expect (though are not
211+
* guaranteed) to be able to recycle this many pages if we decide to do a
212+
* btvacuumscan call during the ongoing btvacuumcleanup.
213+
*
214+
* Our approach won't reliably avoid "wasted" cleanup-only btvacuumscan
215+
* calls. That is, we can end up scanning the entire index without ever
216+
* placing even 1 of the prev_num_delpages pages in the free space map, at
217+
* least in certain narrow cases (see nbtree/README section on recycling
218+
* deleted pages for details). This rarely comes up in practice.
219+
*/
220+
if (prev_num_delpages > 0 &&
221+
prev_num_delpages > RelationGetNumberOfBlocks(rel) / 20)
222+
return true;
223+
224+
return false;
225+
}
226+
227+
/*
228+
* _bt_set_cleanup_info() -- Update metapage for btvacuumcleanup.
177229
*
178-
* See btvacuumcleanup() and _bt_vacuum_needs_cleanup() for the
179-
* definition of num_delpages.
230+
* Called at the end of btvacuumcleanup, when num_delpages value has been
231+
* finalized.
180232
*/
181233
void
182234
_bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)

src/backend/access/nbtree/nbtree.c

Lines changed: 1 addition & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -773,67 +773,6 @@ _bt_parallel_advance_array_keys(IndexScanDesc scan)
773773
SpinLockRelease(&btscan->btps_mutex);
774774
}
775775

776-
/*
777-
* _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup
778-
*
779-
* Called by btvacuumcleanup when btbulkdelete was never called because no
780-
* tuples needed to be deleted by VACUUM.
781-
*
782-
* When we return false, VACUUM can even skip the cleanup-only call to
783-
* btvacuumscan (i.e. there will be no btvacuumscan call for this index at
784-
* all). Otherwise, a cleanup-only btvacuumscan call is required.
785-
*/
786-
static bool
787-
_bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
788-
{
789-
Buffer metabuf;
790-
Page metapg;
791-
BTMetaPageData *metad;
792-
uint32 btm_version;
793-
BlockNumber prev_num_delpages;
794-
795-
/*
796-
* Copy details from metapage to local variables quickly.
797-
*
798-
* Note that we deliberately avoid using cached version of metapage here.
799-
*/
800-
metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ);
801-
metapg = BufferGetPage(metabuf);
802-
metad = BTPageGetMeta(metapg);
803-
btm_version = metad->btm_version;
804-
805-
if (btm_version < BTREE_NOVAC_VERSION)
806-
{
807-
/*
808-
* Metapage needs to be dynamically upgraded to store fields that are
809-
* only present when btm_version >= BTREE_NOVAC_VERSION
810-
*/
811-
_bt_relbuf(info->index, metabuf);
812-
return true;
813-
}
814-
815-
prev_num_delpages = metad->btm_last_cleanup_num_delpages;
816-
_bt_relbuf(info->index, metabuf);
817-
818-
/*
819-
* Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
820-
* total size of the index. We can reasonably expect (though are not
821-
* guaranteed) to be able to recycle this many pages if we decide to do a
822-
* btvacuumscan call during the ongoing btvacuumcleanup.
823-
*
824-
* Our approach won't reliably avoid "wasted" cleanup-only btvacuumscan
825-
* calls. That is, we can end up scanning the entire index without ever
826-
* placing even 1 of the prev_num_delpages pages in the free space map, at
827-
* least in certain narrow cases (see nbtree/README section on recycling
828-
* deleted pages for details). This rarely comes up in practice.
829-
*/
830-
if (prev_num_delpages > 0 &&
831-
prev_num_delpages > RelationGetNumberOfBlocks(info->index) / 20)
832-
return true;
833-
834-
return false;
835-
}
836-
837776
/*
838777
* Bulk deletion of all index entries pointing to a set of heap tuples.
839778
* The set of target tuples is specified via a callback routine that tells
@@ -894,7 +833,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
894833
if (stats == NULL)
895834
{
896835
/* Check if VACUUM operation can entirely avoid btvacuumscan() call */
897-
if (!_bt_vacuum_needs_cleanup(info))
836+
if (!_bt_vacuum_needs_cleanup(info->index))
898837
return NULL;
899838

900839
/*

src/include/access/nbtree.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1170,6 +1170,7 @@ extern OffsetNumber _bt_findsplitloc(Relation rel, Page origpage,
11701170
*/
11711171
extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
11721172
bool allequalimage);
1173+
extern bool _bt_vacuum_needs_cleanup(Relation rel);
11731174
extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages);
11741175
extern void _bt_upgrademetapage(Page page);
11751176
extern Buffer _bt_getroot(Relation rel, int access);

0 commit comments

Comments (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy