
Commit 33d3ad4

Code review for patch to avoid second scan when vacuuming index-less
table: avoid invoking LockBufferForCleanup without need, put out the same log message we would have before, minor code beautification.
1 parent ea21eea commit 33d3ad4


src/backend/commands/vacuumlazy.c

Lines changed: 53 additions & 32 deletions
@@ -16,10 +16,6 @@
  * perform a pass of index cleanup and page compaction, then resume the heap
  * scan with an empty TID array.
  *
- * As a special exception if we're processing a table with no indexes we can
- * vacuum each page as we go so we don't need to allocate more space than
- * enough to hold as many heap tuples fit on one page.
- *
  * We can limit the storage for page free space to MaxFSMPages entries,
  * since that's the most the free space map will be willing to remember
  * anyway.  If the relation has fewer than that many pages with free space,
@@ -29,13 +25,18 @@
  * have more than MaxFSMPages entries in all.  The surviving page entries
  * are passed to the free space map at conclusion of the scan.
  *
+ * If we're processing a table with no indexes, we can just vacuum each page
+ * as we go; there's no need to save up multiple tuples to minimize the number
+ * of index scans performed.  So we don't use maintenance_work_mem memory for
+ * the TID array, just enough to hold as many heap tuples as fit on one page.
+ *
  *
  * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.77 2006/09/04 21:40:23 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.78 2006/09/13 17:47:08 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
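The header comment added above describes the one-pass strategy for index-less tables. As a rough standalone illustration of the difference between the two strategies (a minimal sketch with made-up names such as scan_table, cleanup_indexes_and_heap, and TID_ARRAY_CAPACITY, not PostgreSQL code), the two-pass path accumulates dead-tuple TIDs and runs an index-plus-heap cleanup pass only when the array fills, while the one-pass path reclaims each page immediately:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative constants; the real limits come from maintenance_work_mem
 * and MaxHeapTuplesPerPage, not these made-up numbers. */
#define TID_ARRAY_CAPACITY 8     /* pretend the TID array holds 8 entries */
#define NUM_PAGES          5
#define DEAD_PER_PAGE      3

static int dead_tids = 0;              /* remembered dead-tuple TIDs */
static int index_passes = 0;           /* index+heap cleanup passes run */
static int pages_vacuumed_inline = 0;  /* pages vacuumed during the scan */

/* Stand-ins for the index/heap cleanup pass and for per-page vacuuming. */
static void cleanup_indexes_and_heap(void) { index_passes++; dead_tids = 0; }
static void vacuum_page_now(void)          { pages_vacuumed_inline++; dead_tids = 0; }

static void scan_table(bool hasindex)
{
    dead_tids = index_passes = pages_vacuumed_inline = 0;

    for (int page = 0; page < NUM_PAGES; page++)
    {
        /* Two-pass only: if the TID array is about to overflow, clean up first. */
        if (hasindex && dead_tids + DEAD_PER_PAGE > TID_ARRAY_CAPACITY)
            cleanup_indexes_and_heap();

        dead_tids += DEAD_PER_PAGE;     /* "remember" this page's dead tuples */

        /* One-pass only: no indexes, so vacuum the page immediately. */
        if (!hasindex && dead_tids > 0)
            vacuum_page_now();
    }

    if (hasindex && dead_tids > 0)
        cleanup_indexes_and_heap();     /* final pass for leftovers */

    printf("hasindex=%d: index passes=%d, pages vacuumed inline=%d\n",
           hasindex, index_passes, pages_vacuumed_inline);
}

int main(void)
{
    scan_table(true);   /* two-pass strategy */
    scan_table(false);  /* one-pass strategy */
    return 0;
}

With indexes, the number of expensive cleanup passes depends on how large a TID array maintenance_work_mem allows; without indexes there is nothing to batch, so one page's worth of memory is enough, exactly as the new comment says.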
@@ -68,6 +69,8 @@
 
 typedef struct LVRelStats
 {
+    /* hasindex = true means two-pass strategy; false means one-pass */
+    bool        hasindex;
     /* Overall statistics about rel */
     BlockNumber rel_pages;
     double      rel_tuples;
@@ -110,7 +113,7 @@ static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
                    TransactionId OldestXmin);
 static BlockNumber count_nondeletable_pages(Relation onerel,
                    LVRelStats *vacrelstats, TransactionId OldestXmin);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks, unsigned nindexes);
+static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
                        ItemPointer itemptr);
 static void lazy_record_free_space(LVRelStats *vacrelstats,
@@ -137,7 +140,6 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
     LVRelStats *vacrelstats;
     Relation   *Irel;
     int         nindexes;
-    bool        hasindex;
     BlockNumber possibly_freeable;
     TransactionId OldestXmin,
                 FreezeLimit;
@@ -169,7 +171,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
 
     /* Open all indexes of the relation */
     vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
-    hasindex = (nindexes > 0);
+    vacrelstats->hasindex = (nindexes > 0);
 
     /* Do the vacuuming */
     lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, FreezeLimit, OldestXmin);
@@ -195,7 +197,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
     vac_update_relstats(RelationGetRelid(onerel),
                         vacrelstats->rel_pages,
                         vacrelstats->rel_tuples,
-                        hasindex,
+                        vacrelstats->hasindex,
                         vacrelstats->minxid, OldestXmin);
 
     /* report results to the stats collector, too */
@@ -210,11 +212,13 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
  * This routine sets commit status bits, builds lists of dead tuples
  * and pages with free space, and calculates statistics on the number
  * of live tuples in the heap.  When done, or when we run low on space
- * for dead-tuple TIDs, or after every page if the table has no indexes
- * invoke vacuuming of indexes and heap.
+ * for dead-tuple TIDs, invoke vacuuming of indexes and heap.
  *
  * It also updates the minimum Xid found anywhere on the table in
  * vacrelstats->minxid, for later storing it in pg_class.relminxid.
+ *
+ * If there are no indexes then we just vacuum each dirty page as we
+ * process it, since there's no point in gathering many tuples.
  */
 static void
 lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
@@ -225,7 +229,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                 blkno;
     HeapTupleData tuple;
     char       *relname;
-    BlockNumber empty_pages;
+    BlockNumber empty_pages,
+                vacuumed_pages;
     double      num_tuples,
                 tups_vacuumed,
                 nkeep,
@@ -242,7 +247,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                     get_namespace_name(RelationGetNamespace(onerel)),
                     relname)));
 
-    empty_pages = 0;
+    empty_pages = vacuumed_pages = 0;
     num_tuples = tups_vacuumed = nkeep = nunused = 0;
 
     indstats = (IndexBulkDeleteResult **)
@@ -252,7 +257,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
     vacrelstats->rel_pages = nblocks;
     vacrelstats->nonempty_pages = 0;
 
-    lazy_space_alloc(vacrelstats, nblocks, nindexes);
+    lazy_space_alloc(vacrelstats, nblocks);
 
     for (blkno = 0; blkno < nblocks; blkno++)
     {
@@ -287,14 +292,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 
         buf = ReadBuffer(onerel, blkno);
 
-        /* In this phase we only need shared access to the buffer unless we're
-         * going to do the vacuuming now which we do if there are no indexes
-         */
-
-        if (nindexes)
-            LockBuffer(buf, BUFFER_LOCK_SHARE);
-        else
-            LockBufferForCleanup(buf);
+        /* In this phase we only need shared access to the buffer */
+        LockBuffer(buf, BUFFER_LOCK_SHARE);
 
         page = BufferGetPage(buf);
 
@@ -451,22 +450,34 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             }
         }                       /* scan along page */
 
+        /*
+         * If there are no indexes then we can vacuum the page right now
+         * instead of doing a second scan.
+         */
+        if (nindexes == 0 &&
+            vacrelstats->num_dead_tuples > 0)
+        {
+            /* Trade in buffer share lock for super-exclusive lock */
+            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+            LockBufferForCleanup(buf);
+            /* Remove tuples from heap */
+            lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
+            /* Forget the now-vacuumed tuples, and press on */
+            vacrelstats->num_dead_tuples = 0;
+            vacuumed_pages++;
+        }
+
         /*
         * If we remembered any tuples for deletion, then the page will be
         * visited again by lazy_vacuum_heap, which will compute and record
         * its post-compaction free space.  If not, then we're done with this
-        * page, so remember its free space as-is.
+        * page, so remember its free space as-is.  (This path will always
+        * be taken if there are no indexes.)
         */
         if (vacrelstats->num_dead_tuples == prev_dead_count)
         {
             lazy_record_free_space(vacrelstats, blkno,
                                    PageGetFreeSpace(page));
-        } else if (!nindexes) {
-            /* If there are no indexes we can vacuum the page right now instead
-             * of doing a second scan */
-            lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
-            lazy_record_free_space(vacrelstats, blkno, PageGetFreeSpace(BufferGetPage(buf)));
-            vacrelstats->num_dead_tuples = 0;
         }
 
         /* Remember the location of the last page with nonremovable tuples */
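This hunk is the core of the review fix: the page is scanned under an ordinary share lock, and only when an index-less table actually has dead tuples on the page is that lock traded for the super-exclusive cleanup lock that lazy_vacuum_page needs. The earlier patch (the removed else branch in the previous hunk) called LockBufferForCleanup on every page of an index-less table. A small counting sketch of the effect (hypothetical stand-ins, not the buffer-manager API):

#include <stdio.h>

/* Hypothetical page descriptor; the real code inspects the heap page and
 * counts into vacrelstats->num_dead_tuples. */
struct page { int num_dead_tuples; };

/* Cleanup-lock acquisitions on an index-less table under the old patch:
 * the cleanup lock is taken unconditionally for every page. */
static int old_scheme(const struct page *pages, int n)
{
    (void) pages;               /* page contents never influence locking */
    int cleanup_locks = 0;
    for (int i = 0; i < n; i++)
        cleanup_locks++;        /* LockBufferForCleanup on every page */
    return cleanup_locks;
}

/* Committed behavior: scan under a share lock, upgrade only when the page
 * actually has something to vacuum. */
static int new_scheme(const struct page *pages, int n)
{
    int cleanup_locks = 0;
    for (int i = 0; i < n; i++)
    {
        /* scan happens under a share lock ... */
        if (pages[i].num_dead_tuples > 0)
            cleanup_locks++;    /* ... upgraded only when vacuuming */
    }
    return cleanup_locks;
}

int main(void)
{
    /* Made-up workload: most pages have nothing to vacuum. */
    struct page pages[] = { {0}, {0}, {4}, {0}, {1}, {0}, {0}, {0} };
    int n = (int) (sizeof(pages) / sizeof(pages[0]));

    printf("cleanup locks, earlier patch: %d\n", old_scheme(pages, n));
    printf("cleanup locks, committed:     %d\n", new_scheme(pages, n));
    return 0;
}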
@@ -499,6 +510,13 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
     for (i = 0; i < nindexes; i++)
         lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
 
+    /* If no indexes, make log report that lazy_vacuum_heap would've made */
+    if (vacuumed_pages)
+        ereport(elevel,
+                (errmsg("\"%s\": removed %.0f row versions in %u pages",
+                        RelationGetRelationName(onerel),
+                        tups_vacuumed, vacuumed_pages)));
+
     ereport(elevel,
             (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages",
                     RelationGetRelationName(onerel),
@@ -908,18 +926,21 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats,
  * See the comments at the head of this file for rationale.
  */
 static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks, unsigned nindexes)
+lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
 {
     long        maxtuples;
     int         maxpages;
 
-    if (nindexes) {
+    if (vacrelstats->hasindex)
+    {
         maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
         maxtuples = Min(maxtuples, INT_MAX);
         maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
         /* stay sane if small maintenance_work_mem */
         maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
-    } else {
+    }
+    else
+    {
         maxtuples = MaxHeapTuplesPerPage;
     }
 
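Finally, lazy_space_alloc now sizes the dead-tuple TID array from vacrelstats->hasindex rather than a separate nindexes argument. A standalone sketch of the same sizing arithmetic (the constants below are illustrative assumptions, not the authoritative backend values, which depend on the build's block size and allocator limits):

#include <stdio.h>
#include <limits.h>

/* Illustrative stand-ins for backend definitions (assumptions only). */
#define SIZEOF_ITEM_POINTER       6L          /* sizeof(ItemPointerData): block + offset */
#define MAX_ALLOC_SIZE            0x3fffffffL /* MaxAllocSize: palloc limit, roughly 1 GB */
#define MAX_HEAP_TUPLES_PER_PAGE  291L        /* MaxHeapTuplesPerPage; depends on BLCKSZ */

static long Min(long a, long b) { return a < b ? a : b; }
static long Max(long a, long b) { return a > b ? a : b; }

/* Mirror of the sizing logic: how many dead-tuple TIDs to make room for. */
static long max_dead_tuples(long maintenance_work_mem_kb, int hasindex)
{
    long maxtuples;

    if (hasindex)
    {
        maxtuples = (maintenance_work_mem_kb * 1024L) / SIZEOF_ITEM_POINTER;
        maxtuples = Min(maxtuples, (long) INT_MAX);
        maxtuples = Min(maxtuples, MAX_ALLOC_SIZE / SIZEOF_ITEM_POINTER);
        /* stay sane if small maintenance_work_mem */
        maxtuples = Max(maxtuples, MAX_HEAP_TUPLES_PER_PAGE);
    }
    else
        maxtuples = MAX_HEAP_TUPLES_PER_PAGE;

    return maxtuples;
}

int main(void)
{
    printf("indexed,  16 MB work mem: %ld TIDs\n", max_dead_tuples(16 * 1024L, 1));
    printf("indexed, 128 MB work mem: %ld TIDs\n", max_dead_tuples(128 * 1024L, 1));
    printf("no indexes:               %ld TIDs\n", max_dead_tuples(128 * 1024L, 0));
    return 0;
}

For an indexed table the array is bounded by maintenance_work_mem (and by MaxAllocSize and INT_MAX); for an index-less table one page's worth of TIDs is enough, matching the new file-header comment.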