
Commit cb97024

Move VM update code from lazy_scan_heap() to lazy_scan_prune().
Most of the output parameters of lazy_scan_prune() were being used to update the VM in lazy_scan_heap(). Moving that code into lazy_scan_prune() simplifies lazy_scan_heap() and requires less communication between the two. This change permits some further code simplification, but that is left for a separate commit.

Melanie Plageman, reviewed by me.

Discussion: http://postgr.es/m/CAAKRu_aM=OL85AOr-80wBsCr=vLVzhnaavqkVPRkFBtD0zsuLQ@mail.gmail.com
1 parent 0a157a4 commit cb97024
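
A condensed view of the interface change (prototype and call site copied from the diff below): lazy_scan_heap() now hands the visibility-map buffer and the VM status captured by the last lazy_scan_skip() call down to lazy_scan_prune(), which performs the VM updates itself using the prunestate it has just filled in.

    /* New prototype: the VM context is now passed in directly. */
    static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
                                BlockNumber blkno, Page page,
                                Buffer vmbuffer, bool all_visible_according_to_vm,
                                LVPagePruneState *prunestate);

    /*
     * Call site in lazy_scan_heap(): the roughly 110 lines of VM-bit handling
     * that used to follow this call now sit at the end of lazy_scan_prune().
     */
    lazy_scan_prune(vacrel, buf, blkno, page,
                    vmbuffer, all_visible_according_to_vm,
                    &prunestate);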

File tree

1 file changed: +115 / -111 lines

src/backend/access/heap/vacuumlazy.c

Lines changed: 115 additions & 111 deletions
@@ -249,6 +249,7 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
                                    bool sharelock, Buffer vmbuffer);
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
                             BlockNumber blkno, Page page,
+                            Buffer vmbuffer, bool all_visible_according_to_vm,
                             LVPagePruneState *prunestate);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
                               BlockNumber blkno, Page page,
@@ -1032,117 +1033,9 @@ lazy_scan_heap(LVRelState *vacrel)
         * tuple headers of remaining items with storage. It also determines
         * if truncating this block is safe.
         */
-        lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
-
-        Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
-
-        /*
-         * Handle setting visibility map bit based on information from the VM
-         * (as of last lazy_scan_skip() call), and from prunestate
-         */
-        if (!all_visible_according_to_vm && prunestate.all_visible)
-        {
-            uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
-
-            if (prunestate.all_frozen)
-            {
-                Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
-                flags |= VISIBILITYMAP_ALL_FROZEN;
-            }
-
-            /*
-             * It should never be the case that the visibility map page is set
-             * while the page-level bit is clear, but the reverse is allowed
-             * (if checksums are not enabled). Regardless, set both bits so
-             * that we get back in sync.
-             *
-             * NB: If the heap page is all-visible but the VM bit is not set,
-             * we don't need to dirty the heap page. However, if checksums
-             * are enabled, we do need to make sure that the heap page is
-             * dirtied before passing it to visibilitymap_set(), because it
-             * may be logged. Given that this situation should only happen in
-             * rare cases after a crash, it is not worth optimizing.
-             */
-            PageSetAllVisible(page);
-            MarkBufferDirty(buf);
-            visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-                              vmbuffer, prunestate.visibility_cutoff_xid,
-                              flags);
-        }
-
-        /*
-         * As of PostgreSQL 9.2, the visibility map bit should never be set if
-         * the page-level bit is clear. However, it's possible that the bit
-         * got cleared after lazy_scan_skip() was called, so we must recheck
-         * with buffer lock before concluding that the VM is corrupt.
-         */
-        else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
-                 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
-        {
-            elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
-                 vacrel->relname, blkno);
-            visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
-                                VISIBILITYMAP_VALID_BITS);
-        }
-
-        /*
-         * It's possible for the value returned by
-         * GetOldestNonRemovableTransactionId() to move backwards, so it's not
-         * wrong for us to see tuples that appear to not be visible to
-         * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
-         * xmin value never moves backwards, but
-         * GetOldestNonRemovableTransactionId() is conservative and sometimes
-         * returns a value that's unnecessarily small, so if we see that
-         * contradiction it just means that the tuples that we think are not
-         * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
-         * is correct.
-         *
-         * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE
-         * set, however.
-         */
-        else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
-        {
-            elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
-                 vacrel->relname, blkno);
-            PageClearAllVisible(page);
-            MarkBufferDirty(buf);
-            visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
-                                VISIBILITYMAP_VALID_BITS);
-        }
-
-        /*
-         * If the all-visible page is all-frozen but not marked as such yet,
-         * mark it as all-frozen. Note that all_frozen is only valid if
-         * all_visible is true, so we must check both prunestate fields.
-         */
-        else if (all_visible_according_to_vm && prunestate.all_visible &&
-                 prunestate.all_frozen &&
-                 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
-        {
-            /*
-             * Avoid relying on all_visible_according_to_vm as a proxy for the
-             * page-level PD_ALL_VISIBLE bit being set, since it might have
-             * become stale -- even when all_visible is set in prunestate
-             */
-            if (!PageIsAllVisible(page))
-            {
-                PageSetAllVisible(page);
-                MarkBufferDirty(buf);
-            }
-
-            /*
-             * Set the page all-frozen (and all-visible) in the VM.
-             *
-             * We can pass InvalidTransactionId as our visibility_cutoff_xid,
-             * since a snapshotConflictHorizon sufficient to make everything
-             * safe for REDO was logged when the page's tuples were frozen.
-             */
-            Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
-            visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-                              vmbuffer, InvalidTransactionId,
-                              VISIBILITYMAP_ALL_VISIBLE |
-                              VISIBILITYMAP_ALL_FROZEN);
-        }
+        lazy_scan_prune(vacrel, buf, blkno, page,
+                        vmbuffer, all_visible_according_to_vm,
+                        &prunestate);
 
        /*
         * Final steps for block: drop cleanup lock, record free space in the
@@ -1496,6 +1389,8 @@ lazy_scan_prune(LVRelState *vacrel,
                 Buffer buf,
                 BlockNumber blkno,
                 Page page,
+                Buffer vmbuffer,
+                bool all_visible_according_to_vm,
                 LVPagePruneState *prunestate)
 {
     Relation rel = vacrel->rel;
@@ -1880,6 +1775,115 @@ lazy_scan_prune(LVRelState *vacrel,
     /* Can't truncate this page */
     if (hastup)
         vacrel->nonempty_pages = blkno + 1;
+
+    Assert(!prunestate->all_visible || !prunestate->has_lpdead_items);
+
+    /*
+     * Handle setting visibility map bit based on information from the VM (as
+     * of last lazy_scan_skip() call), and from prunestate
+     */
+    if (!all_visible_according_to_vm && prunestate->all_visible)
+    {
+        uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
+
+        if (prunestate->all_frozen)
+        {
+            Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+            flags |= VISIBILITYMAP_ALL_FROZEN;
+        }
+
+        /*
+         * It should never be the case that the visibility map page is set
+         * while the page-level bit is clear, but the reverse is allowed (if
+         * checksums are not enabled). Regardless, set both bits so that we
+         * get back in sync.
+         *
+         * NB: If the heap page is all-visible but the VM bit is not set, we
+         * don't need to dirty the heap page. However, if checksums are
+         * enabled, we do need to make sure that the heap page is dirtied
+         * before passing it to visibilitymap_set(), because it may be logged.
+         * Given that this situation should only happen in rare cases after a
+         * crash, it is not worth optimizing.
+         */
+        PageSetAllVisible(page);
+        MarkBufferDirty(buf);
+        visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+                          vmbuffer, prunestate->visibility_cutoff_xid,
+                          flags);
+    }
+
+    /*
+     * As of PostgreSQL 9.2, the visibility map bit should never be set if the
+     * page-level bit is clear. However, it's possible that the bit got
+     * cleared after lazy_scan_skip() was called, so we must recheck with
+     * buffer lock before concluding that the VM is corrupt.
+     */
+    else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
+             visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
+    {
+        elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
+             vacrel->relname, blkno);
+        visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
+                            VISIBILITYMAP_VALID_BITS);
+    }
+
+    /*
+     * It's possible for the value returned by
+     * GetOldestNonRemovableTransactionId() to move backwards, so it's not
+     * wrong for us to see tuples that appear to not be visible to everyone
+     * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
+     * never moves backwards, but GetOldestNonRemovableTransactionId() is
+     * conservative and sometimes returns a value that's unnecessarily small,
+     * so if we see that contradiction it just means that the tuples that we
+     * think are not visible to everyone yet actually are, and the
+     * PD_ALL_VISIBLE flag is correct.
+     *
+     * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
+     * however.
+     */
+    else if (prunestate->has_lpdead_items && PageIsAllVisible(page))
+    {
+        elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
+             vacrel->relname, blkno);
+        PageClearAllVisible(page);
+        MarkBufferDirty(buf);
+        visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
+                            VISIBILITYMAP_VALID_BITS);
+    }
+
+    /*
+     * If the all-visible page is all-frozen but not marked as such yet, mark
+     * it as all-frozen. Note that all_frozen is only valid if all_visible is
+     * true, so we must check both prunestate fields.
+     */
+    else if (all_visible_according_to_vm && prunestate->all_visible &&
+             prunestate->all_frozen &&
+             !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+    {
+        /*
+         * Avoid relying on all_visible_according_to_vm as a proxy for the
+         * page-level PD_ALL_VISIBLE bit being set, since it might have become
+         * stale -- even when all_visible is set in prunestate
+         */
+        if (!PageIsAllVisible(page))
+        {
+            PageSetAllVisible(page);
+            MarkBufferDirty(buf);
+        }
+
+        /*
+         * Set the page all-frozen (and all-visible) in the VM.
+         *
+         * We can pass InvalidTransactionId as our visibility_cutoff_xid,
+         * since a snapshotConflictHorizon sufficient to make everything safe
+         * for REDO was logged when the page's tuples were frozen.
+         */
+        Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+        visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+                          vmbuffer, InvalidTransactionId,
+                          VISIBILITYMAP_ALL_VISIBLE |
+                          VISIBILITYMAP_ALL_FROZEN);
+    }
 }
 
 /*
