
Commit fcdda1e

Use ExtendBufferedRelTo() in {vm,fsm}_extend()
This uses ExtendBufferedRelTo(), introduced in 31966b1, to extend the visibilitymap and freespacemap to the size needed. It also happens to fix a warning introduced in 3d6a984, reported by Tom Lane.

Discussion: https://postgr.es/m/20221029025420.eplyow6k7tgu6he3@awork3.anarazel.de
Discussion: https://postgr.es/m/2194723.1680736788@sss.pgh.pa.us
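
For orientation before the diffs: both rewritten functions now delegate to a single buffer-manager call. Below is a minimal sketch of that pattern, assuming only what is visible in the hunks that follow; the helper name grow_fork_to() is hypothetical, and the flag descriptions are inferred from how the flags are used here rather than from the bufmgr documentation.

#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/*
 * Hypothetical helper mirroring the new vm_extend()/fsm_extend() bodies:
 * grow 'fork' of 'rel' so it has at least 'nblocks' blocks and return a
 * buffer for the requested block.  EB_CREATE_FORK_IF_NEEDED creates the
 * fork if it does not exist yet, and EB_CLEAR_SIZE_CACHE refreshes the
 * cached relation size, replacing the old smgrcreate()/smgrnblocks()/
 * smgrextend() loop that ran under the relation extension lock.
 */
static Buffer
grow_fork_to(Relation rel, ForkNumber fork, BlockNumber nblocks)
{
    return ExtendBufferedRelTo(EB_REL(rel), fork, NULL,
                               EB_CREATE_FORK_IF_NEEDED |
                               EB_CLEAR_SIZE_CACHE,
                               nblocks,
                               RBM_ZERO_ON_ERROR);
}

The callers pass blkno + 1 as the target size, so the buffer that comes back is treated as the buffer for block blkno, the block they wanted to read in the first place.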
1 parent bccd690 commit fcdda1e

2 files changed: +48, -115 lines

src/backend/access/heap/visibilitymap.c

Lines changed: 25 additions & 55 deletions
@@ -126,7 +126,7 @@
 
 /* prototypes for internal routines */
 static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
-static void vm_extend(Relation rel, BlockNumber vm_nblocks);
+static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);
 
 
 /*
@@ -574,22 +574,29 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
         reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
     }
 
-    /* Handle requests beyond EOF */
+    /*
+     * For reading we use ZERO_ON_ERROR mode, and initialize the page if
+     * necessary. It's always safe to clear bits, so it's better to clear
+     * corrupt pages than error out.
+     *
+     * We use the same path below to initialize pages when extending the
+     * relation, as a concurrent extension can end up with vm_extend()
+     * returning an already-initialized page.
+     */
     if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
     {
         if (extend)
-            vm_extend(rel, blkno + 1);
+            buf = vm_extend(rel, blkno + 1);
         else
             return InvalidBuffer;
     }
+    else
+        buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
+                                 RBM_ZERO_ON_ERROR, NULL);
 
     /*
-     * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
-     * always safe to clear bits, so it's better to clear corrupt pages than
-     * error out.
-     *
-     * The initialize-the-page part is trickier than it looks, because of the
-     * possibility of multiple backends doing this concurrently, and our
+     * Initializing the page when needed is trickier than it looks, because of
+     * the possibility of multiple backends doing this concurrently, and our
      * desire to not uselessly take the buffer lock in the normal path where
      * the page is OK. We must take the lock to initialize the page, so
      * recheck page newness after we have the lock, in case someone else
@@ -602,8 +609,6 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
-    buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
-                             RBM_ZERO_ON_ERROR, NULL);
     if (PageIsNew(BufferGetPage(buf)))
     {
         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
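
The comment retained above refers to initialization code that follows in the unchanged part of vm_readbuf(). A sketch of that recheck-under-lock pattern, paraphrased from the comment rather than quoted from this commit (the PageInit() call and exact shape are an assumption): initialize only if the page is still new after the exclusive buffer lock is taken, so a concurrent backend's initialization is not clobbered.

    if (PageIsNew(BufferGetPage(buf)))
    {
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
        if (PageIsNew(BufferGetPage(buf)))  /* recheck now that we hold the lock */
            PageInit(BufferGetPage(buf), BLCKSZ, 0);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }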
@@ -618,51 +623,16 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
  * Ensure that the visibility map fork is at least vm_nblocks long, extending
  * it if necessary with zeroed pages.
  */
-static void
+static Buffer
 vm_extend(Relation rel, BlockNumber vm_nblocks)
 {
-    BlockNumber vm_nblocks_now;
-    PGAlignedBlock pg = {0};
-    SMgrRelation reln;
+    Buffer      buf;
 
-    /*
-     * We use the relation extension lock to lock out other backends trying to
-     * extend the visibility map at the same time. It also locks out extension
-     * of the main fork, unnecessarily, but extending the visibility map
-     * happens seldom enough that it doesn't seem worthwhile to have a
-     * separate lock tag type for it.
-     *
-     * Note that another backend might have extended or created the relation
-     * by the time we get the lock.
-     */
-    LockRelationForExtension(rel, ExclusiveLock);
-
-    /*
-     * Caution: re-using this smgr pointer could fail if the relcache entry
-     * gets closed. It's safe as long as we only do smgr-level operations
-     * between here and the last use of the pointer.
-     */
-    reln = RelationGetSmgr(rel);
-
-    /*
-     * Create the file first if it doesn't exist. If smgr_vm_nblocks is
-     * positive then it must exist, no need for an smgrexists call.
-     */
-    if ((reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == 0 ||
-         reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber) &&
-        !smgrexists(reln, VISIBILITYMAP_FORKNUM))
-        smgrcreate(reln, VISIBILITYMAP_FORKNUM, false);
-
-    /* Invalidate cache so that smgrnblocks() asks the kernel. */
-    reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = InvalidBlockNumber;
-    vm_nblocks_now = smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
-
-    /* Now extend the file */
-    while (vm_nblocks_now < vm_nblocks)
-    {
-        smgrextend(reln, VISIBILITYMAP_FORKNUM, vm_nblocks_now, pg.data, false);
-        vm_nblocks_now++;
-    }
+    buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
+                              EB_CREATE_FORK_IF_NEEDED |
+                              EB_CLEAR_SIZE_CACHE,
+                              vm_nblocks,
+                              RBM_ZERO_ON_ERROR);
 
     /*
      * Send a shared-inval message to force other backends to close any smgr
@@ -671,7 +641,7 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
     * to keep checking for creation or extension of the file, which happens
     * infrequently.
     */
-    CacheInvalidateSmgr(reln->smgr_rlocator);
+    CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);
 
-    UnlockRelationForExtension(rel, ExclusiveLock);
+    return buf;
 }
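
Put together from the + lines of the two vm_extend() hunks above (the middle of the shared-inval comment is unchanged and not shown by the diff, so it is elided below), the rewritten function reduces to:

static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
    Buffer      buf;

    buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                              EB_CREATE_FORK_IF_NEEDED |
                              EB_CLEAR_SIZE_CACHE,
                              vm_nblocks,
                              RBM_ZERO_ON_ERROR);

    /* ... unchanged comment about the shared-inval smgr message ... */
    CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);

    return buf;
}

The explicit LockRelationForExtension()/UnlockRelationForExtension() calls are gone; as the new comment in vm_readbuf() notes, a concurrent extension can simply hand back an already-initialized page, which the shared initialization path in vm_readbuf() copes with.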

src/backend/storage/freespace/freespace.c

Lines changed: 23 additions & 60 deletions
@@ -98,7 +98,7 @@ static BlockNumber fsm_get_heap_blk(FSMAddress addr, uint16 slot);
 static BlockNumber fsm_logical_to_physical(FSMAddress addr);
 
 static Buffer fsm_readbuf(Relation rel, FSMAddress addr, bool extend);
-static void fsm_extend(Relation rel, BlockNumber fsm_nblocks);
+static Buffer fsm_extend(Relation rel, BlockNumber fsm_nblocks);
 
 /* functions to convert amount of free space to a FSM category */
 static uint8 fsm_space_avail_to_cat(Size avail);
@@ -558,24 +558,30 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
         reln->smgr_cached_nblocks[FSM_FORKNUM] = 0;
     }
 
-    /* Handle requests beyond EOF */
+    /*
+     * For reading we use ZERO_ON_ERROR mode, and initialize the page if
+     * necessary. The FSM information is not accurate anyway, so it's better
+     * to clear corrupt pages than error out. Since the FSM changes are not
+     * WAL-logged, the so-called torn page problem on crash can lead to pages
+     * with corrupt headers, for example.
+     *
+     * We use the same path below to initialize pages when extending the
+     * relation, as a concurrent extension can end up with vm_extend()
+     * returning an already-initialized page.
+     */
     if (blkno >= reln->smgr_cached_nblocks[FSM_FORKNUM])
     {
         if (extend)
-            fsm_extend(rel, blkno + 1);
+            buf = fsm_extend(rel, blkno + 1);
         else
             return InvalidBuffer;
     }
+    else
+        buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
 
     /*
-     * Use ZERO_ON_ERROR mode, and initialize the page if necessary. The FSM
-     * information is not accurate anyway, so it's better to clear corrupt
-     * pages than error out. Since the FSM changes are not WAL-logged, the
-     * so-called torn page problem on crash can lead to pages with corrupt
-     * headers, for example.
-     *
-     * The initialize-the-page part is trickier than it looks, because of the
-     * possibility of multiple backends doing this concurrently, and our
+     * Initializing the page when needed is trickier than it looks, because of
+     * the possibility of multiple backends doing this concurrently, and our
      * desire to not uselessly take the buffer lock in the normal path where
      * the page is OK. We must take the lock to initialize the page, so
      * recheck page newness after we have the lock, in case someone else
@@ -588,7 +594,6 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
-    buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
     if (PageIsNew(BufferGetPage(buf)))
     {
         LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -604,56 +609,14 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
  * it if necessary with empty pages. And by empty, I mean pages filled
  * with zeros, meaning there's no free space.
  */
-static void
+static Buffer
 fsm_extend(Relation rel, BlockNumber fsm_nblocks)
 {
-    BlockNumber fsm_nblocks_now;
-    PGAlignedBlock pg = {0};
-    SMgrRelation reln;
-
-
-    /*
-     * We use the relation extension lock to lock out other backends trying to
-     * extend the FSM at the same time. It also locks out extension of the
-     * main fork, unnecessarily, but extending the FSM happens seldom enough
-     * that it doesn't seem worthwhile to have a separate lock tag type for
-     * it.
-     *
-     * Note that another backend might have extended or created the relation
-     * by the time we get the lock.
-     */
-    LockRelationForExtension(rel, ExclusiveLock);
-
-    /*
-     * Caution: re-using this smgr pointer could fail if the relcache entry
-     * gets closed. It's safe as long as we only do smgr-level operations
-     * between here and the last use of the pointer.
-     */
-    reln = RelationGetSmgr(rel);
-
-    /*
-     * Create the FSM file first if it doesn't exist. If
-     * smgr_cached_nblocks[FSM_FORKNUM] is positive then it must exist, no
-     * need for an smgrexists call.
-     */
-    if ((reln->smgr_cached_nblocks[FSM_FORKNUM] == 0 ||
-         reln->smgr_cached_nblocks[FSM_FORKNUM] == InvalidBlockNumber) &&
-        !smgrexists(reln, FSM_FORKNUM))
-        smgrcreate(reln, FSM_FORKNUM, false);
-
-    /* Invalidate cache so that smgrnblocks() asks the kernel. */
-    reln->smgr_cached_nblocks[FSM_FORKNUM] = InvalidBlockNumber;
-    fsm_nblocks_now = smgrnblocks(reln, FSM_FORKNUM);
-
-    /* Extend as needed. */
-    while (fsm_nblocks_now < fsm_nblocks)
-    {
-        smgrextend(reln, FSM_FORKNUM, fsm_nblocks_now,
-                   pg.data, false);
-        fsm_nblocks_now++;
-    }
-
-    UnlockRelationForExtension(rel, ExclusiveLock);
+    return ExtendBufferedRelTo(EB_REL(rel), FSM_FORKNUM, NULL,
+                               EB_CREATE_FORK_IF_NEEDED |
+                               EB_CLEAR_SIZE_CACHE,
+                               fsm_nblocks,
+                               RBM_ZERO_ON_ERROR);
 }
 
 /*
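
And the FSM counterpart, assembled the same way from the + lines above. Unlike vm_extend(), it has no smgr cache-invalidation step to preserve, so the whole function becomes a single statement:

static Buffer
fsm_extend(Relation rel, BlockNumber fsm_nblocks)
{
    return ExtendBufferedRelTo(EB_REL(rel), FSM_FORKNUM, NULL,
                               EB_CREATE_FORK_IF_NEEDED |
                               EB_CLEAR_SIZE_CACHE,
                               fsm_nblocks,
                               RBM_ZERO_ON_ERROR);
}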
