Commit dd72882

Fix locking problem in _hash_squeezebucket() / _hash_freeovflpage().
A bucket squeeze operation needs to lock each page of the bucket before releasing the prior page, but the previous coding fumbled the locking when freeing an overflow page during a bucket squeeze operation. Commit 6d46f47 introduced this bug.

Amit Kapila, with help from Kuntal Ghosh and Dilip Kumar, after an initial trouble report by Jeff Janes. Reviewed by me. I also fixed a problem with a comment.
1 parent 668dbbe commit dd72882
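For context, the rule the commit message states ("lock each page of the bucket before releasing the prior page") is the classic hand-over-hand locking pattern for walking a linked chain of pages. Below is a minimal, self-contained illustration of that pattern using POSIX rwlocks over a toy page chain; the names PageNode, process_page, and walk_chain are hypothetical stand-ins, not PostgreSQL's buffer-manager API.

/*
 * Hand-over-hand ("crab") locking along a singly linked chain of pages:
 * lock the next page before releasing the lock on the current one, so the
 * walker never depends on a page it does not hold locked.
 *
 * Illustrative sketch only; not PostgreSQL code.
 */
#include <pthread.h>
#include <stddef.h>

typedef struct PageNode
{
    pthread_rwlock_t lock;
    struct PageNode *next;
    int data;
} PageNode;

static void
process_page(PageNode *page)
{
    page->data++;               /* stand-in for moving tuples around */
}

static void
walk_chain(PageNode *head)
{
    PageNode *cur = head;

    if (cur == NULL)
        return;
    pthread_rwlock_wrlock(&cur->lock);
    while (cur != NULL)
    {
        PageNode *next = cur->next;

        if (next != NULL)
            pthread_rwlock_wrlock(&next->lock);   /* lock the next page first... */
        process_page(cur);
        pthread_rwlock_unlock(&cur->lock);        /* ...then release the current one */
        cur = next;
    }
}

int
main(void)
{
    PageNode c = {PTHREAD_RWLOCK_INITIALIZER, NULL, 0};
    PageNode b = {PTHREAD_RWLOCK_INITIALIZER, &c, 0};
    PageNode a = {PTHREAD_RWLOCK_INITIALIZER, &b, 0};

    walk_chain(&a);
    return 0;
}

In _hash_squeezebucket() the same ordering applies to the read and write pages of the bucket chain; the diff below restores it for the case where an overflow page is freed.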

2 files changed: +13 −30 lines

src/backend/access/hash/hashovfl.c

Lines changed: 12 additions & 29 deletions
@@ -377,12 +377,11 @@ _hash_firstfreebit(uint32 map)
  * NB: caller must not hold lock on metapage, nor on page, that's next to
  * ovflbuf in the bucket chain.  We don't acquire the lock on page that's
  * prior to ovflbuf in chain if it is same as wbuf because the caller already
- * has a lock on same.  This function releases the lock on wbuf and caller
- * is responsible for releasing the pin on same.
+ * has a lock on same.
  */
 BlockNumber
 _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
-                   bool wbuf_dirty, BufferAccessStrategy bstrategy)
+                   BufferAccessStrategy bstrategy)
 {
     HashMetaPage metap;
     Buffer       metabuf;
@@ -447,24 +446,10 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
         Assert(prevopaque->hasho_bucket == bucket);
         prevopaque->hasho_nextblkno = nextblkno;
 
+        MarkBufferDirty(prevbuf);
         if (prevblkno != writeblkno)
-        {
-            MarkBufferDirty(prevbuf);
             _hash_relbuf(rel, prevbuf);
-        }
-        else
-        {
-            /* ensure to mark prevbuf as dirty */
-            wbuf_dirty = true;
-        }
     }
-
-    /* write and unlock the write buffer */
-    if (wbuf_dirty)
-        _hash_chgbufaccess(rel, wbuf, HASH_WRITE, HASH_NOLOCK);
-    else
-        _hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
-
     if (BlockNumberIsValid(nextblkno))
     {
         Buffer      nextbuf = _hash_getbuf_with_strategy(rel,
@@ -783,30 +768,28 @@ _hash_squeezebucket(Relation rel,
          * Tricky point here: if our read and write pages are adjacent in the
          * bucket chain, our write lock on wbuf will conflict with
          * _hash_freeovflpage's attempt to update the sibling links of the
-         * removed page.  In that case, we don't need to lock it again and we
-         * always release the lock on wbuf in _hash_freeovflpage and then
-         * retake it again here.  This will not only simplify the code, but is
-         * required to atomically log the changes which will be helpful when
-         * we write WAL for hash indexes.
+         * removed page.  In that case, we don't need to lock it again.
          */
         rblkno = ropaque->hasho_prevblkno;
         Assert(BlockNumberIsValid(rblkno));
 
         /* free this overflow page (releases rbuf) */
-        _hash_freeovflpage(rel, rbuf, wbuf, wbuf_dirty, bstrategy);
+        _hash_freeovflpage(rel, rbuf, wbuf, bstrategy);
+
+        if (wbuf_dirty)
+            MarkBufferDirty(wbuf);
 
         /* are we freeing the page adjacent to wbuf? */
         if (rblkno == wblkno)
         {
             /* retain the pin on primary bucket page till end of bucket scan */
-            if (wblkno != bucket_blkno)
-                _hash_dropbuf(rel, wbuf);
+            if (wblkno == bucket_blkno)
+                _hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+            else
+                _hash_relbuf(rel, wbuf);
             return;
         }
 
-        /* lock the overflow page being written, then get the previous one */
-        _hash_chgbufaccess(rel, wbuf, HASH_NOLOCK, HASH_WRITE);
-
         rbuf = _hash_getbuf_with_strategy(rel,
                                           rblkno,
                                           HASH_WRITE,
src/include/access/hash.h

Lines changed: 1 addition & 1 deletion
@@ -314,7 +314,7 @@ extern OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
 /* hashovfl.c */
 extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin);
 extern BlockNumber _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
-                   bool wbuf_dirty, BufferAccessStrategy bstrategy);
+                   BufferAccessStrategy bstrategy);
 extern void _hash_initbitmap(Relation rel, HashMetaPage metap,
              BlockNumber blkno, ForkNumber forkNum);
 extern void _hash_squeezebucket(Relation rel,
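To summarize the changed division of responsibility visible in the hunks above: _hash_freeovflpage() no longer takes a wbuf_dirty flag and no longer releases the lock on wbuf; the caller keeps wbuf locked across the call, marks it dirty itself if it has modified it, and releases it when it is done. The following is a small, self-contained analogy of that contract using POSIX rwlocks; Page, chain_unlink_page, and squeeze_step are hypothetical names, and the bodies only mimic the locking shape, not the real page manipulation.

/*
 * Toy analogy of the post-commit contract between _hash_squeezebucket()
 * (caller) and _hash_freeovflpage() (callee): the callee never unlocks the
 * caller's write page.  Not PostgreSQL code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct Page
{
    pthread_rwlock_t lock;
    struct Page *prev;
    bool dirty;
} Page;

/*
 * Unlink "victim" from the chain.  The caller already holds a write lock on
 * wpage and keeps it across this call; we only lock pages other than wpage,
 * and we release only the pages we locked plus the victim itself.
 */
static void
chain_unlink_page(Page *victim, Page *wpage)
{
    Page *prev = victim->prev;

    if (prev != NULL && prev != wpage)
        pthread_rwlock_wrlock(&prev->lock);
    if (prev != NULL)
        prev->dirty = true;               /* sibling link would change here */
    if (prev != NULL && prev != wpage)
        pthread_rwlock_unlock(&prev->lock);
    pthread_rwlock_unlock(&victim->lock); /* the freed page is released */
}

/* Caller side: keep wpage locked, then dirty and release it ourselves. */
static void
squeeze_step(Page *victim, Page *wpage, bool wpage_dirty)
{
    pthread_rwlock_wrlock(&victim->lock);
    chain_unlink_page(victim, wpage);     /* wpage stays locked throughout */
    if (wpage_dirty)
        wpage->dirty = true;              /* analogue of MarkBufferDirty(wbuf) */
    pthread_rwlock_unlock(&wpage->lock);  /* caller, not callee, unlocks wpage */
}

int
main(void)
{
    Page w = {PTHREAD_RWLOCK_INITIALIZER, NULL, false};
    Page v = {PTHREAD_RWLOCK_INITIALIZER, &w, false};

    pthread_rwlock_wrlock(&w.lock);       /* caller holds its write page */
    squeeze_step(&v, &w, true);
    return 0;
}

This is only an analogy of the locking shape; the actual tuple movement, metapage update, and the adjacent-page special case (rblkno == wblkno) are shown in the hashovfl.c diff above.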
