Commit 11470f5

Allow locking updated tuples in tuple_update() and tuple_delete()
Currently, in read committed transaction isolation mode (the default), we have the following sequence of actions when tuple_update()/tuple_delete() finds the tuple updated by a concurrent transaction.

1. Attempt to update/delete the tuple with tuple_update()/tuple_delete(), which returns TM_Updated.
2. Lock the tuple with tuple_lock().
3. Re-evaluate the plan qual (recheck whether we still need to update/delete, and calculate the new tuple for the update).
4. Make a second attempt to update/delete the tuple with tuple_update()/tuple_delete(). This attempt should succeed, since the tuple was locked in step 2.

This patch eliminates step 2 by taking the lock during the first tuple_update()/tuple_delete() call. The heap table access method saves some effort by checking the updated tuple once instead of twice. Future undo-based table access methods, which will start from the latest row version, can immediately place a lock there.

The code in nodeModifyTable.c is simplified by removing the nested switch/case.

Discussion: https://postgr.es/m/CAPpHfdua-YFw3XTprfutzGp28xXLigFtzNbuFY8yPhqeq6X5kg%40mail.gmail.com
Reviewed-by: Aleksander Alekseev, Pavel Borisov, Vignesh C, Mason Sharp
Reviewed-by: Andres Freund, Chris Travers
1 parent 764da77 commit 11470f5
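To make the change concrete, here is a minimal, self-contained C sketch of the control flow before and after the patch. It is not PostgreSQL source: the TM_Result values and the step numbering mirror the commit message above, while the functions, the conflict flag, and the lock_updated/traversed parameters are invented stand-ins for tuple_delete(), tuple_lock(), and tmfd.traversed.

/*
 * flow_sketch.c -- NOT PostgreSQL code.  A stand-alone simulation of the old
 * and new read-committed conflict handling; the "concurrent writer" is faked
 * with a flag.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { TM_Ok, TM_Updated } TM_Result;

static bool conflict = true;    /* a concurrent transaction updated the row */

/* Old protocol: update/delete only reports the conflict; the caller locks. */
static TM_Result
old_tuple_delete(void)
{
    return conflict ? TM_Updated : TM_Ok;
}

static void
old_tuple_lock(void)
{
    conflict = false;           /* step 2: lock the latest row version */
}

/*
 * New protocol: one call reports the conflict and, if the caller asked for
 * it, leaves the latest row version locked (tmfd.traversed in the real code).
 */
static TM_Result
new_tuple_delete(bool lock_updated, bool *traversed)
{
    if (!conflict)
        return TM_Ok;
    if (lock_updated)
    {
        conflict = false;       /* lock taken inside the table AM */
        *traversed = true;
    }
    return TM_Updated;
}

int
main(void)
{
    bool traversed = false;

    /* Old flow: delete -> lock -> EvalPlanQual recheck -> delete again. */
    if (old_tuple_delete() == TM_Updated)
    {
        old_tuple_lock();
        /* EvalPlanQual recheck would run here */
        printf("old flow retry ok: %d\n", old_tuple_delete() == TM_Ok);
    }

    /* New flow: the first call already locked the row, so only the recheck
     * and one retry remain. */
    conflict = true;
    if (new_tuple_delete(true, &traversed) == TM_Updated && traversed)
    {
        /* EvalPlanQual recheck would run here */
        printf("new flow retry ok: %d\n",
               new_tuple_delete(true, &traversed) == TM_Ok);
    }
    return 0;
}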

6 files changed, +285 -186 lines changed


src/backend/access/heap/heapam_handler.c

Lines changed: 100 additions & 9 deletions
@@ -45,6 +45,12 @@
 #include "utils/builtins.h"
 #include "utils/rel.h"
 
+static TM_Result heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
+                                            Snapshot snapshot, TupleTableSlot *slot,
+                                            CommandId cid, LockTupleMode mode,
+                                            LockWaitPolicy wait_policy, uint8 flags,
+                                            TM_FailureData *tmfd, bool updated);
+
 static void reform_and_rewrite_tuple(HeapTuple tuple,
                                      Relation OldHeap, Relation NewHeap,
                                      Datum *values, bool *isnull, RewriteState rwstate);
@@ -299,22 +305,55 @@ heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
 static TM_Result
 heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
                     Snapshot snapshot, Snapshot crosscheck, bool wait,
-                    TM_FailureData *tmfd, bool changingPart)
+                    TM_FailureData *tmfd, bool changingPart,
+                    LazyTupleTableSlot *lockedSlot)
 {
+    TM_Result   result;
+
     /*
      * Currently Deleting of index tuples are handled at vacuum, in case if
      * the storage itself is cleaning the dead tuples by itself, it is the
      * time to call the index tuple deletion also.
      */
-    return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
+    result = heap_delete(relation, tid, cid, crosscheck, wait,
+                         tmfd, changingPart);
+
+    /*
+     * If the tuple has been concurrently updated, then get the lock on it.
+     * (Do this if the caller asked for that by providing a 'lockedSlot'.)
+     * With the lock held, a retry of the delete should succeed even if there
+     * are more concurrent update attempts.
+     */
+    if (result == TM_Updated && lockedSlot)
+    {
+        TupleTableSlot *evalSlot;
+
+        Assert(wait);
+
+        evalSlot = LAZY_TTS_EVAL(lockedSlot);
+        result = heapam_tuple_lock_internal(relation, tid, snapshot,
+                                            evalSlot, cid, LockTupleExclusive,
+                                            LockWaitBlock,
+                                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+                                            tmfd, true);
+
+        if (result == TM_Ok)
+        {
+            tmfd->traversed = true;
+            return TM_Updated;
+        }
+    }
+
+    return result;
 }
 
 
 static TM_Result
 heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
                     CommandId cid, Snapshot snapshot, Snapshot crosscheck,
                     bool wait, TM_FailureData *tmfd,
-                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
+                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes,
+                    LazyTupleTableSlot *lockedSlot)
 {
     bool        shouldFree = true;
     HeapTuple   tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
@@ -352,6 +391,32 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
     if (shouldFree)
         pfree(tuple);
 
+    /*
+     * If the tuple has been concurrently updated, then get the lock on it.
+     * (Do this if the caller asked for that by providing a 'lockedSlot'.)
+     * With the lock held, a retry of the update should succeed even if there
+     * are more concurrent update attempts.
+     */
+    if (result == TM_Updated && lockedSlot)
+    {
+        TupleTableSlot *evalSlot;
+
+        Assert(wait);
+
+        evalSlot = LAZY_TTS_EVAL(lockedSlot);
+        result = heapam_tuple_lock_internal(relation, otid, snapshot,
+                                            evalSlot, cid, *lockmode,
+                                            LockWaitBlock,
+                                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+                                            tmfd, true);
+
+        if (result == TM_Ok)
+        {
+            tmfd->traversed = true;
+            return TM_Updated;
+        }
+    }
+
     return result;
 }
 
@@ -360,10 +425,26 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
                   TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
                   LockWaitPolicy wait_policy, uint8 flags,
                   TM_FailureData *tmfd)
+{
+    return heapam_tuple_lock_internal(relation, tid, snapshot, slot, cid,
+                                      mode, wait_policy, flags, tmfd, false);
+}
+
+/*
+ * This routine does the work for heapam_tuple_lock(), but also supports an
+ * 'updated' argument to reuse the work done by heapam_tuple_update() or
+ * heapam_tuple_delete() on figuring out that the tuple was concurrently
+ * updated.
+ */
+static TM_Result
+heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
+                           Snapshot snapshot, TupleTableSlot *slot,
+                           CommandId cid, LockTupleMode mode,
+                           LockWaitPolicy wait_policy, uint8 flags,
+                           TM_FailureData *tmfd, bool updated)
 {
     BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
     TM_Result   result;
-    Buffer      buffer;
+    Buffer      buffer = InvalidBuffer;
     HeapTuple   tuple = &bslot->base.tupdata;
     bool        follow_updates;
 
@@ -374,16 +455,26 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
 
 tuple_lock_retry:
     tuple->t_self = *tid;
-    result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
-                             follow_updates, &buffer, tmfd);
+    if (!updated)
+        result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+                                 follow_updates, &buffer, tmfd);
+    else
+        result = TM_Updated;
 
     if (result == TM_Updated &&
         (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
     {
-        /* Should not encounter speculative tuple on recheck */
-        Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+        if (!updated)
+        {
+            /* Should not encounter speculative tuple on recheck */
+            Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
 
-        ReleaseBuffer(buffer);
+            ReleaseBuffer(buffer);
+        }
+        else
+        {
+            updated = false;
+        }
 
         if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
         {
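In the hunks above, the new lockedSlot argument is a LazyTupleTableSlot that is only turned into a real TupleTableSlot via LAZY_TTS_EVAL() once the conflict branch is actually taken, so the common no-conflict case presumably never materializes the extra slot. The lazy-slot type itself is defined elsewhere in the patch and is not shown in this excerpt; the following stand-alone sketch merely illustrates the evaluate-on-first-use pattern with invented names (lazy_slot, lazy_eval, make_demo_slot).

/*
 * Illustration only: an evaluate-on-first-use wrapper in the spirit of
 * LazyTupleTableSlot / LAZY_TTS_EVAL().  All names and the layout here are
 * invented; the real definition lives in a part of the patch not shown above.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct lazy_slot
{
    void   *slot;               /* stays NULL until first use */
    void   *(*make) (void *arg);    /* builds the slot on demand */
    void   *arg;
} lazy_slot;

/* Build the slot only when someone actually needs it, then cache it. */
static void *
lazy_eval(lazy_slot *ls)
{
    if (ls->slot == NULL)
        ls->slot = ls->make(ls->arg);
    return ls->slot;
}

/* Demo callback standing in for whatever allocates the executor slot. */
static void *
make_demo_slot(void *arg)
{
    printf("allocating slot for %s\n", (const char *) arg);
    return malloc(16);
}

int
main(void)
{
    lazy_slot   ls = {NULL, make_demo_slot, (void *) "conflict path"};

    /* No-conflict path: lazy_eval() is never called, nothing is allocated. */

    /* Conflict path: the first evaluation allocates, later ones reuse it. */
    void       *s1 = lazy_eval(&ls);
    void       *s2 = lazy_eval(&ls);

    printf("same slot reused: %s\n", s1 == s2 ? "yes" : "no");
    free(s1);
    return 0;
}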

src/backend/access/table/tableam.c

Lines changed: 4 additions & 2 deletions
@@ -306,7 +306,8 @@ simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
                                 GetCurrentCommandId(true),
                                 snapshot, InvalidSnapshot,
                                 true /* wait for commit */ ,
-                                &tmfd, false /* changingPart */ );
+                                &tmfd, false /* changingPart */ ,
+                                NULL);
 
     switch (result)
     {
@@ -355,7 +356,8 @@ simple_table_tuple_update(Relation rel, ItemPointer otid,
                                  GetCurrentCommandId(true),
                                  snapshot, InvalidSnapshot,
                                  true /* wait for commit */ ,
-                                 &tmfd, &lockmode, update_indexes);
+                                 &tmfd, &lockmode, update_indexes,
+                                 NULL);
 
     switch (result)
     {
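Both simple_table_tuple_delete() and simple_table_tuple_update() pass NULL for the new lockedSlot argument: these simple_ wrappers are meant for callers that do not expect concurrent updates and do not implement the EvalPlanQual retry, so they have no use for the new lock-on-conflict behavior and keep their previous semantics.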
