Skip to content

Commit a034f8b

Browse files
nbtree: Rename nbtinsert.c variables for consistency.
Stop naming special area/opaque pointer variables 'lpageop' in contexts where it doesn't make sense. This is a holdover from a time when logic that is now spread across _bt_insertonpg(), _bt_findinsertloc(), and _bt_split() was more centralized. 'lpageop' denotes "left page", which only makes sense in contexts where there is also a right page. Also acquire page flag variables up front within _bt_insertonpg(). This brings it closer to _bt_split() following refactoring commit bc3087b, and allows the page split and retail insert paths to both make use of the same variables.
1 parent 9653f24 commit a034f8b

File tree

1 file changed

+62
-59
lines changed

1 file changed

+62
-59
lines changed

src/backend/access/nbtree/nbtinsert.c

Lines changed: 62 additions & 59 deletions
Original file line number | Diff line number | Diff line change
@@ -54,7 +54,7 @@ static Buffer _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf,
5454
IndexTuple newitem, IndexTuple orignewitem,
5555
IndexTuple nposting, uint16 postingoff);
5656
static void _bt_insert_parent(Relation rel, Buffer buf, Buffer rbuf,
57-
BTStack stack, bool is_root, bool is_only);
57+
BTStack stack, bool isroot, bool isonly);
5858
static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
5959
static inline bool _bt_pgaddtup(Page page, Size itemsize, IndexTuple itup,
6060
OffsetNumber itup_off, bool newfirstdataitem);
@@ -306,11 +306,11 @@ _bt_search_insert(Relation rel, BTInsertState insertstate)
306306
if (_bt_conditionallockbuf(rel, insertstate->buf))
307307
{
308308
Page page;
309-
BTPageOpaque lpageop;
309+
BTPageOpaque opaque;
310310

311311
_bt_checkpage(rel, insertstate->buf);
312312
page = BufferGetPage(insertstate->buf);
313-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
313+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
314314

315315
/*
316316
* Check if the page is still the rightmost leaf page and has
@@ -320,9 +320,9 @@ _bt_search_insert(Relation rel, BTInsertState insertstate)
320320
* scantid to be unset when our caller is a checkingunique
321321
* inserter.)
322322
*/
323-
if (P_RIGHTMOST(lpageop) &&
324-
P_ISLEAF(lpageop) &&
325-
!P_IGNORE(lpageop) &&
323+
if (P_RIGHTMOST(opaque) &&
324+
P_ISLEAF(opaque) &&
325+
!P_IGNORE(opaque) &&
326326
PageGetFreeSpace(page) > insertstate->itemsz &&
327327
PageGetMaxOffsetNumber(page) >= P_HIKEY &&
328328
_bt_compare(rel, insertstate->itup_key, page, P_HIKEY) > 0)
@@ -795,17 +795,17 @@ _bt_findinsertloc(Relation rel,
795795
{
796796
BTScanInsert itup_key = insertstate->itup_key;
797797
Page page = BufferGetPage(insertstate->buf);
798-
BTPageOpaque lpageop;
798+
BTPageOpaque opaque;
799799
OffsetNumber newitemoff;
800800

801-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
801+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
802802

803803
/* Check 1/3 of a page restriction */
804804
if (unlikely(insertstate->itemsz > BTMaxItemSize(page)))
805805
_bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
806806
insertstate->itup);
807807

808-
Assert(P_ISLEAF(lpageop) && !P_INCOMPLETE_SPLIT(lpageop));
808+
Assert(P_ISLEAF(opaque) && !P_INCOMPLETE_SPLIT(opaque));
809809
Assert(!insertstate->bounds_valid || checkingunique);
810810
Assert(!itup_key->heapkeyspace || itup_key->scantid != NULL);
811811
Assert(itup_key->heapkeyspace || itup_key->scantid == NULL);
@@ -857,14 +857,14 @@ _bt_findinsertloc(Relation rel,
857857
break;
858858

859859
/* Test '<=', not '!=', since scantid is set now */
860-
if (P_RIGHTMOST(lpageop) ||
860+
if (P_RIGHTMOST(opaque) ||
861861
_bt_compare(rel, itup_key, page, P_HIKEY) <= 0)
862862
break;
863863

864864
_bt_stepright(rel, insertstate, stack);
865865
/* Update local state after stepping right */
866866
page = BufferGetPage(insertstate->buf);
867-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
867+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
868868
/* Assume duplicates (if checkingunique) */
869869
uniquedup = true;
870870
}
@@ -884,7 +884,7 @@ _bt_findinsertloc(Relation rel,
884884
*/
885885
if (PageGetFreeSpace(page) < insertstate->itemsz)
886886
{
887-
if (P_HAS_GARBAGE(lpageop))
887+
if (P_HAS_GARBAGE(opaque))
888888
{
889889
_bt_vacuum_one_page(rel, insertstate->buf, heapRel);
890890
insertstate->bounds_valid = false;
@@ -940,7 +940,7 @@ _bt_findinsertloc(Relation rel,
940940
* Before considering moving right, see if we can obtain enough
941941
* space by erasing LP_DEAD items
942942
*/
943-
if (P_HAS_GARBAGE(lpageop))
943+
if (P_HAS_GARBAGE(opaque))
944944
{
945945
_bt_vacuum_one_page(rel, insertstate->buf, heapRel);
946946
insertstate->bounds_valid = false;
@@ -964,23 +964,23 @@ _bt_findinsertloc(Relation rel,
964964
insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
965965
break;
966966

967-
if (P_RIGHTMOST(lpageop) ||
967+
if (P_RIGHTMOST(opaque) ||
968968
_bt_compare(rel, itup_key, page, P_HIKEY) != 0 ||
969969
random() <= (MAX_RANDOM_VALUE / 100))
970970
break;
971971

972972
_bt_stepright(rel, insertstate, stack);
973973
/* Update local state after stepping right */
974974
page = BufferGetPage(insertstate->buf);
975-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
975+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
976976
}
977977
}
978978

979979
/*
980980
* We should now be on the correct page. Find the offset within the page
981981
* for the new tuple. (Possibly reusing earlier search bounds.)
982982
*/
983-
Assert(P_RIGHTMOST(lpageop) ||
983+
Assert(P_RIGHTMOST(opaque) ||
984984
_bt_compare(rel, itup_key, page, P_HIKEY) <= 0);
985985

986986
newitemoff = _bt_binsrch_insert(rel, insertstate);
@@ -1025,41 +1025,41 @@ static void
10251025
_bt_stepright(Relation rel, BTInsertState insertstate, BTStack stack)
10261026
{
10271027
Page page;
1028-
BTPageOpaque lpageop;
1028+
BTPageOpaque opaque;
10291029
Buffer rbuf;
10301030
BlockNumber rblkno;
10311031

10321032
page = BufferGetPage(insertstate->buf);
1033-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
1033+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
10341034

10351035
rbuf = InvalidBuffer;
1036-
rblkno = lpageop->btpo_next;
1036+
rblkno = opaque->btpo_next;
10371037
for (;;)
10381038
{
10391039
rbuf = _bt_relandgetbuf(rel, rbuf, rblkno, BT_WRITE);
10401040
page = BufferGetPage(rbuf);
1041-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
1041+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
10421042

10431043
/*
10441044
* If this page was incompletely split, finish the split now. We do
10451045
* this while holding a lock on the left sibling, which is not good
10461046
* because finishing the split could be a fairly lengthy operation.
10471047
* But this should happen very seldom.
10481048
*/
1049-
if (P_INCOMPLETE_SPLIT(lpageop))
1049+
if (P_INCOMPLETE_SPLIT(opaque))
10501050
{
10511051
_bt_finish_split(rel, rbuf, stack);
10521052
rbuf = InvalidBuffer;
10531053
continue;
10541054
}
10551055

1056-
if (!P_IGNORE(lpageop))
1056+
if (!P_IGNORE(opaque))
10571057
break;
1058-
if (P_RIGHTMOST(lpageop))
1058+
if (P_RIGHTMOST(opaque))
10591059
elog(ERROR, "fell off the end of index \"%s\"",
10601060
RelationGetRelationName(rel));
10611061

1062-
rblkno = lpageop->btpo_next;
1062+
rblkno = opaque->btpo_next;
10631063
}
10641064
/* rbuf locked; unlock buf, update state for caller */
10651065
_bt_relbuf(rel, insertstate->buf);
@@ -1110,35 +1110,43 @@ _bt_insertonpg(Relation rel,
11101110
bool split_only_page)
11111111
{
11121112
Page page;
1113-
BTPageOpaque lpageop;
1113+
BTPageOpaque opaque;
1114+
bool isleaf,
1115+
isroot,
1116+
isrightmost,
1117+
isonly;
11141118
IndexTuple oposting = NULL;
11151119
IndexTuple origitup = NULL;
11161120
IndexTuple nposting = NULL;
11171121

11181122
page = BufferGetPage(buf);
1119-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
1123+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
1124+
isleaf = P_ISLEAF(opaque);
1125+
isroot = P_ISROOT(opaque);
1126+
isrightmost = P_RIGHTMOST(opaque);
1127+
isonly = P_LEFTMOST(opaque) && P_RIGHTMOST(opaque);
11201128

11211129
/* child buffer must be given iff inserting on an internal page */
1122-
Assert(P_ISLEAF(lpageop) == !BufferIsValid(cbuf));
1130+
Assert(isleaf == !BufferIsValid(cbuf));
11231131
/* tuple must have appropriate number of attributes */
1124-
Assert(!P_ISLEAF(lpageop) ||
1132+
Assert(!isleaf ||
11251133
BTreeTupleGetNAtts(itup, rel) ==
11261134
IndexRelationGetNumberOfAttributes(rel));
1127-
Assert(P_ISLEAF(lpageop) ||
1135+
Assert(isleaf ||
11281136
BTreeTupleGetNAtts(itup, rel) <=
11291137
IndexRelationGetNumberOfKeyAttributes(rel));
11301138
Assert(!BTreeTupleIsPosting(itup));
11311139
Assert(MAXALIGN(IndexTupleSize(itup)) == itemsz);
11321140
/* Caller must always finish incomplete split for us */
1133-
Assert(!P_INCOMPLETE_SPLIT(lpageop));
1141+
Assert(!P_INCOMPLETE_SPLIT(opaque));
11341142

11351143
/*
11361144
* Every internal page should have exactly one negative infinity item at
11371145
* all times. Only _bt_split() and _bt_newroot() should add items that
11381146
* become negative infinity items through truncation, since they're the
11391147
* only routines that allocate new internal pages.
11401148
*/
1141-
Assert(P_ISLEAF(lpageop) || newitemoff > P_FIRSTDATAKEY(lpageop));
1149+
Assert(isleaf || newitemoff > P_FIRSTDATAKEY(opaque));
11421150

11431151
/*
11441152
* Do we need to split an existing posting list item?
@@ -1154,7 +1162,7 @@ _bt_insertonpg(Relation rel,
11541162
* its post-split version is treated as an extra step in either the
11551163
* insert or page split critical section.
11561164
*/
1157-
Assert(P_ISLEAF(lpageop) && !ItemIdIsDead(itemid));
1165+
Assert(isleaf && !ItemIdIsDead(itemid));
11581166
Assert(itup_key->heapkeyspace && itup_key->allequalimage);
11591167
oposting = (IndexTuple) PageGetItem(page, itemid);
11601168

@@ -1177,8 +1185,6 @@ _bt_insertonpg(Relation rel,
11771185
*/
11781186
if (PageGetFreeSpace(page) < itemsz)
11791187
{
1180-
bool is_root = P_ISROOT(lpageop);
1181-
bool is_only = P_LEFTMOST(lpageop) && P_RIGHTMOST(lpageop);
11821188
Buffer rbuf;
11831189

11841190
Assert(!split_only_page);
@@ -1208,12 +1214,10 @@ _bt_insertonpg(Relation rel,
12081214
* page.
12091215
*----------
12101216
*/
1211-
_bt_insert_parent(rel, buf, rbuf, stack, is_root, is_only);
1217+
_bt_insert_parent(rel, buf, rbuf, stack, isroot, isonly);
12121218
}
12131219
else
12141220
{
1215-
bool isleaf = P_ISLEAF(lpageop);
1216-
bool isrightmost = P_RIGHTMOST(lpageop);
12171221
Buffer metabuf = InvalidBuffer;
12181222
Page metapg = NULL;
12191223
BTMetaPageData *metad = NULL;
@@ -1226,7 +1230,7 @@ _bt_insertonpg(Relation rel,
12261230
* at or above the current page. We can safely acquire a lock on the
12271231
* metapage here --- see comments for _bt_newroot().
12281232
*/
1229-
if (split_only_page)
1233+
if (unlikely(split_only_page))
12301234
{
12311235
Assert(!isleaf);
12321236
Assert(BufferIsValid(cbuf));
@@ -1235,7 +1239,7 @@ _bt_insertonpg(Relation rel,
12351239
metapg = BufferGetPage(metabuf);
12361240
metad = BTPageGetMeta(metapg);
12371241

1238-
if (metad->btm_fastlevel >= lpageop->btpo.level)
1242+
if (metad->btm_fastlevel >= opaque->btpo.level)
12391243
{
12401244
/* no update wanted */
12411245
_bt_relbuf(rel, metabuf);
@@ -1262,7 +1266,7 @@ _bt_insertonpg(Relation rel,
12621266
if (metad->btm_version < BTREE_NOVAC_VERSION)
12631267
_bt_upgrademetapage(metapg);
12641268
metad->btm_fastroot = BufferGetBlockNumber(buf);
1265-
metad->btm_fastlevel = lpageop->btpo.level;
1269+
metad->btm_fastlevel = opaque->btpo.level;
12661270
MarkBufferDirty(metabuf);
12671271
}
12681272

@@ -1383,7 +1387,7 @@ _bt_insertonpg(Relation rel,
13831387
* may be used by a future inserter within _bt_search_insert().
13841388
*/
13851389
blockcache = InvalidBlockNumber;
1386-
if (isrightmost && isleaf && !P_ISROOT(lpageop))
1390+
if (isrightmost && isleaf && !isroot)
13871391
blockcache = BufferGetBlockNumber(buf);
13881392

13891393
/* Release buffer for insertion target block */
@@ -2066,16 +2070,16 @@ _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf, Buffer cbuf,
20662070
*
20672071
* stack - stack showing how we got here. Will be NULL when splitting true
20682072
* root, or during concurrent root split, where we can be inefficient
2069-
* is_root - we split the true root
2070-
* is_only - we split a page alone on its level (might have been fast root)
2073+
* isroot - we split the true root
2074+
* isonly - we split a page alone on its level (might have been fast root)
20712075
*/
20722076
static void
20732077
_bt_insert_parent(Relation rel,
20742078
Buffer buf,
20752079
Buffer rbuf,
20762080
BTStack stack,
2077-
bool is_root,
2078-
bool is_only)
2081+
bool isroot,
2082+
bool isonly)
20792083
{
20802084
/*
20812085
* Here we have to do something Lehman and Yao don't talk about: deal with
@@ -2090,12 +2094,12 @@ _bt_insert_parent(Relation rel,
20902094
* from the root. This is not super-efficient, but it's rare enough not
20912095
* to matter.
20922096
*/
2093-
if (is_root)
2097+
if (isroot)
20942098
{
20952099
Buffer rootbuf;
20962100

20972101
Assert(stack == NULL);
2098-
Assert(is_only);
2102+
Assert(isonly);
20992103
/* create a new root node and update the metapage */
21002104
rootbuf = _bt_newroot(rel, buf, rbuf);
21012105
/* release the split buffers */
@@ -2115,10 +2119,10 @@ _bt_insert_parent(Relation rel,
21152119

21162120
if (stack == NULL)
21172121
{
2118-
BTPageOpaque lpageop;
2122+
BTPageOpaque opaque;
21192123

21202124
elog(DEBUG2, "concurrent ROOT page split");
2121-
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
2125+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
21222126

21232127
/*
21242128
* We should never reach here when a leaf page split takes place
@@ -2132,12 +2136,11 @@ _bt_insert_parent(Relation rel,
21322136
* page will split, since it's faster to go through _bt_search()
21332137
* and get a stack in the usual way.
21342138
*/
2135-
Assert(!(P_ISLEAF(lpageop) &&
2139+
Assert(!(P_ISLEAF(opaque) &&
21362140
BlockNumberIsValid(RelationGetTargetBlock(rel))));
21372141

21382142
/* Find the leftmost page at the next level up */
2139-
pbuf = _bt_get_endpoint(rel, lpageop->btpo.level + 1, false,
2140-
NULL);
2143+
pbuf = _bt_get_endpoint(rel, opaque->btpo.level + 1, false, NULL);
21412144
/* Set up a phony stack entry pointing there */
21422145
stack = &fakestack;
21432146
stack->bts_blkno = BufferGetBlockNumber(pbuf);
@@ -2189,7 +2192,7 @@ _bt_insert_parent(Relation rel,
21892192
/* Recursively insert into the parent */
21902193
_bt_insertonpg(rel, NULL, pbuf, buf, stack->bts_parent,
21912194
new_item, MAXALIGN(IndexTupleSize(new_item)),
2192-
stack->bts_offset + 1, 0, is_only);
2195+
stack->bts_offset + 1, 0, isonly);
21932196

21942197
/* be tidy */
21952198
pfree(new_item);
@@ -2214,8 +2217,8 @@ _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack)
22142217
Buffer rbuf;
22152218
Page rpage;
22162219
BTPageOpaque rpageop;
2217-
bool was_root;
2218-
bool was_only;
2220+
bool wasroot;
2221+
bool wasonly;
22192222

22202223
Assert(P_INCOMPLETE_SPLIT(lpageop));
22212224

@@ -2236,20 +2239,20 @@ _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack)
22362239
metapg = BufferGetPage(metabuf);
22372240
metad = BTPageGetMeta(metapg);
22382241

2239-
was_root = (metad->btm_root == BufferGetBlockNumber(lbuf));
2242+
wasroot = (metad->btm_root == BufferGetBlockNumber(lbuf));
22402243

22412244
_bt_relbuf(rel, metabuf);
22422245
}
22432246
else
2244-
was_root = false;
2247+
wasroot = false;
22452248

22462249
/* Was this the only page on the level before split? */
2247-
was_only = (P_LEFTMOST(lpageop) && P_RIGHTMOST(rpageop));
2250+
wasonly = (P_LEFTMOST(lpageop) && P_RIGHTMOST(rpageop));
22482251

22492252
elog(DEBUG1, "finishing incomplete split of %u/%u",
22502253
BufferGetBlockNumber(lbuf), BufferGetBlockNumber(rbuf));
22512254

2252-
_bt_insert_parent(rel, lbuf, rbuf, stack, was_root, was_only);
2255+
_bt_insert_parent(rel, lbuf, rbuf, stack, wasroot, wasonly);
22532256
}
22542257

22552258
/*

0 commit comments

Comments
 (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy