Commit 96c019f
1 parent: ad98fb1

1 file changed: +95 −95 lines

src/backend/utils/cache/catcache.c (95 additions, 95 deletions)
@@ -1418,34 +1418,34 @@ SearchCatCacheMiss(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	ct = NULL;
-	stale = false;
+		ct = NULL;
+		stale = false;
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-									 hashValue, hashIndex);
-		/* upon failure, we must start the scan over */
-		if (ct == NULL)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			stale = true;
-			break;
+			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+										 hashValue, hashIndex);
+			/* upon failure, we must start the scan over */
+			if (ct == NULL)
+			{
+				stale = true;
+				break;
+			}
+			/* immediately set the refcount to 1 */
+			ResourceOwnerEnlarge(CurrentResourceOwner);
+			ct->refcount++;
+			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+			break;				/* assume only one match */
 		}
-		/* immediately set the refcount to 1 */
-		ResourceOwnerEnlarge(CurrentResourceOwner);
-		ct->refcount++;
-		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-		break;					/* assume only one match */
-	}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
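
Note on this hunk: the removed and added lines are textually identical, so the change is indentation only; the scan logic of SearchCatCacheMiss is re-indented one level deeper to sit inside the do { ... } while (stale); retry loop visible in the surrounding context lines. As a rough standalone illustration of that control flow (all names below are mock stand-ins, not PostgreSQL APIs), the scan restarts from the top whenever building a cache entry reports failure:

/*
 * Standalone illustration of the do/while (stale) retry pattern above.
 * Mock names only, not PostgreSQL code.
 */
#include <stdbool.h>
#include <stdio.h>

static int	creation_attempts = 0;

/* Pretend the first attempt is invalidated concurrently and fails. */
static bool
create_entry(void)
{
	return ++creation_attempts > 1;
}

int
main(void)
{
	bool		stale;
	bool		have_entry = false;

	do
	{
		/* a fresh scan would be opened here (systable_beginscan) */
		stale = false;

		while (!have_entry)		/* stands in for the tuple-fetch loop */
		{
			if (!create_entry())
			{
				/* upon failure, start the whole scan over */
				stale = true;
				break;
			}
			have_entry = true;
			break;				/* assume only one match */
		}

		/* the scan would be closed here (systable_endscan) */
	} while (stale);

	printf("entry built after %d creation attempt(s)\n", creation_attempts);
	return 0;
}

In the real code the systable_beginscan/systable_endscan pair stays inside the loop, so each retry opens a fresh scan.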
@@ -1710,95 +1710,95 @@ SearchCatCacheList(CatCache *cache,
 			cur_skey[2].sk_argument = v3;
 			cur_skey[3].sk_argument = v4;
 
-		scandesc = systable_beginscan(relation,
-									  cache->cc_indexoid,
-									  IndexScanOK(cache, cur_skey),
-									  NULL,
-									  nkeys,
-									  cur_skey);
+			scandesc = systable_beginscan(relation,
+										  cache->cc_indexoid,
+										  IndexScanOK(cache, cur_skey),
+										  NULL,
+										  nkeys,
+										  cur_skey);
 
-		/* The list will be ordered iff we are doing an index scan */
-		ordered = (scandesc->irel != NULL);
+			/* The list will be ordered iff we are doing an index scan */
+			ordered = (scandesc->irel != NULL);
 
-		stale = false;
-
-		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-		{
-			uint32		hashValue;
-			Index		hashIndex;
-			bool		found = false;
-			dlist_head *bucket;
-
-			/*
-			 * See if there's an entry for this tuple already.
-			 */
-			ct = NULL;
-			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+			stale = false;
 
-			bucket = &cache->cc_bucket[hashIndex];
-			dlist_foreach(iter, bucket)
+			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 			{
-				ct = dlist_container(CatCTup, cache_elem, iter.cur);
+				uint32		hashValue;
+				Index		hashIndex;
+				bool		found = false;
+				dlist_head *bucket;
 
-				if (ct->dead || ct->negative)
-					continue;	/* ignore dead and negative entries */
+				/*
+				 * See if there's an entry for this tuple already.
+				 */
+				ct = NULL;
+				hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+				hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-				if (ct->hash_value != hashValue)
-					continue;	/* quickly skip entry if wrong hash val */
+				bucket = &cache->cc_bucket[hashIndex];
+				dlist_foreach(iter, bucket)
+				{
+					ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-					continue;	/* not same tuple */
+					if (ct->dead || ct->negative)
+						continue;	/* ignore dead and negative entries */
 
-				/*
-				 * Found a match, but can't use it if it belongs to another
-				 * list already
-				 */
-				if (ct->c_list)
-					continue;
+					if (ct->hash_value != hashValue)
+						continue;	/* quickly skip entry if wrong hash val */
 
-				found = true;
-				break;			/* A-OK */
-			}
+					if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+						continue;	/* not same tuple */
 
-			if (!found)
-			{
-				/* We didn't find a usable entry, so make a new one */
-				ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-											 hashValue, hashIndex);
-				/* upon failure, we must start the scan over */
-				if (ct == NULL)
-				{
 					/*
-					 * Release refcounts on any items we already had. We dare
-					 * not try to free them if they're now unreferenced, since
-					 * an error while doing that would result in the PG_CATCH
-					 * below doing extra refcount decrements. Besides, we'll
-					 * likely re-adopt those items in the next iteration, so
-					 * it's not worth complicating matters to try to get rid
-					 * of them.
+					 * Found a match, but can't use it if it belongs to
+					 * another list already
 					 */
-					foreach(ctlist_item, ctlist)
+					if (ct->c_list)
+						continue;
+
+					found = true;
+					break;		/* A-OK */
+				}
+
+				if (!found)
+				{
+					/* We didn't find a usable entry, so make a new one */
+					ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+												 hashValue, hashIndex);
+					/* upon failure, we must start the scan over */
+					if (ct == NULL)
 					{
-						ct = (CatCTup *) lfirst(ctlist_item);
-						Assert(ct->c_list == NULL);
-						Assert(ct->refcount > 0);
-						ct->refcount--;
+						/*
+						 * Release refcounts on any items we already had. We
+						 * dare not try to free them if they're now
+						 * unreferenced, since an error while doing that would
+						 * result in the PG_CATCH below doing extra refcount
+						 * decrements. Besides, we'll likely re-adopt those
+						 * items in the next iteration, so it's not worth
+						 * complicating matters to try to get rid of them.
+						 */
+						foreach(ctlist_item, ctlist)
+						{
+							ct = (CatCTup *) lfirst(ctlist_item);
+							Assert(ct->c_list == NULL);
+							Assert(ct->refcount > 0);
+							ct->refcount--;
+						}
+						/* Reset ctlist in preparation for new try */
+						ctlist = NIL;
+						stale = true;
+						break;
 					}
-					/* Reset ctlist in preparation for new try */
-					ctlist = NIL;
-					stale = true;
-					break;
 				}
-			}
 
-			/* Careful here: add entry to ctlist, then bump its refcount */
-			/* This way leaves state correct if lappend runs out of memory */
-			ctlist = lappend(ctlist, ct);
-			ct->refcount++;
-		}
+				/* Careful here: add entry to ctlist, then bump its refcount */
+				/* This way leaves state correct if lappend runs out of memory */
+				ctlist = lappend(ctlist, ct);
+				ct->refcount++;
+			}
 
-		systable_endscan(scandesc);
+			systable_endscan(scandesc);
 		} while (stale);
 
 		table_close(relation, AccessShareLock);
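
Note on this hunk: as in the first hunk, the content is unchanged and only the indentation differs (plus what looks like pgindent-style re-wrapping of two comments to the narrower line width), so the whole list-building scan of SearchCatCacheList now sits one level deeper inside the same do { ... } while (stale); retry loop. The list case additionally has to undo partial work before retrying: it drops the refcounts taken on entries already collected in ctlist and resets the list to NIL, as the lines around stale = true; show. A standalone sketch of that cleanup-then-retry step (mock types and names only, not PostgreSQL APIs):

/*
 * Standalone sketch of the cleanup-then-retry step used in the list
 * case above: on failure, every refcount taken so far is released and
 * the collected list is reset before the scan is retried.
 */
#include <stdbool.h>
#include <stdio.h>

#define NTUPLES 4

typedef struct
{
	int			refcount;
} MockEntry;

static MockEntry pool[NTUPLES];
static int	scan_attempts = 0;

/* Fail partway through the first scan to force a retry. */
static bool
adopt_entry(int i)
{
	if (scan_attempts == 1 && i == 2)
		return false;
	pool[i].refcount++;
	return true;
}

int
main(void)
{
	int			list[NTUPLES];
	int			nlist = 0;
	bool		stale;

	do
	{
		scan_attempts++;
		nlist = 0;
		stale = false;

		for (int i = 0; i < NTUPLES; i++)	/* stands in for the tuple scan */
		{
			if (!adopt_entry(i))
			{
				/* release refcounts on everything collected so far */
				for (int j = 0; j < nlist; j++)
					pool[list[j]].refcount--;
				/* reset the list in preparation for a new try */
				nlist = 0;
				stale = true;
				break;
			}
			list[nlist++] = i;	/* remember the adopted entry */
		}
	} while (stale);

	printf("built a list of %d entries in %d scan(s)\n", nlist, scan_attempts);
	return 0;
}

The real code keeps the "add entry to ctlist, then bump its refcount" ordering so that an out-of-memory failure in lappend leaves the refcounts consistent; the sketch glosses over that detail.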
