Skip to content

Commit b0e5f02

Browse files
committed
Fix various typos and spelling mistakes in code comments
Author: Justin Pryzby. Discussion: https://postgr.es/m/20220411020336.GB26620@telsasoft.com
1 parent bba3c35 commit b0e5f02

File tree

33 files changed

+46
-45
lines changed

33 files changed

+46
-45
lines changed

contrib/ltree/ltree.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
* modified to look for -D compile flags in Makefiles, so here, in order to
2525
* get the historic behavior of LOWER_NODE not being defined on MSVC, we only
2626
* define it when not building in that environment. This is important as we
27-
* want to maintain the same LOWER_NODE behavior after a pg_update.
27+
* want to maintain the same LOWER_NODE behavior after a pg_upgrade.
2828
*/
2929
#ifndef _MSC_VER
3030
#define LOWER_NODE

src/backend/access/brin/brin_minmax_multi.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -310,7 +310,7 @@ AssertCheckRanges(Ranges *ranges, FmgrInfo *cmpFn, Oid colloid)
310310
*/
311311
AssertArrayOrder(cmpFn, colloid, ranges->values, 2 * ranges->nranges);
312312

313-
/* then the single-point ranges (with nvalues boundar values ) */
313+
/* then the single-point ranges (with nvalues boundary values ) */
314314
AssertArrayOrder(cmpFn, colloid, &ranges->values[2 * ranges->nranges],
315315
ranges->nsorted);
316316

src/backend/access/heap/heapam.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1470,7 +1470,7 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
14701470
* heap_set_tidrange will have used heap_setscanlimits to limit the
14711471
* range of pages we scan to only ones that can contain the TID range
14721472
* we're scanning for. Here we must filter out any tuples from these
1473-
* pages that are outwith that range.
1473+
* pages that are outside of that range.
14741474
*/
14751475
if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
14761476
{

src/backend/access/transam/xlogreader.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@ XLogReleasePreviousRecord(XLogReaderState *state)
300300
/* Release the space. */
301301
if (unlikely(record->oversized))
302302
{
303-
/* It's not in the the decode buffer, so free it to release space. */
303+
/* It's not in the decode buffer, so free it to release space. */
304304
pfree(record);
305305
}
306306
else

src/backend/access/transam/xlogrecovery.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2975,7 +2975,7 @@ ReadRecord(XLogPrefetcher *xlogprefetcher, int emode,
29752975
/*
29762976
* When not in standby mode we find that WAL ends in an incomplete
29772977
* record, keep track of that record. After recovery is done,
2978-
* we'll write a record to indicate downstream WAL readers that
2978+
* we'll write a record to indicate to downstream WAL readers that
29792979
* that portion is to be ignored.
29802980
*/
29812981
if (!StandbyMode &&

src/backend/commands/dbcommands.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -386,7 +386,7 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
386386
* needs to be copied from the source database to the destination database,
387387
* and if so, construct a CreateDBRelInfo for it.
388388
*
389-
* Visbility checks are handled by the caller, so our job here is just
389+
* Visibility checks are handled by the caller, so our job here is just
390390
* to assess the data stored in the tuple.
391391
*/
392392
CreateDBRelInfo *

src/backend/commands/vacuumparallel.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
* the memory space for storing dead items allocated in the DSM segment. We
1313
* launch parallel worker processes at the start of parallel index
1414
* bulk-deletion and index cleanup and once all indexes are processed, the
15-
* parallel worker processes exit. Each time we process indexes parallelly,
15+
* parallel worker processes exit. Each time we process indexes in parallel,
1616
* the parallel context is re-initialized so that the same DSM can be used for
1717
* multiple passes of index bulk-deletion and index cleanup.
1818
*

src/backend/executor/nodeMergeAppend.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
100100

101101
/*
102102
* When no run-time pruning is required and there's at least one
103-
* subplan, we can fill as_valid_subplans immediately, preventing
103+
* subplan, we can fill ms_valid_subplans immediately, preventing
104104
* later calls to ExecFindMatchingSubPlans.
105105
*/
106106
if (!prunestate->do_exec_prune && nplans > 0)

src/backend/optimizer/path/costsize.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1976,8 +1976,8 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
19761976
* by calling estimate_num_groups_incremental(), which estimates the
19771977
* group size for "new" pathkeys.
19781978
*
1979-
* Note: estimate_num_groups_incremntal does not handle fake Vars, so use
1980-
* a default estimate otherwise.
1979+
* Note: estimate_num_groups_incremental does not handle fake Vars, so
1980+
* use a default estimate otherwise.
19811981
*/
19821982
if (!has_fake_var)
19831983
nGroups = estimate_num_groups_incremental(root, pathkeyExprs,
@@ -6471,7 +6471,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
64716471
exact_pages = heap_pages - lossy_pages;
64726472

64736473
/*
6474-
* If there are lossy pages then recompute the number of tuples
6474+
* If there are lossy pages then recompute the number of tuples
64756475
* processed by the bitmap heap node. We assume here that the chance
64766476
* of a given tuple coming from an exact page is the same as the
64776477
* chance that a given page is exact. This might not be true, but

src/backend/optimizer/path/pathkeys.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2383,16 +2383,16 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
23832383
* Count the number of pathkeys that are useful for grouping (instead of
23842384
* explicit sort)
23852385
*
2386-
* Group pathkeys could be reordered to benefit from the odering. The ordering
2387-
* may not be "complete" and may require incremental sort, but that's fine. So
2388-
* we simply count prefix pathkeys with a matching group key, and stop once we
2389-
* find the first pathkey without a match.
2386+
* Group pathkeys could be reordered to benefit from the ordering. The
2387+
* ordering may not be "complete" and may require incremental sort, but that's
2388+
* fine. So we simply count prefix pathkeys with a matching group key, and
2389+
* stop once we find the first pathkey without a match.
23902390
*
23912391
* So e.g. with pathkeys (a,b,c) and group keys (a,b,e) this determines (a,b)
23922392
* pathkeys are useful for grouping, and we might do incremental sort to get
23932393
* path ordered by (a,b,e).
23942394
*
2395-
* This logic is necessary to retain paths with ordeding not matching grouping
2395+
* This logic is necessary to retain paths with ordering not matching grouping
23962396
* keys directly, without the reordering.
23972397
*
23982398
* Returns the length of pathkey prefix with matching group keys.

0 commit comments

Comments (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy