diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 1f809c24a1..c655dadb96 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -424,7 +424,7 @@ _bt_binsrch(Relation rel, /* * - * bt_binsrch_insert() -- Cacheable, incremental leaf page binary search. + * _bt_binsrch_insert() -- Cacheable, incremental leaf page binary search. * * Like _bt_binsrch(), but with support for caching the binary search * bounds. Only used during insertion, and only on the leaf page that it diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 11936a6571..a065419cdb 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -383,7 +383,7 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn) * is also an unused OID within pg_class. If the result is to be used only * as a relfilenode for an existing relation, pass NULL for pg_class. * - * As with GetNewObjectIdWithIndex(), there is some theoretical risk of a race + * As with GetNewOidWithIndex(), there is some theoretical risk of a race * condition, but it doesn't seem worth worrying about. * * Note: we don't support using this in bootstrap mode. All relations diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 84c54fbc70..ac86f3d5be 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -2626,7 +2626,7 @@ CopyMultiInsertInfoNextFreeSlot(CopyMultiInsertInfo *miinfo, /* * Record the previously reserved TupleTableSlot that was reserved by - * MultiInsertInfoNextFreeSlot as being consumed. + * CopyMultiInsertInfoNextFreeSlot as being consumed. 
*/ static inline void CopyMultiInsertInfoStore(CopyMultiInsertInfo *miinfo, ResultRelInfo *rri, diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c index 1673b10315..ba8c0cd0f0 100644 --- a/src/backend/libpq/be-secure-gssapi.c +++ b/src/backend/libpq/be-secure-gssapi.c @@ -400,7 +400,7 @@ read_or_wait(Port *port, ssize_t len) { /* * If we got back less than zero, indicating an error, and that - * wasn't just a EWOULDBOCK/EAGAIN, then give up. + * wasn't just an EWOULDBLOCK/EAGAIN, then give up. */ if (ret < 0 && !(errno == EWOULDBLOCK || errno == EAGAIN)) return -1; diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c index ef2f5b45d8..ba470366e1 100644 --- a/src/backend/parser/parse_target.c +++ b/src/backend/parser/parse_target.c @@ -38,7 +38,7 @@ static void markTargetListOrigin(ParseState *pstate, TargetEntry *tle, static Node *transformAssignmentIndirection(ParseState *pstate, Node *basenode, const char *targetName, - bool targetIsArray, + bool targetIsSubscripting, Oid targetTypeId, int32 targetTypMod, Oid targetCollation, diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 13f152b473..11bbe2c397 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -1213,12 +1213,11 @@ CompactCheckpointerRequestQueue(void) * backwards from the end of the queue and check whether a request is * *preceded* by an earlier, identical request, in the hopes of doing less * copying. But that might change the semantics, if there's an - * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so - * we do it this way. It would be possible to be even smarter if we made - * the code below understand the specific semantics of such requests (it - * could blow away preceding entries that would end up being canceled - * anyhow), but it's not clear that the extra complexity would buy us - * anything. 
+ * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it + * this way. It would be possible to be even smarter if we made the code + * below understand the specific semantics of such requests (it could blow + * away preceding entries that would end up being canceled anyhow), but + * it's not clear that the extra complexity would buy us anything. */ for (n = 0; n < CheckpointerShmem->num_requests; n++) { diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index b4f2d0f35a..c13c08a97b 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -1055,7 +1055,7 @@ get_opclass_input_type(Oid opclass) } /* - * get_opclass_family_and_input_type + * get_opclass_opfamily_and_input_type * * Returns the OID of the operator family the opclass belongs to, * the OID of the datatype the opclass indexes diff --git a/src/port/pg_bitutils.c b/src/port/pg_bitutils.c index 60fb55af53..7847e8a451 100644 --- a/src/port/pg_bitutils.c +++ b/src/port/pg_bitutils.c @@ -28,7 +28,7 @@ * left-most the 7th bit. The 0th entry of the array should not be used. * * Note: this is not used by the functions in pg_bitutils.h when - * HAVE_BUILTIN_CLZ is defined, but we provide it anyway, so that + * HAVE__BUILTIN_CLZ is defined, but we provide it anyway, so that * extensions possibly compiled with a different compiler can use it. */ const uint8 pg_leftmost_one_pos[256] = { @@ -56,7 +56,7 @@ const uint8 pg_leftmost_one_pos[256] = { * left-most the 7th bit. The 0th entry of the array should not be used. * * Note: this is not used by the functions in pg_bitutils.h when - * HAVE_BUILTIN_CTZ is defined, but we provide it anyway, so that + * HAVE__BUILTIN_CTZ is defined, but we provide it anyway, so that * extensions possibly compiled with a different compiler can use it. */ const uint8 pg_rightmost_one_pos[256] = {