Change TRUE/FALSE to true/false

The lower-case spellings are the C and C++ standard and are used in most
parts of the PostgreSQL sources.  The upper-case spellings are used only
in some files/modules.  So standardize on the standard spellings.

The APIs for ICU, Perl, and Windows define their own TRUE and FALSE, so
those are left as is when using those APIs.

In code comments, we use the lower-case spelling for the C concepts and
keep the upper-case spelling for the SQL concepts.

Reviewed-by: Michael Paquier <michael.paquier@gmail.com>
Author: Peter Eisentraut
Date: 2017-08-16 00:22:32 -04:00
Commit: 2eb4a831e5
Parent: 4497f2f3b3

216 changed files with 1168 additions and 1168 deletions
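
For context, a minimal sketch (illustrative only, not taken from the actual PostgreSQL, ICU, Perl, or Windows headers) contrasting the two styles: the legacy upper-case macros that some modules defined for themselves, and the lower-case true/false provided by C99's <stdbool.h> and built into C++.

/* Illustrative sketch only -- not from any real header. */

/* Legacy style: module-local upper-case macros. */
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

/* Standard style used after this commit: C99 <stdbool.h> (built in for C++). */
#include <stdbool.h>

static bool
is_leaf_entry(int nchildren)
{
	return nchildren == 0;		/* evaluates to true or false */
}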


@ -111,7 +111,7 @@ static const gbtree_vinfo tinfo =
{
gbt_t_bit,
0,
TRUE,
true,
gbt_bitgt,
gbt_bitge,
gbt_biteq,
@ -152,13 +152,13 @@ gbt_bit_consistent(PG_FUNCTION_ARGS)
if (GIST_LEAF(entry))
retval = gbt_var_consistent(&r, query, strategy, PG_GET_COLLATION(),
TRUE, &tinfo, fcinfo->flinfo);
true, &tinfo, fcinfo->flinfo);
else
{
bytea *q = gbt_bit_xfrm((bytea *) query);
retval = gbt_var_consistent(&r, q, strategy, PG_GET_COLLATION(),
FALSE, &tinfo, fcinfo->flinfo);
false, &tinfo, fcinfo->flinfo);
}
PG_RETURN_BOOL(retval);
}


@ -75,7 +75,7 @@ static const gbtree_vinfo tinfo =
{
gbt_t_bytea,
0,
TRUE,
true,
gbt_byteagt,
gbt_byteage,
gbt_byteaeq,


@ -105,7 +105,7 @@ gbt_inet_compress(PG_FUNCTION_ARGS)
r->upper = r->lower;
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;


@ -169,7 +169,7 @@ gbt_intv_compress(PG_FUNCTION_ARGS)
}
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
PG_RETURN_POINTER(retval);
@ -201,7 +201,7 @@ gbt_intv_decompress(PG_FUNCTION_ARGS)
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
PG_RETURN_POINTER(retval);
}


@ -79,7 +79,7 @@ static const gbtree_vinfo tinfo =
{
gbt_t_numeric,
0,
FALSE,
false,
gbt_numeric_gt,
gbt_numeric_ge,
gbt_numeric_eq,


@ -80,7 +80,7 @@ static gbtree_vinfo tinfo =
{
gbt_t_text,
0,
FALSE,
false,
gbt_textgt,
gbt_textge,
gbt_texteq,
@ -128,7 +128,7 @@ gbt_bpchar_compress(PG_FUNCTION_ARGS)
gistentryinit(trim, d,
entry->rel, entry->page,
entry->offset, TRUE);
entry->offset, true);
retval = gbt_var_compress(&trim, &tinfo);
}
else


@ -183,7 +183,7 @@ gbt_timetz_compress(PG_FUNCTION_ARGS)
r->lower = r->upper = tmp;
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;


@ -230,7 +230,7 @@ gbt_tstz_compress(PG_FUNCTION_ARGS)
r->lower = r->upper = gmt;
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;


@ -86,7 +86,7 @@ gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
memcpy((void *) &r[tinfo->size], leaf, tinfo->size);
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;
@ -150,7 +150,7 @@ gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo)
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, datum, entry->rel, entry->page, entry->offset,
FALSE);
false);
return retval;
}


@ -45,7 +45,7 @@ gbt_var_decompress(PG_FUNCTION_ARGS)
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -169,7 +169,7 @@ gbt_var_node_cp_len(const GBT_VARKEY *node, const gbtree_vinfo *tinfo)
static bool
gbt_bytea_pf_match(const bytea *pf, const bytea *query, const gbtree_vinfo *tinfo)
{
bool out = FALSE;
bool out = false;
int32 qlen = VARSIZE(query) - VARHDRSZ;
int32 nlen = VARSIZE(pf) - VARHDRSZ;
@ -294,7 +294,7 @@ gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, TRUE);
entry->offset, true);
}
else
retval = entry;
@ -314,7 +314,7 @@ gbt_var_fetch(PG_FUNCTION_ARGS)
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r.lower),
entry->rel, entry->page,
entry->offset, TRUE);
entry->offset, true);
PG_RETURN_POINTER(retval);
}
@ -561,7 +561,7 @@ gbt_var_consistent(GBT_VARKEY_R *key,
const gbtree_vinfo *tinfo,
FmgrInfo *flinfo)
{
bool retval = FALSE;
bool retval = false;
switch (strategy)
{
@ -607,7 +607,7 @@ gbt_var_consistent(GBT_VARKEY_R *key,
tinfo->f_eq(query, key->upper, collation, flinfo));
break;
default:
retval = FALSE;
retval = false;
}
return retval;


@ -114,7 +114,7 @@ gbt_uuid_compress(PG_FUNCTION_ARGS)
memcpy((void *) (r + UUID_LEN), (void *) key, UUID_LEN);
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;


@ -309,7 +309,7 @@ cube_out(PG_FUNCTION_ARGS)
/*
** The GiST Consistent method for boxes
** Should return false if for all data items x below entry,
** the predicate x op query == FALSE, where op is the oper
** the predicate x op query == false, where op is the oper
** corresponding to strategy in the pg_amop table.
*/
Datum
@ -396,7 +396,7 @@ g_cube_decompress(PG_FUNCTION_ARGS)
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
PG_RETURN_POINTER(retval);
}
PG_RETURN_POINTER(entry);
@ -590,9 +590,9 @@ g_cube_same(PG_FUNCTION_ARGS)
bool *result = (bool *) PG_GETARG_POINTER(2);
if (cube_cmp_v0(b1, b2) == 0)
*result = TRUE;
*result = true;
else
*result = FALSE;
*result = false;
PG_RETURN_NDBOX_P(result);
}
@ -624,7 +624,7 @@ g_cube_leaf_consistent(NDBOX *key,
retval = cube_contains_v0(query, key);
break;
default:
retval = FALSE;
retval = false;
}
return retval;
}
@ -651,7 +651,7 @@ g_cube_internal_consistent(NDBOX *key,
retval = (bool) cube_overlap_v0(key, query);
break;
default:
retval = FALSE;
retval = false;
}
return retval;
}
@ -1059,7 +1059,7 @@ cube_contains_v0(NDBOX *a, NDBOX *b)
int i;
if ((a == NULL) || (b == NULL))
return FALSE;
return false;
if (DIM(a) < DIM(b))
{
@ -1071,9 +1071,9 @@ cube_contains_v0(NDBOX *a, NDBOX *b)
for (i = DIM(a); i < DIM(b); i++)
{
if (LL_COORD(b, i) != 0)
return FALSE;
return false;
if (UR_COORD(b, i) != 0)
return FALSE;
return false;
}
}
@ -1082,13 +1082,13 @@ cube_contains_v0(NDBOX *a, NDBOX *b)
{
if (Min(LL_COORD(a, i), UR_COORD(a, i)) >
Min(LL_COORD(b, i), UR_COORD(b, i)))
return FALSE;
return false;
if (Max(LL_COORD(a, i), UR_COORD(a, i)) <
Max(LL_COORD(b, i), UR_COORD(b, i)))
return FALSE;
return false;
}
return TRUE;
return true;
}
Datum
@ -1129,7 +1129,7 @@ cube_overlap_v0(NDBOX *a, NDBOX *b)
int i;
if ((a == NULL) || (b == NULL))
return FALSE;
return false;
/* swap the box pointers if needed */
if (DIM(a) < DIM(b))
@ -1144,21 +1144,21 @@ cube_overlap_v0(NDBOX *a, NDBOX *b)
for (i = 0; i < DIM(b); i++)
{
if (Min(LL_COORD(a, i), UR_COORD(a, i)) > Max(LL_COORD(b, i), UR_COORD(b, i)))
return FALSE;
return false;
if (Max(LL_COORD(a, i), UR_COORD(a, i)) < Min(LL_COORD(b, i), UR_COORD(b, i)))
return FALSE;
return false;
}
/* compare to zero those dimensions in (a) absent in (b) */
for (i = DIM(b); i < DIM(a); i++)
{
if (Min(LL_COORD(a, i), UR_COORD(a, i)) > 0)
return FALSE;
return false;
if (Max(LL_COORD(a, i), UR_COORD(a, i)) < 0)
return FALSE;
return false;
}
return TRUE;
return true;
}


@ -243,7 +243,7 @@ dblink_init(void)
pconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext, sizeof(remoteConn));
pconn->conn = NULL;
pconn->openCursorCount = 0;
pconn->newXactForCursor = FALSE;
pconn->newXactForCursor = false;
}
}
@ -423,7 +423,7 @@ dblink_open(PG_FUNCTION_ARGS)
if (PQresultStatus(res) != PGRES_COMMAND_OK)
dblink_res_internalerror(conn, res, "begin error");
PQclear(res);
rconn->newXactForCursor = TRUE;
rconn->newXactForCursor = true;
/*
* Since transaction state was IDLE, we force cursor count to
@ -523,7 +523,7 @@ dblink_close(PG_FUNCTION_ARGS)
/* if count is zero, commit the transaction */
if (rconn->openCursorCount == 0)
{
rconn->newXactForCursor = FALSE;
rconn->newXactForCursor = false;
res = PQexec(conn, "COMMIT");
if (PQresultStatus(res) != PGRES_COMMAND_OK)


@ -824,7 +824,7 @@ fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
*
* Check to see if it's useful to convert only a subset of the file's columns
* to binary. If so, construct a list of the column names to be converted,
* return that at *columns, and return TRUE. (Note that it's possible to
* return that at *columns, and return true. (Note that it's possible to
* determine that no columns need be converted, for instance with a COUNT(*)
* query. So we can't use returning a NIL list to indicate failure.)
*/


@ -87,7 +87,7 @@ soundex_code(char letter)
phoned_word -- The final phonized word. (We'll allocate the
memory.)
Output
error -- A simple error flag, returns TRUE or FALSE
error -- A simple error flag, returns true or false
NOTES: ALL non-alpha characters are ignored, this includes whitespace,
although non-alpha characters will break up phonemes.


@ -144,7 +144,7 @@ ghstore_compress(PG_FUNCTION_ARGS)
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset,
FALSE);
false);
}
else if (!ISALLTRUE(DatumGetPointer(entry->key)))
{
@ -166,7 +166,7 @@ ghstore_compress(PG_FUNCTION_ARGS)
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset,
FALSE);
false);
}
PG_RETURN_POINTER(retval);
@ -570,7 +570,7 @@ ghstore_consistent(PG_FUNCTION_ARGS)
continue;
crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ);
if (!(GETBIT(sign, HASHVAL(crc))))
res = FALSE;
res = false;
}
}
else if (strategy == HStoreExistsAnyStrategyNumber)
@ -585,7 +585,7 @@ ghstore_consistent(PG_FUNCTION_ARGS)
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
res = FALSE;
res = false;
for (i = 0; !res && i < key_count; ++i)
{
@ -595,7 +595,7 @@ ghstore_consistent(PG_FUNCTION_ARGS)
continue;
crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ);
if (GETBIT(sign, HASHVAL(crc)))
res = TRUE;
res = true;
}
}
else


@ -342,7 +342,7 @@ gin_bool_consistent(QUERYTYPE *query, bool *check)
j = 0;
if (query->size <= 0)
return FALSE;
return false;
/*
* Set up data for checkcondition_gin. This must agree with the query


@ -116,7 +116,7 @@ ginint4_consistent(PG_FUNCTION_ARGS)
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = FALSE;
bool res = false;
int32 i;
switch (strategy)
@ -125,25 +125,25 @@ ginint4_consistent(PG_FUNCTION_ARGS)
/* result is not lossy */
*recheck = false;
/* at least one element in check[] is true, so result = true */
res = TRUE;
res = true;
break;
case RTContainedByStrategyNumber:
case RTOldContainedByStrategyNumber:
/* we will need recheck */
*recheck = true;
/* at least one element in check[] is true, so result = true */
res = TRUE;
res = true;
break;
case RTSameStrategyNumber:
/* we will need recheck */
*recheck = true;
/* Must have all elements in check[] true */
res = TRUE;
res = true;
for (i = 0; i < nkeys; i++)
{
if (!check[i])
{
res = FALSE;
res = false;
break;
}
}
@ -153,12 +153,12 @@ ginint4_consistent(PG_FUNCTION_ARGS)
/* result is not lossy */
*recheck = false;
/* Must have all elements in check[] true */
res = TRUE;
res = true;
for (i = 0; i < nkeys; i++)
{
if (!check[i])
{
res = FALSE;
res = false;
break;
}
}


@ -27,7 +27,7 @@ PG_FUNCTION_INFO_V1(g_int_same);
/*
** The GiST Consistent method for _intments
** Should return false if for all data items x below entry,
** the predicate x op query == FALSE, where op is the oper
** the predicate x op query == false, where op is the oper
** corresponding to strategy in the pg_amop table.
*/
Datum
@ -89,7 +89,7 @@ g_int_consistent(PG_FUNCTION_ARGS)
query);
break;
default:
retval = FALSE;
retval = false;
}
pfree(query);
PG_RETURN_BOOL(retval);
@ -159,7 +159,7 @@ g_int_compress(PG_FUNCTION_ARGS)
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -206,7 +206,7 @@ g_int_compress(PG_FUNCTION_ARGS)
r = resize_intArrayType(r, len);
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
else
@ -236,7 +236,7 @@ g_int_decompress(PG_FUNCTION_ARGS)
{
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(in),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -251,7 +251,7 @@ g_int_decompress(PG_FUNCTION_ARGS)
{
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(in),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -273,7 +273,7 @@ g_int_decompress(PG_FUNCTION_ARGS)
pfree(in);
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -321,14 +321,14 @@ g_int_same(PG_FUNCTION_ARGS)
*result = false;
PG_RETURN_POINTER(result);
}
*result = TRUE;
*result = true;
da = ARRPTR(a);
db = ARRPTR(b);
while (n--)
{
if (*da++ != *db++)
{
*result = FALSE;
*result = false;
break;
}
}


@ -74,19 +74,19 @@ _int_same(PG_FUNCTION_ARGS)
da = ARRPTR(a);
db = ARRPTR(b);
result = FALSE;
result = false;
if (na == nb)
{
SORT(a);
SORT(b);
result = TRUE;
result = true;
for (n = 0; n < na; n++)
{
if (da[n] != db[n])
{
result = FALSE;
result = false;
break;
}
}
@ -110,7 +110,7 @@ _int_overlap(PG_FUNCTION_ARGS)
CHECKARRVALID(a);
CHECKARRVALID(b);
if (ARRISEMPTY(a) || ARRISEMPTY(b))
return FALSE;
return false;
SORT(a);
SORT(b);


@ -40,7 +40,7 @@ inner_int_contains(ArrayType *a, ArrayType *b)
break; /* db[j] is not in da */
}
return (n == nb) ? TRUE : FALSE;
return (n == nb) ? true : false;
}
/* arguments are assumed sorted */
@ -65,12 +65,12 @@ inner_int_overlap(ArrayType *a, ArrayType *b)
if (da[i] < db[j])
i++;
else if (da[i] == db[j])
return TRUE;
return true;
else
j++;
}
return FALSE;
return false;
}
ArrayType *


@ -168,7 +168,7 @@ g_intbig_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
if (in != DatumGetArrayTypeP(entry->key))
pfree(in);
@ -195,7 +195,7 @@ g_intbig_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -594,7 +594,7 @@ g_intbig_consistent(PG_FUNCTION_ARGS)
retval = _intbig_overlap((GISTTYPE *) DatumGetPointer(entry->key), query);
break;
default:
retval = FALSE;
retval = false;
}
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(retval);


@ -100,7 +100,7 @@ _ltree_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else if (!LTG_ISALLTRUE(entry->key))
{
@ -123,7 +123,7 @@ _ltree_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
PG_RETURN_POINTER(retval);
}


@ -64,7 +64,7 @@ ltree_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
PG_RETURN_POINTER(retval);
}
@ -81,7 +81,7 @@ ltree_decompress(PG_FUNCTION_ARGS)
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
PG_RETURN_POINTER(retval);
}
PG_RETURN_POINTER(entry);


@ -106,7 +106,7 @@ gtrgm_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else if (ISSIGNKEY(DatumGetPointer(entry->key)) &&
!ISALLTRUE(DatumGetPointer(entry->key)))
@ -130,7 +130,7 @@ gtrgm_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
PG_RETURN_POINTER(retval);
}


@ -634,7 +634,7 @@ createTrgmNFAInternal(regex_t *regex, TrgmPackedGraph **graph,
* Main entry point for evaluating a graph during index scanning.
*
* The check[] array is indexed by trigram number (in the array of simple
* trigrams returned by createTrgmNFA), and holds TRUE for those trigrams
* trigrams returned by createTrgmNFA), and holds true for those trigrams
* that are present in the index entry being checked.
*/
bool
@ -1451,7 +1451,7 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2)
* Get vector of all color trigrams in graph and select which of them
* to expand into simple trigrams.
*
* Returns TRUE if OK, FALSE if exhausted resource limits.
* Returns true if OK, false if exhausted resource limits.
*/
static bool
selectColorTrigrams(TrgmNFA *trgmNFA)


@ -188,7 +188,7 @@ seg_upper(PG_FUNCTION_ARGS)
/*
** The GiST Consistent method for segments
** Should return false if for all data items x below entry,
** the predicate x op query == FALSE, where op is the oper
** the predicate x op query == false, where op is the oper
** corresponding to strategy in the pg_amop table.
*/
Datum
@ -413,9 +413,9 @@ gseg_same(PG_FUNCTION_ARGS)
bool *result = (bool *) PG_GETARG_POINTER(2);
if (DirectFunctionCall2(seg_same, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1)))
*result = TRUE;
*result = true;
else
*result = FALSE;
*result = false;
#ifdef GIST_DEBUG
fprintf(stderr, "same: %s\n", (*result ? "TRUE" : "FALSE"));
@ -465,7 +465,7 @@ gseg_leaf_consistent(Datum key, Datum query, StrategyNumber strategy)
retval = DirectFunctionCall2(seg_contained, key, query);
break;
default:
retval = FALSE;
retval = false;
}
PG_RETURN_DATUM(retval);
@ -514,7 +514,7 @@ gseg_internal_consistent(Datum key, Datum query, StrategyNumber strategy)
DatumGetBool(DirectFunctionCall2(seg_overlap, key, query));
break;
default:
retval = FALSE;
retval = false;
}
PG_RETURN_BOOL(retval);


@ -223,9 +223,9 @@
<para>
<literal>pmatch</literal> is an output argument for use when partial match
is supported. To use it, <function>extractQuery</function> must allocate
an array of <literal>*nkeys</literal> booleans and store its address at
<literal>*pmatch</literal>. Each element of the array should be set to TRUE
if the corresponding key requires partial match, FALSE if not.
an array of <literal>*nkeys</literal> bools and store its address at
<literal>*pmatch</literal>. Each element of the array should be set to true
if the corresponding key requires partial match, false if not.
If <literal>*pmatch</literal> is set to <symbol>NULL</symbol> then GIN assumes partial match
is not required. The variable is initialized to <symbol>NULL</symbol> before call,
so this argument can simply be ignored by operator classes that do
@ -267,7 +267,7 @@
Datum queryKeys[], bool nullFlags[])</function></term>
<listitem>
<para>
Returns TRUE if an indexed item satisfies the query operator with
Returns true if an indexed item satisfies the query operator with
strategy number <literal>n</literal> (or might satisfy it, if the recheck
indication is returned). This function does not have direct access
to the indexed item's value, since <acronym>GIN</acronym> does not
@ -277,8 +277,8 @@
<literal>nkeys</literal>, which is the same as the number of keys previously
returned by <function>extractQuery</function> for this <literal>query</literal> datum.
Each element of the
<literal>check</literal> array is TRUE if the indexed item contains the
corresponding query key, i.e., if (check[i] == TRUE) the i-th key of the
<literal>check</literal> array is true if the indexed item contains the
corresponding query key, i.e., if (check[i] == true) the i-th key of the
<function>extractQuery</function> result array is present in the indexed item.
The original <literal>query</literal> datum is
passed in case the <function>consistent</function> method needs to consult it,
@ -291,7 +291,7 @@
<para>
When <function>extractQuery</function> returns a null key in
<literal>queryKeys[]</literal>, the corresponding <literal>check[]</literal> element
is TRUE if the indexed item contains a null key; that is, the
is true if the indexed item contains a null key; that is, the
semantics of <literal>check[]</literal> are like <literal>IS NOT DISTINCT
FROM</literal>. The <function>consistent</function> function can examine the
corresponding <literal>nullFlags[]</literal> element if it needs to tell
@ -299,13 +299,13 @@
</para>
<para>
On success, <literal>*recheck</literal> should be set to TRUE if the heap
tuple needs to be rechecked against the query operator, or FALSE if
the index test is exact. That is, a FALSE return value guarantees
that the heap tuple does not match the query; a TRUE return value with
<literal>*recheck</literal> set to FALSE guarantees that the heap tuple does
match the query; and a TRUE return value with
<literal>*recheck</literal> set to TRUE means that the heap tuple might match
On success, <literal>*recheck</literal> should be set to true if the heap
tuple needs to be rechecked against the query operator, or false if
the index test is exact. That is, a false return value guarantees
that the heap tuple does not match the query; a true return value with
<literal>*recheck</literal> set to false guarantees that the heap tuple does
match the query; and a true return value with
<literal>*recheck</literal> set to true means that the heap tuple might match
the query, so it needs to be fetched and rechecked by evaluating the
query operator directly against the originally indexed item.
</para>

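To make the calling convention above concrete, here is a minimal sketch of a consistent support function; the argument handling mirrors ginint4_consistent from contrib/intarray earlier in this commit, but my_consistent itself is a hypothetical example rather than PostgreSQL code, and it treats the index test as exact for every strategy.

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(my_consistent);

/* Hypothetical GIN consistent method: require all extracted keys to match. */
Datum
my_consistent(PG_FUNCTION_ARGS)
{
	bool	   *check = (bool *) PG_GETARG_POINTER(0);
	/* StrategyNumber strategy = PG_GETARG_UINT16(1); */
	/* Datum query = PG_GETARG_DATUM(2); */
	int32		nkeys = PG_GETARG_INT32(3);
	/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
	bool	   *recheck = (bool *) PG_GETARG_POINTER(5);
	bool		res = true;
	int32		i;

	/* Must have all elements of check[] true for the item to match. */
	for (i = 0; i < nkeys; i++)
	{
		if (!check[i])
		{
			res = false;
			break;
		}
	}

	/* Index test is assumed exact here, so no heap recheck is requested. */
	*recheck = false;

	PG_RETURN_BOOL(res);
}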

@ -280,9 +280,9 @@ aminsert (Relation indexRelation,
<para>
The function's Boolean result value is significant only when
<literal>checkUnique</literal> is <literal>UNIQUE_CHECK_PARTIAL</literal>.
In this case a TRUE result means the new entry is known unique, whereas
FALSE means it might be non-unique (and a deferred uniqueness check must
be scheduled). For other cases a constant FALSE result is recommended.
In this case a true result means the new entry is known unique, whereas
false means it might be non-unique (and a deferred uniqueness check must
be scheduled). For other cases a constant false result is recommended.
</para>
<para>
@ -368,8 +368,8 @@ amcanreturn (Relation indexRelation, int attno);
linkend="indexes-index-only-scans"><firstterm>index-only scans</firstterm></link> on
the given column, by returning the indexed column values for an index entry
in the form of an <structname>IndexTuple</structname>. The attribute number
is 1-based, i.e. the first column's attno is 1. Returns TRUE if supported,
else FALSE. If the access method does not support index-only scans at all,
is 1-based, i.e. the first column's attno is 1. Returns true if supported,
else false. If the access method does not support index-only scans at all,
the <structfield>amcanreturn</structfield> field in its <structname>IndexAmRoutine</structname>
struct can be set to NULL.
</para>
@ -532,15 +532,15 @@ amgettuple (IndexScanDesc scan,
ScanDirection direction);
</programlisting>
Fetch the next tuple in the given scan, moving in the given
direction (forward or backward in the index). Returns TRUE if a tuple was
obtained, FALSE if no matching tuples remain. In the TRUE case the tuple
direction (forward or backward in the index). Returns true if a tuple was
obtained, false if no matching tuples remain. In the true case the tuple
TID is stored into the <literal>scan</literal> structure. Note that
<quote>success</quote> means only that the index contains an entry that matches
the scan keys, not that the tuple necessarily still exists in the heap or
will pass the caller's snapshot test. On success, <function>amgettuple</function>
must also set <literal>scan-&gt;xs_recheck</literal> to TRUE or FALSE.
FALSE means it is certain that the index entry matches the scan keys.
TRUE means this is not certain, and the conditions represented by the
must also set <literal>scan-&gt;xs_recheck</literal> to true or false.
False means it is certain that the index entry matches the scan keys.
true means this is not certain, and the conditions represented by the
scan keys must be rechecked against the heap tuple after fetching it.
This provision supports <quote>lossy</quote> index operators.
Note that rechecking will extend only to the scan conditions; a partial
@ -550,7 +550,7 @@ amgettuple (IndexScanDesc scan,
<para>
If the index supports <link linkend="indexes-index-only-scans">index-only
scans</link> (i.e., <function>amcanreturn</function> returns TRUE for it),
scans</link> (i.e., <function>amcanreturn</function> returns true for it),
then on success the AM must also check <literal>scan-&gt;xs_want_itup</literal>,
and if that is true it must return the originally indexed data for the
index entry. The data can be returned in the form of an
@ -1082,8 +1082,8 @@ amparallelrescan (IndexScanDesc scan);
constraint is deferrable. <productname>PostgreSQL</productname>
will use this mode to insert each row's index entry. The access
method must allow duplicate entries into the index, and report any
potential duplicates by returning FALSE from <function>aminsert</function>.
For each row for which FALSE is returned, a deferred recheck will
potential duplicates by returning false from <function>aminsert</function>.
For each row for which false is returned, a deferred recheck will
be scheduled.
</para>

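As a small illustration of the amcanreturn contract described above, the sketch below reports index-only-scan support for the first index column only; myam_canreturn is a hypothetical access method callback, not part of PostgreSQL.

#include "postgres.h"
#include "utils/rel.h"

/*
 * Hypothetical amcanreturn callback: only the first column (attno is
 * 1-based) can be returned in an index-only scan.
 */
static bool
myam_canreturn(Relation indexRelation, int attno)
{
	return attno == 1;
}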

@ -6970,12 +6970,12 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
break;
}
/* unknown event ID, just return TRUE. */
/* unknown event ID, just return true. */
default:
break;
}
return TRUE; /* event processing succeeded */
return true; /* event processing succeeded */
}
]]>
</programlisting>


@ -794,7 +794,7 @@ typedef struct spgLeafConsistentOut
trees, in which each level of the tree includes a prefix that is short
enough to fit on a page, and the final leaf level includes a suffix also
short enough to fit on a page. The operator class should set
<structfield>longValuesOK</structfield> to TRUE only if it is prepared to arrange for
<structfield>longValuesOK</structfield> to true only if it is prepared to arrange for
this to happen. Otherwise, the <acronym>SP-GiST</acronym> core will
reject any request to index a value that is too large to fit
on an index page.


@ -32,7 +32,7 @@
</term>
<listitem>
<para>
Returns TRUE if current connection to server uses SSL, and FALSE
Returns true if current connection to server uses SSL, and false
otherwise.
</para>
</listitem>
@ -77,8 +77,8 @@
</term>
<listitem>
<para>
Returns TRUE if current client has presented a valid SSL client
certificate to the server, and FALSE otherwise. (The server
Returns true if current client has presented a valid SSL client
certificate to the server, and false otherwise. (The server
might or might not be configured to require a client certificate.)
</para>
</listitem>


@ -315,7 +315,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
*
* Index must be locked in ShareUpdateExclusiveLock mode.
*
* Return FALSE if caller should retry.
* Return false if caller should retry.
*/
bool
brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)


@ -289,7 +289,7 @@ heap_fill_tuple(TupleDesc tupleDesc,
*/
/* ----------------
* heap_attisnull - returns TRUE iff tuple attribute is not present
* heap_attisnull - returns true iff tuple attribute is not present
* ----------------
*/
bool


@ -41,7 +41,7 @@ ginTraverseLock(Buffer buffer, bool searchMode)
page = BufferGetPage(buffer);
if (GinPageIsLeaf(page))
{
if (searchMode == FALSE)
if (searchMode == false)
{
/* we should relock our page */
LockBuffer(buffer, GIN_UNLOCK);
@ -107,7 +107,7 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot)
* ok, page is correctly locked, we should check to move right ..,
* root never has a right link, so small optimization
*/
while (btree->fullScan == FALSE && stack->blkno != btree->rootBlkno &&
while (btree->fullScan == false && stack->blkno != btree->rootBlkno &&
btree->isMoveRight(btree, page))
{
BlockNumber rightlink = GinPageGetOpaque(page)->rightlink;


@ -52,7 +52,7 @@ ginCombineData(RBNode *existing, const RBNode *newdata, void *arg)
}
/* If item pointers are not ordered, they will need to be sorted later */
if (eo->shouldSort == FALSE)
if (eo->shouldSort == false)
{
int res;
@ -60,7 +60,7 @@ ginCombineData(RBNode *existing, const RBNode *newdata, void *arg)
Assert(res != 0);
if (res > 0)
eo->shouldSort = TRUE;
eo->shouldSort = true;
}
eo->list[eo->count] = en->list[0];
@ -176,7 +176,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
ea->key = getDatumCopy(accum, attnum, key);
ea->maxcount = DEF_NPTR;
ea->count = 1;
ea->shouldSort = FALSE;
ea->shouldSort = false;
ea->list =
(ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR);
ea->list[0] = *heapptr;


@ -235,9 +235,9 @@ dataIsMoveRight(GinBtree btree, Page page)
ItemPointer iptr = GinDataPageGetRightBound(page);
if (GinPageRightMost(page))
return FALSE;
return false;
return (ginCompareItemPointers(&btree->itemptr, iptr) > 0) ? TRUE : FALSE;
return (ginCompareItemPointers(&btree->itemptr, iptr) > 0) ? true : false;
}
/*
@ -1875,9 +1875,9 @@ ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno)
btree->fillRoot = ginDataFillRoot;
btree->prepareDownlink = dataPrepareDownlink;
btree->isData = TRUE;
btree->fullScan = FALSE;
btree->isBuild = FALSE;
btree->isData = true;
btree->fullScan = false;
btree->isBuild = false;
}
/*
@ -1919,9 +1919,9 @@ ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno,
ginPrepareDataScan(btree, index, rootBlkno);
btree->fullScan = TRUE;
btree->fullScan = true;
stack = ginFindLeafPage(btree, TRUE, snapshot);
stack = ginFindLeafPage(btree, true, snapshot);
return stack;
}


@ -30,7 +30,7 @@ static void entrySplitPage(GinBtree btree, Buffer origbuf,
* Form a tuple for entry tree.
*
* If the tuple would be too big to be stored, function throws a suitable
* error if errorTooBig is TRUE, or returns NULL if errorTooBig is FALSE.
* error if errorTooBig is true, or returns NULL if errorTooBig is false.
*
* See src/backend/access/gin/README for a description of the index tuple
* format that is being built here. We build on the assumption that we
@ -249,7 +249,7 @@ entryIsMoveRight(GinBtree btree, Page page)
GinNullCategory category;
if (GinPageRightMost(page))
return FALSE;
return false;
itup = getRightMostTuple(page);
attnum = gintuple_get_attrnum(btree->ginstate, itup);
@ -258,9 +258,9 @@ entryIsMoveRight(GinBtree btree, Page page)
if (ginCompareAttEntries(btree->ginstate,
btree->entryAttnum, btree->entryKey, btree->entryCategory,
attnum, key, category) > 0)
return TRUE;
return true;
return FALSE;
return false;
}
/*
@ -356,7 +356,7 @@ entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack)
if (btree->fullScan)
{
stack->off = FirstOffsetNumber;
return TRUE;
return true;
}
low = FirstOffsetNumber;
@ -762,9 +762,9 @@ ginPrepareEntryScan(GinBtree btree, OffsetNumber attnum,
btree->fillRoot = ginEntryFillRoot;
btree->prepareDownlink = entryPrepareDownlink;
btree->isData = FALSE;
btree->fullScan = FALSE;
btree->isBuild = FALSE;
btree->isData = false;
btree->fullScan = false;
btree->isBuild = false;
btree->entryAttnum = attnum;
btree->entryKey = key;


@ -311,7 +311,7 @@ restartScanEntry:
entry->nlist = 0;
entry->matchBitmap = NULL;
entry->matchResult = NULL;
entry->reduceResult = FALSE;
entry->reduceResult = false;
entry->predictNumberResult = 0;
/*
@ -324,9 +324,9 @@ restartScanEntry:
stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot);
page = BufferGetPage(stackEntry->buffer);
/* ginFindLeafPage() will have already checked snapshot age. */
needUnlock = TRUE;
needUnlock = true;
entry->isFinished = TRUE;
entry->isFinished = true;
if (entry->isPartialMatch ||
entry->queryCategory == GIN_CAT_EMPTY_QUERY)
@ -363,7 +363,7 @@ restartScanEntry:
if (entry->matchBitmap && !tbm_is_empty(entry->matchBitmap))
{
entry->matchIterator = tbm_begin_iterate(entry->matchBitmap);
entry->isFinished = FALSE;
entry->isFinished = false;
}
}
else if (btreeEntry.findItem(&btreeEntry, stackEntry))
@ -385,7 +385,7 @@ restartScanEntry:
* root of posting tree.
*/
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
needUnlock = FALSE;
needUnlock = false;
stack = ginScanBeginPostingTree(&entry->btree, ginstate->index,
rootPostingTree, snapshot);
@ -410,7 +410,7 @@ restartScanEntry:
LockBuffer(entry->buffer, GIN_UNLOCK);
freeGinBtreeStack(stack);
entry->isFinished = FALSE;
entry->isFinished = false;
}
else if (GinGetNPosting(itup) > 0)
{
@ -418,7 +418,7 @@ restartScanEntry:
&entry->nlist);
entry->predictNumberResult = entry->nlist;
entry->isFinished = FALSE;
entry->isFinished = false;
}
}
@ -565,7 +565,7 @@ startScan(IndexScanDesc scan)
for (i = 0; i < so->totalentries; i++)
{
so->entries[i]->predictNumberResult /= so->totalentries;
so->entries[i]->reduceResult = TRUE;
so->entries[i]->reduceResult = true;
}
}
}
@ -666,7 +666,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
{
UnlockReleaseBuffer(entry->buffer);
entry->buffer = InvalidBuffer;
entry->isFinished = TRUE;
entry->isFinished = true;
return;
}
@ -728,7 +728,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry,
/*
* Sets entry->curItem to next heap item pointer > advancePast, for one entry
* of one scan key, or sets entry->isFinished to TRUE if there are no more.
* of one scan key, or sets entry->isFinished to true if there are no more.
*
* Item pointers are returned in ascending order.
*
@ -775,7 +775,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
ItemPointerSetInvalid(&entry->curItem);
tbm_end_iterate(entry->matchIterator);
entry->matchIterator = NULL;
entry->isFinished = TRUE;
entry->isFinished = true;
break;
}
@ -835,7 +835,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
entry->matchResult->offsets[entry->offset]);
entry->offset++;
gotitem = true;
} while (!gotitem || (entry->reduceResult == TRUE && dropItem(entry)));
} while (!gotitem || (entry->reduceResult == true && dropItem(entry)));
}
else if (!BufferIsValid(entry->buffer))
{
@ -848,7 +848,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
if (entry->offset >= entry->nlist)
{
ItemPointerSetInvalid(&entry->curItem);
entry->isFinished = TRUE;
entry->isFinished = true;
break;
}
@ -876,7 +876,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
entry->curItem = entry->list[entry->offset++];
} while (ginCompareItemPointers(&entry->curItem, &advancePast) <= 0 ||
(entry->reduceResult == TRUE && dropItem(entry)));
(entry->reduceResult == true && dropItem(entry)));
}
}
@ -891,7 +891,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
* iff recheck is needed for this item pointer (including the case where the
* item pointer is a lossy page pointer).
*
* If all entry streams are exhausted, sets key->isFinished to TRUE.
* If all entry streams are exhausted, sets key->isFinished to true.
*
* Item pointers must be returned in ascending order.
*
@ -963,7 +963,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
if (allFinished)
{
/* all entries are finished */
key->isFinished = TRUE;
key->isFinished = true;
return;
}
@ -1051,7 +1051,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
* them. We could pass them as MAYBE as well, but if we're using the
* "shim" implementation of a tri-state consistent function (see
* ginlogic.c), it's better to pass as few MAYBEs as possible. So pass
* them as TRUE.
* them as true.
*
* Note that only lossy-page entries pointing to the current item's page
* should trigger this processing; we might have future lossy pages in the
@ -1064,7 +1064,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
for (i = 0; i < key->nentries; i++)
{
entry = key->scanEntry[i];
if (entry->isFinished == FALSE &&
if (entry->isFinished == false &&
ginCompareItemPointers(&entry->curItem, &curPageLossy) == 0)
{
if (i < key->nuserentries)
@ -1314,7 +1314,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
}
}
return TRUE;
return true;
}
@ -1508,7 +1508,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
memset(key->entryRes, GIN_FALSE, key->nentries);
}
memset(pos->hasMatchKey, FALSE, so->nkeys);
memset(pos->hasMatchKey, false, so->nkeys);
/*
* Outer loop iterates over multiple pending-list pages when a single heap


@ -185,7 +185,7 @@ ginEntryInsert(GinState *ginstate,
IndexTuple itup;
Page page;
insertdata.isDelete = FALSE;
insertdata.isDelete = false;
/* During index build, count the to-be-inserted entry */
if (buildStats)
@ -221,7 +221,7 @@ ginEntryInsert(GinState *ginstate,
itup = addItemPointersToLeafTuple(ginstate, itup,
items, nitem, buildStats);
insertdata.isDelete = TRUE;
insertdata.isDelete = true;
}
else
{


@ -235,7 +235,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
DataPageDeleteStack *me;
Buffer buffer;
Page page;
bool meDelete = FALSE;
bool meDelete = false;
bool isempty;
if (isRoot)
@ -274,7 +274,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
{
PostingItem *pitem = GinDataPageGetPostingItem(page, i);
if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), FALSE, me, i))
if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), false, me, i))
i--;
}
}
@ -291,7 +291,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
{
Assert(!isRoot);
ginDeletePage(gvs, blkno, me->leftBlkno, me->parent->blkno, myoff, me->parent->isRoot);
meDelete = TRUE;
meDelete = true;
}
}
@ -319,7 +319,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
{
Buffer buffer;
Page page;
bool hasVoidPage = FALSE;
bool hasVoidPage = false;
MemoryContext oldCxt;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
@ -339,7 +339,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
/* if root is a leaf page, we don't desire further processing */
if (GinDataLeafPageIsEmpty(page))
hasVoidPage = TRUE;
hasVoidPage = true;
UnlockReleaseBuffer(buffer);
@ -348,8 +348,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
else
{
OffsetNumber i;
bool hasEmptyChild = FALSE;
bool hasNonEmptyChild = FALSE;
bool hasEmptyChild = false;
bool hasNonEmptyChild = false;
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
@ -369,10 +369,10 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
if (ginVacuumPostingTreeLeaves(gvs, children[i], FALSE))
hasEmptyChild = TRUE;
if (ginVacuumPostingTreeLeaves(gvs, children[i], false))
hasEmptyChild = true;
else
hasNonEmptyChild = TRUE;
hasNonEmptyChild = true;
}
pfree(children);
@ -380,12 +380,12 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
vacuum_delay_point();
/*
* All subtree is empty - just return TRUE to indicate that parent
* All subtree is empty - just return true to indicate that parent
* must do a cleanup. Unless we are ROOT an there is way to go upper.
*/
if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
return TRUE;
return true;
if (hasEmptyChild)
{
@ -399,9 +399,9 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
memset(&root, 0, sizeof(DataPageDeleteStack));
root.leftBlkno = InvalidBlockNumber;
root.isRoot = TRUE;
root.isRoot = true;
ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);
ginScanToDelete(gvs, blkno, true, &root, InvalidOffsetNumber);
ptr = root.child;
@ -416,14 +416,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
}
/* Here we have deleted all empty subtrees */
return FALSE;
return false;
}
}
static void
ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
{
ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE);
ginVacuumPostingTreeLeaves(gvs, rootBlkno, true);
}
/*


@ -1364,8 +1364,8 @@ gistSplit(Relation r,
IndexTupleSize(itup[0]), GiSTPageSize,
RelationGetRelationName(r))));
memset(v.spl_lisnull, TRUE, sizeof(bool) * giststate->tupdesc->natts);
memset(v.spl_risnull, TRUE, sizeof(bool) * giststate->tupdesc->natts);
memset(v.spl_lisnull, true, sizeof(bool) * giststate->tupdesc->natts);
memset(v.spl_risnull, true, sizeof(bool) * giststate->tupdesc->natts);
gistSplitByKey(r, page, itup, len, giststate, &v, 0);
/* form left and right vector */


@ -197,7 +197,7 @@ gistindex_keytest(IndexScanDesc scan,
gistdentryinit(giststate, key->sk_attno - 1, &de,
datum, r, page, offset,
FALSE, isNull);
false, isNull);
/*
* Call the Consistent function to evaluate the test. The
@ -258,7 +258,7 @@ gistindex_keytest(IndexScanDesc scan,
gistdentryinit(giststate, key->sk_attno - 1, &de,
datum, r, page, offset,
FALSE, isNull);
false, isNull);
/*
* Call the Distance function to evaluate the distance. The


@ -105,7 +105,7 @@ box_penalty(const BOX *original, const BOX *new)
* The GiST Consistent method for boxes
*
* Should return false if for all data items x below entry,
* the predicate x op query must be FALSE, where op is the oper
* the predicate x op query must be false, where op is the oper
* corresponding to strategy in the pg_amop table.
*/
Datum
@ -122,7 +122,7 @@ gist_box_consistent(PG_FUNCTION_ARGS)
*recheck = false;
if (DatumGetBoxP(entry->key) == NULL || query == NULL)
PG_RETURN_BOOL(FALSE);
PG_RETURN_BOOL(false);
/*
* if entry is not leaf, use rtree_internal_consistent, else use
@ -1056,7 +1056,7 @@ gist_poly_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;
@ -1081,7 +1081,7 @@ gist_poly_consistent(PG_FUNCTION_ARGS)
*recheck = true;
if (DatumGetBoxP(entry->key) == NULL || query == NULL)
PG_RETURN_BOOL(FALSE);
PG_RETURN_BOOL(false);
/*
* Since the operators require recheck anyway, we can just use
@ -1124,7 +1124,7 @@ gist_circle_compress(PG_FUNCTION_ARGS)
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
}
else
retval = entry;
@ -1150,7 +1150,7 @@ gist_circle_consistent(PG_FUNCTION_ARGS)
*recheck = true;
if (DatumGetBoxP(entry->key) == NULL || query == NULL)
PG_RETURN_BOOL(FALSE);
PG_RETURN_BOOL(false);
/*
* Since the operators require recheck anyway, we can just use
@ -1186,7 +1186,7 @@ gist_point_compress(PG_FUNCTION_ARGS)
box->high = box->low = *point;
gistentryinit(*retval, BoxPGetDatum(box),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, false);
PG_RETURN_POINTER(retval);
}
@ -1215,7 +1215,7 @@ gist_point_fetch(PG_FUNCTION_ARGS)
r->y = in->high.y;
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, FALSE);
entry->offset, false);
PG_RETURN_POINTER(retval);
}


@ -125,7 +125,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
* check for nulls
*/
gistentryinit(entry, spl->splitVector.spl_rdatum, r, NULL,
(OffsetNumber) 0, FALSE);
(OffsetNumber) 0, false);
for (i = 0; i < spl->splitVector.spl_nleft; i++)
{
int j = spl->splitVector.spl_left[i];
@ -141,7 +141,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/* And conversely for the right-side tuples */
gistentryinit(entry, spl->splitVector.spl_ldatum, r, NULL,
(OffsetNumber) 0, FALSE);
(OffsetNumber) 0, false);
for (i = 0; i < spl->splitVector.spl_nright; i++)
{
int j = spl->splitVector.spl_right[i];
@ -177,7 +177,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
{
OffsetNumber ai = a[i];
if (dontcare[ai] == FALSE)
if (dontcare[ai] == false)
{
/* re-emit item into a[] */
*curwpos = ai;
@ -213,10 +213,10 @@ placeOne(Relation r, GISTSTATE *giststate, GistSplitVector *v,
rpenalty;
GISTENTRY entry;
gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, false);
lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno],
identry + attno, isnull[attno]);
gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, false);
rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno],
identry + attno, isnull[attno]);
@ -265,10 +265,10 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
entrySL,
entrySR;
gistentryinit(entryL, oldL, r, NULL, 0, FALSE);
gistentryinit(entryR, oldR, r, NULL, 0, FALSE);
gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
gistentryinit(entryL, oldL, r, NULL, 0, false);
gistentryinit(entryR, oldR, r, NULL, 0, false);
gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, false);
gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, false);
if (sv->spl_ldatum_exists && sv->spl_rdatum_exists)
{
@ -320,8 +320,8 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
SWAPVAR(sv->spl_left, sv->spl_right, off);
SWAPVAR(sv->spl_nleft, sv->spl_nright, noff);
SWAPVAR(sv->spl_ldatum, sv->spl_rdatum, datum);
gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, false);
gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, false);
}
if (sv->spl_ldatum_exists)
@ -396,20 +396,20 @@ genericPickSplit(GISTSTATE *giststate, GistEntryVector *entryvec, GIST_SPLITVEC
* Calls user picksplit method for attno column to split tuples into
* two vectors.
*
* Returns FALSE if split is complete (there are no more index columns, or
* Returns false if split is complete (there are no more index columns, or
* there is no need to consider them because split is optimal already).
*
* Returns TRUE and v->spl_dontcare = NULL if the picksplit result is
* Returns true and v->spl_dontcare = NULL if the picksplit result is
* degenerate (all tuples seem to be don't-cares), so we should just
* disregard this column and split on the next column(s) instead.
*
* Returns TRUE and v->spl_dontcare != NULL if there are don't-care tuples
* Returns true and v->spl_dontcare != NULL if there are don't-care tuples
* that could be relocated based on the next column(s). The don't-care
* tuples have been removed from the split and must be reinserted by caller.
* There is at least one non-don't-care tuple on each side of the split,
* and union keys for all columns are updated to include just those tuples.
*
* A TRUE result implies there is at least one more index column.
* A true result implies there is at least one more index column.
*/
static bool
gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVector *v,
@ -610,7 +610,7 @@ gistSplitHalf(GIST_SPLITVEC *v, int len)
* attno: column we are working on (zero-based index)
*
* Outside caller must initialize v->spl_lisnull and v->spl_risnull arrays
* to all-TRUE. On return, spl_left/spl_nleft contain indexes of tuples
* to all-true. On return, spl_left/spl_nleft contain indexes of tuples
* to go left, spl_right/spl_nright contain indexes of tuples to go right,
* spl_lattr/spl_lisnull contain left-side union key values, and
* spl_rattr/spl_risnull contain right-side union key values. Other fields
@ -643,7 +643,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
&IsNull);
gistdentryinit(giststate, attno, &(entryvec->vector[i]),
datum, r, page, i,
FALSE, IsNull);
false, IsNull);
if (IsNull)
offNullTuples[nOffNullTuples++] = i;
}
@ -655,7 +655,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
* our attention to the next column. If there's no next column, just
* split page in half.
*/
v->spl_risnull[attno] = v->spl_lisnull[attno] = TRUE;
v->spl_risnull[attno] = v->spl_lisnull[attno] = true;
if (attno + 1 < giststate->tupdesc->natts)
gistSplitByKey(r, page, itup, len, giststate, v, attno + 1);
@ -672,7 +672,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
*/
v->splitVector.spl_right = offNullTuples;
v->splitVector.spl_nright = nOffNullTuples;
v->spl_risnull[attno] = TRUE;
v->spl_risnull[attno] = true;
v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
v->splitVector.spl_nleft = 0;


@ -179,7 +179,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len,
evec->vector + evec->n,
datum,
NULL, NULL, (OffsetNumber) 0,
FALSE, IsNull);
false, IsNull);
evec->n++;
}
@ -187,7 +187,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len,
if (evec->n == 0)
{
attr[i] = (Datum) 0;
isnull[i] = TRUE;
isnull[i] = true;
}
else
{
@ -204,7 +204,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len,
PointerGetDatum(evec),
PointerGetDatum(&attrsize));
isnull[i] = FALSE;
isnull[i] = false;
}
}
}
@ -246,17 +246,17 @@ gistMakeUnionKey(GISTSTATE *giststate, int attno,
if (isnull1 && isnull2)
{
*dstisnull = TRUE;
*dstisnull = true;
*dst = (Datum) 0;
}
else
{
if (isnull1 == FALSE && isnull2 == FALSE)
if (isnull1 == false && isnull2 == false)
{
evec->vector[0] = *entry1;
evec->vector[1] = *entry2;
}
else if (isnull1 == FALSE)
else if (isnull1 == false)
{
evec->vector[0] = *entry1;
evec->vector[1] = *entry1;
@ -267,7 +267,7 @@ gistMakeUnionKey(GISTSTATE *giststate, int attno,
evec->vector[1] = *entry2;
}
*dstisnull = FALSE;
*dstisnull = false;
*dst = FunctionCall2Coll(&giststate->unionFn[attno],
giststate->supportCollation[attno],
PointerGetDatum(evec),
@ -303,7 +303,7 @@ gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
datum = index_getattr(tuple, i + 1, giststate->tupdesc, &isnull[i]);
gistdentryinit(giststate, i, &attdata[i],
datum, r, p, o,
FALSE, isnull[i]);
false, isnull[i]);
}
}
@ -313,7 +313,7 @@ gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
IndexTuple
gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
{
bool neednew = FALSE;
bool neednew = false;
GISTENTRY oldentries[INDEX_MAX_KEYS],
addentries[INDEX_MAX_KEYS];
bool oldisnull[INDEX_MAX_KEYS],
@ -451,7 +451,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
/* Compute penalty for this column. */
datum = index_getattr(itup, j + 1, giststate->tupdesc, &IsNull);
gistdentryinit(giststate, j, &entry, datum, r, p, i,
FALSE, IsNull);
false, IsNull);
usize = gistpenalty(giststate, j, &entry, IsNull,
&identry[j], isnull[j]);
if (usize > 0)
@ -691,8 +691,8 @@ gistpenalty(GISTSTATE *giststate, int attno,
{
float penalty = 0.0;
if (giststate->penaltyFn[attno].fn_strict == FALSE ||
(isNullOrig == FALSE && isNullAdd == FALSE))
if (giststate->penaltyFn[attno].fn_strict == false ||
(isNullOrig == false && isNullAdd == false))
{
FunctionCall3Coll(&giststate->penaltyFn[attno],
giststate->supportCollation[attno],


@ -992,7 +992,7 @@ fail:
* for the purpose. OTOH, adding a splitpoint is a very infrequent operation,
* so it may not be worth worrying about.
*
* Returns TRUE if successful, or FALSE if allocation failed due to
* Returns true if successful, or false if allocation failed due to
* BlockNumber overflow.
*/
static bool


@ -39,7 +39,7 @@ static void _hash_readnext(IndexScanDesc scan, Buffer *bufp,
* On successful exit, scan->xs_ctup.t_self is set to the TID
* of the next heap tuple. so->currPos is updated as needed.
*
* On failure exit (no more tuples), we return FALSE with pin
* On failure exit (no more tuples), we return false with pin
* held on bucket page but no pins or locks held on overflow
* page.
*/
@ -283,7 +283,7 @@ _hash_readprev(IndexScanDesc scan,
* tuple(s) on the page has been loaded into so->currPos,
* scan->xs_ctup.t_self is set to the heap TID of the current tuple.
*
* On failure exit (no more tuples), we return FALSE, with pin held on
* On failure exit (no more tuples), we return false, with pin held on
* bucket page but no pins or locks held on overflow page.
*/
bool
@ -507,7 +507,7 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
/*
* Remember next and previous block numbers for scrollable
* cursors to know the start position and return FALSE
* cursors to know the start position and return false
* indicating that no more matching tuples were found. Also,
* don't reset currPage or lsn, because we expect
* _hash_kill_items to be called for the old page after this
@ -560,7 +560,7 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
/*
* Remember next and previous block numbers for scrollable
* cursors to know the start position and return FALSE
* cursors to know the start position and return false
* indicating that no more matching tuples were found. Also,
* don't reset currPage or lsn, because we expect
* _hash_kill_items to be called for the old page after this


@ -1379,7 +1379,7 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
* heap_beginscan_strat offers an extended API that lets the caller control
* whether a nondefault buffer access strategy can be used, and whether
* syncscan can be chosen (possibly resulting in the scan not starting from
* block zero). Both of these default to TRUE with plain heap_beginscan.
* block zero). Both of these default to true with plain heap_beginscan.
*
* heap_beginscan_bm is an alternative entry point for setting up a
* HeapScanDesc for a bitmap heap scan. Although that scan technology is
@ -1842,16 +1842,16 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction)
* against the specified snapshot.
*
* If successful (tuple found and passes snapshot time qual), then *userbuf
* is set to the buffer holding the tuple and TRUE is returned. The caller
* is set to the buffer holding the tuple and true is returned. The caller
* must unpin the buffer when done with the tuple.
*
* If the tuple is not found (ie, item number references a deleted slot),
* then tuple->t_data is set to NULL and FALSE is returned.
* then tuple->t_data is set to NULL and false is returned.
*
* If the tuple is found but fails the time qual check, then FALSE is returned
* If the tuple is found but fails the time qual check, then false is returned
* but tuple->t_data is left pointing to the tuple.
*
* keep_buf determines what is done with the buffer in the FALSE-result cases.
* keep_buf determines what is done with the buffer in the false-result cases.
* When the caller specifies keep_buf = true, we retain the pin on the buffer
* and return it in *userbuf (so the caller must eventually unpin it); when
* keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
@ -1993,15 +1993,15 @@ heap_fetch(Relation relation,
* of a HOT chain), and buffer is the buffer holding this tuple. We search
* for the first chain member satisfying the given snapshot. If one is
* found, we update *tid to reference that tuple's offset number, and
* return TRUE. If no match, return FALSE without modifying *tid.
* return true. If no match, return false without modifying *tid.
*
* heapTuple is a caller-supplied buffer. When a match is found, we return
* the tuple here, in addition to updating *tid. If no match is found, the
* contents of this buffer on return are undefined.
*
* If all_dead is not NULL, we check non-visible tuples to see if they are
* globally dead; *all_dead is set TRUE if all members of the HOT chain
* are vacuumable, FALSE if not.
* globally dead; *all_dead is set true if all members of the HOT chain
* are vacuumable, false if not.
*
* Unlike heap_fetch, the caller must already have pin and (at least) share
* lock on the buffer; it is still pinned/locked at exit. Also unlike
@ -6594,7 +6594,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
* are older than the specified cutoff XID and cutoff MultiXactId. If so,
* setup enough state (in the *frz output argument) to later execute and
* WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
* WAL-log what we would need to do, and return true. Return false if nothing
* is to be changed. In addition, set *totally_frozen_p to true if the tuple
* will be totally frozen after these operations are performed and false if
* more freezing will eventually be required.
@ -7242,7 +7242,7 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
* are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
* are older than the specified cutoff XID or MultiXactId. If so, return true.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.

View File

@ -39,7 +39,7 @@ typedef struct
OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
OffsetNumber nowdead[MaxHeapTuplesPerPage];
OffsetNumber nowunused[MaxHeapTuplesPerPage];
/* marked[i] is TRUE if item i is entered in one of the above arrays */
/* marked[i] is true if item i is entered in one of the above arrays */
bool marked[MaxHeapTuplesPerPage + 1];
} PruneState;
@ -170,7 +170,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
* or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
*
* If report_stats is true then we send the number of reclaimed heap-only
* tuples to pgstats. (This must be FALSE during vacuum, since vacuum will
* tuples to pgstats. (This must be false during vacuum, since vacuum will
* send its own new total to pgstats, and we don't want this delta applied
* on top of that.)
*

View File

@ -140,9 +140,9 @@ identify_opfamily_groups(CatCList *oprlist, CatCList *proclist)
/*
* Validate the signature (argument and result types) of an opclass support
* function. Return TRUE if OK, FALSE if not.
* function. Return true if OK, false if not.
*
* The "..." represents maxargs argument-type OIDs. If "exact" is TRUE, they
* The "..." represents maxargs argument-type OIDs. If "exact" is true, they
* must match the function arg types exactly, else only binary-coercibly.
* In any case the function result type must match restype exactly.
*/
@ -184,7 +184,7 @@ check_amproc_signature(Oid funcid, Oid restype, bool exact,
/*
* Validate the signature (argument and result types) of an opclass operator.
* Return TRUE if OK, FALSE if not.
* Return true if OK, false if not.
*
* Currently, we can hard-wire this as accepting only binary operators. Also,
* we can insist on exact type matches, since the given lefttype/righttype

View File

@ -784,7 +784,7 @@ index_can_return(Relation indexRelation, int attno)
{
RELATION_CHECKS;
/* amcanreturn is optional; assume FALSE if not provided by AM */
/* amcanreturn is optional; assume false if not provided by AM */
if (indexRelation->rd_amroutine->amcanreturn == NULL)
return false;

View File

@ -99,8 +99,8 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* don't actually insert.
*
* The result value is only significant for UNIQUE_CHECK_PARTIAL:
* it must be TRUE if the entry is known unique, else FALSE.
* (In the current implementation we'll also return TRUE after a
* it must be true if the entry is known unique, else false.
* (In the current implementation we'll also return true after a
* successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
* that's just a coding artifact.)
*/

View File

@ -524,7 +524,7 @@ _bt_compare(Relation rel,
* scan->xs_ctup.t_self is set to the heap TID of the current tuple,
* and if requested, scan->xs_itup points to a copy of the index tuple.
*
* If there are no matching items in the index, we return FALSE, with no
* If there are no matching items in the index, we return false, with no
* pins or locks held.
*
* Note that scan->keyData[], and the so->keyData[] scankey built from it,
@ -1336,7 +1336,7 @@ _bt_saveitem(BTScanOpaque so, int itemIndex,
*
* For success on a scan using a non-MVCC snapshot we hold a pin, but not a
* read lock, on that page. If we do not hold the pin, we set so->currPos.buf
* to InvalidBuffer. We return TRUE to indicate success.
* to InvalidBuffer. We return true to indicate success.
*/
static bool
_bt_steppage(IndexScanDesc scan, ScanDirection dir)
@ -1440,10 +1440,10 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
*
* On success exit, so->currPos is updated to contain data from the next
* interesting page. Caller is responsible to release lock and pin on
* buffer on success. We return TRUE to indicate success.
* buffer on success. We return true to indicate success.
*
* If there are no more matching records in the given direction, we drop all
* locks and pins, set so->currPos.buf to InvalidBuffer, and return FALSE.
* locks and pins, set so->currPos.buf to InvalidBuffer, and return false.
*/
static bool
_bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
@ -1608,7 +1608,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
/*
* _bt_parallel_readpage() -- Read current page containing valid data for scan
*
* On success, release lock and maybe pin on buffer. We return TRUE to
* On success, release lock and maybe pin on buffer. We return true to
* indicate success.
*/
static bool

View File

@ -540,8 +540,8 @@ _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
/*
* _bt_advance_array_keys() -- Advance to next set of array elements
*
* Returns TRUE if there is another set of values to consider, FALSE if not.
* On TRUE result, the scankeys are initialized with the next set of values.
* Returns true if there is another set of values to consider, false if not.
* On true result, the scankeys are initialized with the next set of values.
*/
bool
_bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
@ -724,7 +724,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* for a forward scan; or after the last match for a backward scan.)
*
* As a byproduct of this work, we can detect contradictory quals such
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = FALSE,
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
* indicating the scan need not be run at all since no tuples can match.
* (In this case we do not bother completing the output key array!)
* Again, missing cross-type operators might cause us to fail to prove the
@ -1020,7 +1020,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
*
* If the opfamily doesn't supply a complete set of cross-type operators we
* may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* we store the operator result in *result and return true. We return false
* if the comparison could not be made.
*
* Note: op always points at the same ScanKey as either leftarg or rightarg.
@ -1185,8 +1185,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
* a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
* a NULL means that the qual cannot be satisfied. We return true if the
* comparison value isn't NULL, or false if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
* on a rescan we will be looking at already-processed scankeys. Hence

View File

@ -580,7 +580,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position,
* Test to see if the user-defined picksplit function failed to do its job,
* ie, it put all the leaf tuples into the same node.
* If so, randomly divide the tuples into several nodes (all with the same
* label) and return TRUE to select allTheSame mode for this inner tuple.
* label) and return true to select allTheSame mode for this inner tuple.
*
* (This code is also used to forcibly select allTheSame mode for nulls.)
*

View File

@ -727,7 +727,7 @@ BootStrapCLOG(void)
/*
* Initialize (or reinitialize) a page of CLOG to zeroes.
* If writeXlog is TRUE, also emit an XLOG record saying we did this.
* If writeXlog is true, also emit an XLOG record saying we did this.
*
* The page is not actually written, just set up in shared memory.
* The slot number of the new page is returned.

View File

@ -531,7 +531,7 @@ BootStrapCommitTs(void)
/*
* Initialize (or reinitialize) a page of CommitTs to zeroes.
* If writeXlog is TRUE, also emit an XLOG record saying we did this.
* If writeXlog is true, also emit an XLOG record saying we did this.
*
* The page is not actually written, just set up in shared memory.
* The slot number of the new page is returned.

View File

@ -1892,7 +1892,7 @@ BootStrapMultiXact(void)
/*
* Initialize (or reinitialize) a page of MultiXactOffset to zeroes.
* If writeXlog is TRUE, also emit an XLOG record saying we did this.
* If writeXlog is true, also emit an XLOG record saying we did this.
*
* The page is not actually written, just set up in shared memory.
* The slot number of the new page is returned.

View File

@ -629,7 +629,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno)
* Physical read of a (previously existing) page into a buffer slot
*
* On failure, we cannot just ereport(ERROR) since caller has put state in
* shared memory that must be undone. So, we return FALSE and save enough
* shared memory that must be undone. So, we return false and save enough
* info in static variables to let SlruReportIOError make the report.
*
* For now, assume it's not worth keeping a file pointer open across
@ -705,7 +705,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* Physical write of a page from a buffer slot
*
* On failure, we cannot just ereport(ERROR) since caller has put state in
* shared memory that must be undone. So, we return FALSE and save enough
* shared memory that must be undone. So, we return false and save enough
* info in static variables to let SlruReportIOError make the report.
*
* For now, assume it's not worth keeping a file pointer open across
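
The SlruPhysicalReadPage/SlruPhysicalWritePage comments above describe a deferred-error idiom: because the caller still holds shared-memory state that must be unwound first, the I/O routine only records what went wrong and returns false, and a separate function raises the error afterwards. A minimal stand-alone sketch of that idiom, with invented names (physical_read_page, report_io_error) and none of the real SLRU machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* cause of the last failed physical I/O, kept for a later error report */
    static const char *last_io_cause = NULL;

    /* Physical read; on failure just remember the cause and return false. */
    static bool
    physical_read_page(FILE *fp, void *buf, size_t pagesize)
    {
        if (fread(buf, 1, pagesize, fp) != pagesize)
        {
            last_io_cause = ferror(fp) ? "read error" : "unexpected end of file";
            return false;       /* caller undoes its shared state, then reports */
        }
        return true;
    }

    /* Called only after the caller has cleaned up its own state. */
    static void
    report_io_error(const char *path, int pageno)
    {
        fprintf(stderr, "could not read page %d of file \"%s\": %s\n",
                pageno, path, last_io_cause);
    }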

View File

@ -170,9 +170,9 @@ typedef struct GlobalTransactionData
Oid owner; /* ID of user that executed the xact */
BackendId locking_backend; /* backend currently working on the xact */
bool valid; /* TRUE if PGPROC entry is in proc array */
bool ondisk; /* TRUE if prepare state file is on disk */
bool inredo; /* TRUE if entry was added via xlog_redo */
bool valid; /* true if PGPROC entry is in proc array */
bool ondisk; /* true if prepare state file is on disk */
bool inredo; /* true if entry was added via xlog_redo */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;

View File

@ -671,8 +671,8 @@ SubTransactionIsActive(SubTransactionId subxid)
/*
* GetCurrentCommandId
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
* "used" must be true if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. false means the ID is being fetched
* for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
@ -3470,7 +3470,7 @@ BeginTransactionBlock(void)
* This executes a PREPARE command.
*
* Since PREPARE may actually do a ROLLBACK, the result indicates what
* happened: TRUE for PREPARE, FALSE for ROLLBACK.
* happened: true for PREPARE, false for ROLLBACK.
*
* Note that we don't actually do anything here except change blockState.
* The real work will be done in the upcoming PrepareTransaction().
@ -3522,7 +3522,7 @@ PrepareTransactionBlock(char *gid)
* This executes a COMMIT command.
*
* Since COMMIT may actually do a ROLLBACK, the result indicates what
* happened: TRUE for COMMIT, FALSE for ROLLBACK.
* happened: true for COMMIT, false for ROLLBACK.
*
* Note that we don't actually do anything here except change blockState.
* The real work will be done in the upcoming CommitTransactionCommand().

View File

@ -2324,7 +2324,7 @@ XLogCheckpointNeeded(XLogSegNo new_segno)
/*
* Write and/or fsync the log at least as far as WriteRqst indicates.
*
* If flexible == TRUE, we don't have to write as far as WriteRqst, but
* If flexible == true, we don't have to write as far as WriteRqst, but
* may stop at any convenient boundary (such as a cache or logfile boundary).
* This option allows us to avoid uselessly issuing multiple writes when a
* single one would do.
@ -2945,7 +2945,7 @@ XLogFlush(XLogRecPtr record)
*
* This routine is invoked periodically by the background walwriter process.
*
* Returns TRUE if there was any work to do, even if we skipped flushing due
* Returns true if there was any work to do, even if we skipped flushing due
* to wal_writer_delay/wal_writer_flush_after.
*/
bool
@ -3141,12 +3141,12 @@ XLogNeedsFlush(XLogRecPtr record)
*
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
* pre-existing file will be deleted). On return, TRUE if a pre-existing
* *use_existent: if true, OK to use a pre-existing file (else, any
* pre-existing file will be deleted). On return, true if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
* place. This should be TRUE except during bootstrap log creation. The
* use_lock: if true, acquire ControlFileLock while moving file into
* place. This should be true except during bootstrap log creation. The
* caller must *not* hold the lock at call.
*
* Returns FD of opened file.
@ -3441,24 +3441,24 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno,
* filename while it's being created) and to recycle an old segment.
*
* *segno: identify segment to install as (or first possible target).
* When find_free is TRUE, this is modified on return to indicate the
* When find_free is true, this is modified on return to indicate the
* actual installation location or last segment searched.
*
* tmppath: initial name of file to install. It will be renamed into place.
*
* find_free: if TRUE, install the new segment at the first empty segno
* number at or after the passed numbers. If FALSE, install the new segment
* find_free: if true, install the new segment at the first empty segno
* number at or after the passed numbers. If false, install the new segment
* exactly where specified, deleting any existing segment file there.
*
* max_segno: maximum segment number to install the new file as. Fail if no
* free slot is found between *segno and max_segno. (Ignored when find_free
* is FALSE.)
* is false.)
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
* place. This should be TRUE except during bootstrap log creation. The
* use_lock: if true, acquire ControlFileLock while moving file into
* place. This should be true except during bootstrap log creation. The
* caller must *not* hold the lock at call.
*
* Returns TRUE if the file was installed successfully. FALSE indicates that
* Returns true if the file was installed successfully. false indicates that
* max_segno limit was exceeded, or an error occurred while renaming the
* file into place.
*/
@ -5680,7 +5680,7 @@ getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime)
* For point-in-time recovery, this function decides whether we want to
* stop applying the XLOG before the current record.
*
* Returns TRUE if we are stopping, FALSE otherwise. If stopping, some
* Returns true if we are stopping, false otherwise. If stopping, some
* information is saved in recoveryStopXid et al for use in annotating the
* new timeline's history file.
*/
@ -6659,7 +6659,7 @@ StartupXLOG(void)
ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
wasShutdown ? "true" : "false")));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
@ -11192,11 +11192,11 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
*
* Returns TRUE if a backup_label was found (and fills the checkpoint
* Returns true if a backup_label was found (and fills the checkpoint
* location and its REDO location into *checkPointLoc and RedoStartLSN,
* respectively); returns FALSE if not. If this backup_label came from a
* streamed backup, *backupEndRequired is set to TRUE. If this backup_label
* was created during recovery, *backupFromStandby is set to TRUE.
* respectively); returns false if not. If this backup_label came from a
* streamed backup, *backupEndRequired is set to true. If this backup_label
* was created during recovery, *backupFromStandby is set to true.
*/
static bool
read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
@ -11279,8 +11279,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
* recovering from a backup dump file, and we therefore need to create symlinks
* as per the information present in tablespace_map file.
*
* Returns TRUE if a tablespace_map file was found (and fills the link
* information for all the tablespace links present in file); returns FALSE
* Returns true if a tablespace_map file was found (and fills the link
* information for all the tablespace links present in file); returns false
* if not.
*/
static bool
@ -11714,7 +11714,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* If primary_conninfo is set, launch walreceiver to try
* to stream the missing WAL.
*
* If fetching_ckpt is TRUE, RecPtr points to the initial
* If fetching_ckpt is true, RecPtr points to the initial
* checkpoint location. In that case, we use RedoStartLSN
* as the streaming start position instead of RecPtr, so
* that when we later jump backwards to start redo at

View File

@ -33,11 +33,11 @@
* Attempt to retrieve the specified file from off-line archival storage.
* If successful, fill "path" with its complete path (note that this will be
* a temp file name that doesn't follow the normal naming convention), and
* return TRUE.
* return true.
*
* If not successful, fill "path" with the name of the normal on-line file
* (which may or may not actually exist, but we'll try to use it), and return
* FALSE.
* false.
*
* For fixed-size files, the caller may pass the expected size as an
* additional crosscheck on successful recovery. If the file size is not

View File

@ -797,8 +797,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
/*
* Create a compressed version of a backup block image.
*
* Returns FALSE if compression fails (i.e., compressed result is actually
* bigger than original). Otherwise, returns TRUE and sets 'dlen' to
* Returns false if compression fails (i.e., compressed result is actually
* bigger than original). Otherwise, returns true and sets 'dlen' to
* the length of compressed block image.
*/
static bool
@ -965,7 +965,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
* log_newpage_buffer instead.
*
* If the page follows the standard page layout, with a PageHeader and unused
* space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows
* space between pd_lower and pd_upper, set 'page_std' to true. That allows
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr
@ -1002,7 +1002,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
* function. This function will set the page LSN.
*
* If the page follows the standard page layout, with a PageHeader and unused
* space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows
* space between pd_lower and pd_upper, set 'page_std' to true. That allows
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr
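
The "create a compressed version of a backup block image" comment above follows a common convention: compression that does not actually shrink the data counts as failure, so the caller can fall back to storing the original image. A stand-alone sketch of that convention using zlib's compress2 (illustrative only; the backend uses its own compressor, and the names compress_block_image and dstcap are invented):

    #include <stdbool.h>
    #include <zlib.h>

    /*
     * Try to compress a block image.  Returns true and sets *dlen if the
     * compressed form is strictly smaller than the original; returns false
     * if compression failed or saved no space.
     */
    static bool
    compress_block_image(const unsigned char *src, unsigned long srclen,
                         unsigned char *dst, unsigned long dstcap,
                         unsigned long *dlen)
    {
        uLongf outlen = dstcap;

        if (compress2(dst, &outlen, src, srclen, Z_DEFAULT_COMPRESSION) != Z_OK)
            return false;
        if (outlen >= srclen)
            return false;       /* no space saved: treat as failure */

        *dlen = outlen;
        return true;
    }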

View File

@ -1302,8 +1302,8 @@ err:
* Returns information about the block that a block reference refers to.
*
* If the WAL record contains a block reference with the given ID, *rnode,
* *forknum, and *blknum are filled in (if not NULL), and returns TRUE.
* Otherwise returns FALSE.
* *forknum, and *blknum are filled in (if not NULL), and returns true.
* Otherwise returns false.
*/
bool
XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,

View File

@ -1000,15 +1000,15 @@ AddNewRelationType(const char *typeName,
* cooked_constraints: list of precooked check constraints and defaults
* relkind: relkind for new rel
* relpersistence: rel's persistence status (permanent, temp, or unlogged)
* shared_relation: TRUE if it's to be a shared relation
* mapped_relation: TRUE if the relation will use the relfilenode map
* oidislocal: TRUE if oid column (if any) should be marked attislocal
* shared_relation: true if it's to be a shared relation
* mapped_relation: true if the relation will use the relfilenode map
* oidislocal: true if oid column (if any) should be marked attislocal
* oidinhcount: attinhcount to assign to oid column (if any)
* oncommit: ON COMMIT marking (only relevant if it's a temp table)
* reloptions: reloptions in Datum form, or (Datum) 0 if none
* use_user_acl: TRUE if should look for user-defined default permissions;
* if FALSE, relacl is always set NULL
* allow_system_table_mods: TRUE to allow creation in system namespaces
* use_user_acl: true if should look for user-defined default permissions;
* if false, relacl is always set NULL
* allow_system_table_mods: true to allow creation in system namespaces
* is_internal: is this a system-generated catalog?
*
* Output parameters:
@ -2208,9 +2208,9 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal)
* rel: relation to be modified
* newColDefaults: list of RawColumnDefault structures
* newConstraints: list of Constraint nodes
* allow_merge: TRUE if check constraints may be merged with existing ones
* is_local: TRUE if definition is local, FALSE if it's inherited
* is_internal: TRUE if result of some internal process, not a user request
* allow_merge: true if check constraints may be merged with existing ones
* is_local: true if definition is local, false if it's inherited
* is_internal: true if result of some internal process, not a user request
*
* All entries in newColDefaults will be processed. Entries in newConstraints
* will be processed only if they are CONSTR_CHECK type.
@ -2455,7 +2455,7 @@ AddRelationNewConstraints(Relation rel,
* new one, and either adjust its conislocal/coninhcount settings or throw
* error as needed.
*
* Returns TRUE if merged (constraint is a duplicate), or FALSE if it's
* Returns true if merged (constraint is a duplicate), or false if it's
* got a so-far-unique name, or throws error if conflict.
*
* XXX See MergeConstraintsIntoExisting too if you change this code.

View File

@ -95,7 +95,7 @@
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
* activeTempCreationPending is TRUE if "pg_temp" appears first in the string
* activeTempCreationPending is true if "pg_temp" appears first in the string
* but is not reflected in activeCreationNamespace because the namespace isn't
* set up yet.
*
@ -136,7 +136,7 @@ static List *activeSearchPath = NIL;
/* default place to create stuff; if InvalidOid, no default */
static Oid activeCreationNamespace = InvalidOid;
/* if TRUE, activeCreationNamespace is wrong, it should be temp namespace */
/* if true, activeCreationNamespace is wrong, it should be temp namespace */
static bool activeTempCreationPending = false;
/* These variables are the values last derived from namespace_search_path: */

View File

@ -1056,7 +1056,7 @@ get_primary_key_attnos(Oid relid, bool deferrableOk, Oid *constraintOid)
/*
* Determine whether a relation can be proven functionally dependent on
* a set of grouping columns. If so, return TRUE and add the pg_constraint
* a set of grouping columns. If so, return true and add the pg_constraint
* OIDs of the constraints needed for the proof to the *constraintDeps list.
*
* grouping_columns is a list of grouping expressions, in which columns of

View File

@ -490,7 +490,7 @@ getExtensionOfObject(Oid classId, Oid objectId)
*
* An ownership marker is an AUTO or INTERNAL dependency from the sequence to the
* column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
* into *tableId and *colId and return true; else return false.
*
* Note: if there's more than one such pg_depend entry then you get
* a random one of them returned into the out parameters. This should

View File

@ -124,7 +124,7 @@ validOperatorName(const char *name)
* finds an operator given an exact specification (name, namespace,
* left and right type IDs).
*
* *defined is set TRUE if defined (not a shell)
* *defined is set true if defined (not a shell)
*/
static Oid
OperatorGet(const char *operatorName,
@ -164,7 +164,7 @@ OperatorGet(const char *operatorName,
* looks up an operator given a possibly-qualified name and
* left and right type IDs.
*
* *defined is set TRUE if defined (not a shell)
* *defined is set true if defined (not a shell)
*/
static Oid
OperatorLookup(List *operatorName,

View File

@ -821,9 +821,9 @@ makeArrayTypeName(const char *typeName, Oid typeNamespace)
* determine the new type's own array type name; else the latter will
* certainly pick the same name.
*
* Returns TRUE if successfully moved the type, FALSE if not.
* Returns true if successfully moved the type, false if not.
*
* We also return TRUE if the given type is a shell type. In this case
* We also return true if the given type is a shell type. In this case
* the type has not been renamed out of the way, but nonetheless it can
* be expected that TypeCreate will succeed. This behavior is convenient
* for most callers --- those that need to distinguish the shell-type case

View File

@ -88,7 +88,7 @@ typedef enum EolType
* characters, else we might find a false match to a trailing byte. In
* supported server encodings, there is no possibility of a false match, and
* it's faster to make useless comparisons to trailing bytes than it is to
* invoke pg_encoding_mblen() to skip over them. encoding_embeds_ascii is TRUE
* invoke pg_encoding_mblen() to skip over them. encoding_embeds_ascii is true
* when we have to do it the hard way.
*/
typedef struct CopyStateData
@ -726,7 +726,7 @@ CopyGetInt16(CopyState cstate, int16 *val)
/*
* CopyLoadRawBuf loads some more data into raw_buf
*
* Returns TRUE if able to obtain at least one more byte, else FALSE.
* Returns true if able to obtain at least one more byte, else false.
*
* If raw_buf_index < raw_buf_len, the unprocessed bytes are transferred
* down to the start of the buffer and then we load more data after that.
@ -763,7 +763,7 @@ CopyLoadRawBuf(CopyState cstate)
* DoCopy executes the SQL COPY statement
*
* Either unload or reload contents of table <relation>, depending on <from>.
* (<from> = TRUE means we are inserting into the table.) In the "TO" case
* (<from> = true means we are inserting into the table.) In the "TO" case
* we also support copying the output of an arbitrary SELECT, INSERT, UPDATE
* or DELETE query.
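
For readers unfamiliar with the refill pattern the CopyLoadRawBuf comment above describes: any unprocessed bytes are slid down to the start of the buffer and new data is read in behind them, with true returned only if at least one fresh byte was obtained. A stand-alone sketch of that pattern, reading from a plain stdio stream rather than the COPY protocol, with invented names (RawBuffer, raw_load):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define RAW_BUF_SIZE 65536

    typedef struct RawBuffer
    {
        char  data[RAW_BUF_SIZE];
        int   len;              /* number of valid bytes in data[] */
        int   index;            /* next unprocessed byte */
        FILE *src;
    } RawBuffer;

    /*
     * Move the unprocessed tail to the front of the buffer, then read more
     * bytes after it.  Returns true if at least one more byte was obtained.
     */
    static bool
    raw_load(RawBuffer *buf)
    {
        int    keep = buf->len - buf->index;
        size_t nread;

        if (keep > 0)
            memmove(buf->data, buf->data + buf->index, keep);
        buf->index = 0;
        buf->len = keep;

        nread = fread(buf->data + keep, 1, RAW_BUF_SIZE - keep, buf->src);
        buf->len += (int) nread;

        return nread > 0;
    }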
*

View File

@ -1718,8 +1718,8 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId)
/*
* Look up info about the database named "name". If the database exists,
* obtain the specified lock type on it, fill in any of the remaining
* parameters that aren't NULL, and return TRUE. If no such database,
* return FALSE.
* parameters that aren't NULL, and return true. If no such database,
* return false.
*/
static bool
get_db_info(const char *name, LOCKMODE lockmode,
@ -1923,7 +1923,7 @@ remove_dbtablespaces(Oid db_id)
/*
* Check for existing files that conflict with a proposed new DB OID;
* return TRUE if there are any
* return true if there are any
*
* If there were a subdirectory in any tablespace matching the proposed new
* OID, we'd get a create failure due to the duplicate name ... and then we'd

View File

@ -513,7 +513,7 @@ find_language_template(const char *languageName)
/*
* This just returns TRUE if we have a valid template for a given language
* This just returns true if we have a valid template for a given language
*/
bool
PLTemplateExists(const char *languageName)

View File

@ -2307,7 +2307,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
*
* constraints is a list of CookedConstraint structs for previous constraints.
*
* Returns TRUE if merged (constraint is a duplicate), or FALSE if it's
* Returns true if merged (constraint is a duplicate), or false if it's
* got a so-far-unique name, or throws error if conflict.
*/
static bool
@ -5778,7 +5778,7 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode)
*/
if (((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull)
{
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = FALSE;
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = false;
CatalogTupleUpdate(attr_rel, &tuple->t_self, tuple);
@ -5859,7 +5859,7 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
*/
if (!((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull)
{
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE;
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = true;
CatalogTupleUpdate(attr_rel, &tuple->t_self, tuple);
@ -8312,16 +8312,16 @@ validateForeignKeyConstraint(char *conname,
trig.tgoid = InvalidOid;
trig.tgname = conname;
trig.tgenabled = TRIGGER_FIRES_ON_ORIGIN;
trig.tgisinternal = TRUE;
trig.tgisinternal = true;
trig.tgconstrrelid = RelationGetRelid(pkrel);
trig.tgconstrindid = pkindOid;
trig.tgconstraint = constraintOid;
trig.tgdeferrable = FALSE;
trig.tginitdeferred = FALSE;
trig.tgdeferrable = false;
trig.tginitdeferred = false;
/* we needn't fill in remaining fields */
/*
* See if we can do it with a single LEFT JOIN query. A FALSE result
* See if we can do it with a single LEFT JOIN query. A false result
* indicates we must proceed with the fire-the-trigger method.
*/
if (RI_Initial_Check(&trig, rel, pkrel))

View File

@ -655,7 +655,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
* does not justify throwing an error that would require manual intervention
* to get the database running again.
*
* Returns TRUE if successful, FALSE if some subdirectory is not empty
* Returns true if successful, false if some subdirectory is not empty
*/
static bool
destroy_tablespace_directories(Oid tablespaceoid, bool redo)

View File

@ -127,7 +127,7 @@ static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
*
* If isInternal is true then this is an internally-generated trigger.
* This argument sets the tgisinternal field of the pg_trigger entry, and
* if TRUE causes us to modify the given trigger name to ensure uniqueness.
* if true causes us to modify the given trigger name to ensure uniqueness.
*
* When isInternal is not true we require ACL_TRIGGER permissions on the
* relation, as well as ACL_EXECUTE on the trigger function. For internal
@ -4124,10 +4124,10 @@ AfterTriggerExecute(AfterTriggerEvent event,
* If move_list isn't NULL, events that are not to be invoked now are
* transferred to move_list.
*
* When immediate_only is TRUE, do not invoke currently-deferred triggers.
* (This will be FALSE only at main transaction exit.)
* When immediate_only is true, do not invoke currently-deferred triggers.
* (This will be false only at main transaction exit.)
*
* Returns TRUE if any invokable events were found.
* Returns true if any invokable events were found.
*/
static bool
afterTriggerMarkEvents(AfterTriggerEventList *events,
@ -4191,14 +4191,14 @@ afterTriggerMarkEvents(AfterTriggerEventList *events,
* make one locally to cache the info in case there are multiple trigger
* events per rel.
*
* When delete_ok is TRUE, it's safe to delete fully-processed events.
* When delete_ok is true, it's safe to delete fully-processed events.
* (We are not very tense about that: we simply reset a chunk to be empty
* if all its events got fired. The objective here is just to avoid useless
* rescanning of events when a trigger queues new events during transaction
* end, so it's not necessary to worry much about the case where only
* some events are fired.)
*
* Returns TRUE if no unfired events remain in the list (this allows us
* Returns true if no unfired events remain in the list (this allows us
* to avoid repeating afterTriggerMarkEvents).
*/
static bool

View File

@ -3399,9 +3399,9 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
* AlterTypeOwner_oid - change type owner unconditionally
*
* This function recurses to handle a pg_class entry, if necessary. It
* invokes any necessary access object hooks. If hasDependEntry is TRUE, this
* invokes any necessary access object hooks. If hasDependEntry is true, this
* function modifies the pg_shdepend entry appropriately (this should be
* passed as FALSE only for table rowtypes and array types).
* passed as false only for table rowtypes and array types).
*
* This is used by ALTER TABLE/TYPE OWNER commands, as well as by REASSIGN
* OWNED BY. It assumes the caller has done all needed check.
@ -3567,10 +3567,10 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
* if any. isImplicitArray should be TRUE only when doing this internal
* if any. isImplicitArray should be true only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is
* If errorOnTableType is true, the function errors out if the type is
* a table type. ALTER TABLE has to be used to move a table to a new
* namespace.
*

View File

@ -32,7 +32,7 @@ static ScanState *search_plan_tree(PlanState *node, Oid table_oid);
* of the table is currently being scanned by the cursor named by CURRENT OF,
* and return the row's TID into *current_tid.
*
* Returns TRUE if a row was identified. Returns FALSE if the cursor is valid
* Returns true if a row was identified. Returns false if the cursor is valid
* for the table but is not currently scanning a row of the table (this is a
* legal situation in inheritance cases). Raises error if cursor is not a
* valid updatable scan of the specified table.

View File

@ -1487,8 +1487,8 @@ ExecCleanUpTriggerState(EState *estate)
* going to be stored into a relation that has OIDs. In other contexts
* we are free to choose whether to leave space for OIDs in result tuples
* (we generally don't want to, but we do if a physical-tlist optimization
* is possible). This routine checks the plan context and returns TRUE if the
* choice is forced, FALSE if the choice is not forced. In the TRUE case,
* is possible). This routine checks the plan context and returns true if the
* choice is forced, false if the choice is not forced. In the true case,
* *hasoids is set to the required value.
*
* One reason this is ugly is that all plan nodes in the plan tree will emit

View File

@ -2628,7 +2628,7 @@ agg_retrieve_hash_table(AggState *aggstate)
else
{
/* No more hashtables, so done */
aggstate->agg_done = TRUE;
aggstate->agg_done = true;
return NULL;
}
}
@ -4206,12 +4206,12 @@ AggGetTempMemoryContext(FunctionCallInfo fcinfo)
* AggStateIsShared - find out whether transition state is shared
*
* If the function is being called as an aggregate support function,
* return TRUE if the aggregate's transition state is shared across
* multiple aggregates, FALSE if it is not.
* return true if the aggregate's transition state is shared across
* multiple aggregates, false if it is not.
*
* Returns TRUE if not called as an aggregate support function.
* Returns true if not called as an aggregate support function.
* This is intended as a conservative answer, ie "no you'd better not
* scribble on your input". In particular, will return TRUE if the
* scribble on your input". In particular, will return true if the
* aggregate is being used as a window function, which is a scenario
* in which changing the transition state is a bad idea. We might
* want to refine the behavior for the window case in future.

View File

@ -88,10 +88,10 @@ exec_append_initialize_next(AppendState *appendstate)
/*
* if scanning in reverse, we start at the last scan in the list and
* then proceed back to the first.. in any case we inform ExecAppend
* that we are at the end of the line by returning FALSE
* that we are at the end of the line by returning false
*/
appendstate->as_whichplan = 0;
return FALSE;
return false;
}
else if (whichplan >= appendstate->as_nplans)
{
@ -99,11 +99,11 @@ exec_append_initialize_next(AppendState *appendstate)
* as above, end the scan if we go beyond the last scan in our list..
*/
appendstate->as_whichplan = appendstate->as_nplans - 1;
return FALSE;
return false;
}
else
{
return TRUE;
return true;
}
}

View File

@ -73,7 +73,7 @@ ExecGroup(PlanState *pstate)
if (TupIsNull(outerslot))
{
/* empty input, so return nothing */
node->grp_done = TRUE;
node->grp_done = true;
return NULL;
}
/* Copy tuple into firsttupleslot */
@ -116,7 +116,7 @@ ExecGroup(PlanState *pstate)
if (TupIsNull(outerslot))
{
/* no more groups, so we're done */
node->grp_done = TRUE;
node->grp_done = true;
return NULL;
}
@ -177,7 +177,7 @@ ExecInitGroup(Group *node, EState *estate, int eflags)
grpstate->ss.ps.plan = (Plan *) node;
grpstate->ss.ps.state = estate;
grpstate->ss.ps.ExecProcNode = ExecGroup;
grpstate->grp_done = FALSE;
grpstate->grp_done = false;
/*
* create expression context
@ -246,7 +246,7 @@ ExecReScanGroup(GroupState *node)
{
PlanState *outerPlan = outerPlanState(node);
node->grp_done = FALSE;
node->grp_done = false;
/* must clear first tuple */
ExecClearTuple(node->ss.ss_ScanTupleSlot);

View File

@ -918,10 +918,10 @@ ExecHashTableInsert(HashJoinTable hashtable,
* econtext->ecxt_innertuple. Vars in the hashkeys expressions should have
* varno either OUTER_VAR or INNER_VAR.
*
* A TRUE result means the tuple's hash value has been successfully computed
* and stored at *hashvalue. A FALSE result means the tuple cannot match
* A true result means the tuple's hash value has been successfully computed
* and stored at *hashvalue. A false result means the tuple cannot match
* because it contains a null attribute, and hence it should be discarded
* immediately. (If keep_nulls is true then FALSE is never returned.)
* immediately. (If keep_nulls is true then false is never returned.)
*/
bool
ExecHashGetHashValue(HashJoinTable hashtable,
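
The ExecHashGetHashValue contract above (combine per-column hashes into one value, or return false as soon as a null key is seen unless nulls are being kept) can be shown with a stand-alone sketch; the KeyValue struct, the rotate-and-xor mixing, and the multiplier are invented for illustration rather than taken from the executor:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct KeyValue
    {
        bool     isnull;
        uint32_t value;
    } KeyValue;

    /*
     * Combine the hashes of all key columns into *hashvalue and return true.
     * If a key is null, return false so the caller can discard the row,
     * unless keep_nulls is set, in which case the null contributes nothing.
     */
    static bool
    compute_row_hash(const KeyValue *keys, int nkeys, bool keep_nulls,
                     uint32_t *hashvalue)
    {
        uint32_t hash = 0;
        int      i;

        for (i = 0; i < nkeys; i++)
        {
            /* rotate so that column order affects the result */
            hash = (hash << 1) | (hash >> 31);

            if (keys[i].isnull)
            {
                if (!keep_nulls)
                    return false;   /* row can never match anything */
                continue;
            }
            hash ^= keys[i].value * 2654435761u;
        }

        *hashvalue = hash;
        return true;
    }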

View File

@ -676,8 +676,8 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
* ExecIndexEvalArrayKeys
* Evaluate any array key values, and set up to iterate through arrays.
*
* Returns TRUE if there are array elements to consider; FALSE means there
* is at least one null or empty array, so no match is possible. On TRUE
* Returns true if there are array elements to consider; false means there
* is at least one null or empty array, so no match is possible. On true
* result, the scankeys are initialized with the first elements of the arrays.
*/
bool
@ -756,8 +756,8 @@ ExecIndexEvalArrayKeys(ExprContext *econtext,
* ExecIndexAdvanceArrayKeys
* Advance to the next set of array key values, if any.
*
* Returns TRUE if there is another set of values to consider, FALSE if not.
* On TRUE result, the scankeys are initialized with the next set of values.
* Returns true if there is another set of values to consider, false if not.
* On true result, the scankeys are initialized with the next set of values.
*/
bool
ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys)
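
The advance-to-the-next-set-of-array-elements contract described here (and for _bt_advance_array_keys earlier) is essentially an odometer: advance the last array's cursor, and when it wraps, reset it and carry into the one before it; once every cursor has wrapped there are no more combinations. A self-contained sketch of that pattern with invented names (ArrayCursor, advance_array_cursors):

    #include <stdbool.h>

    typedef struct ArrayCursor
    {
        const int *elements;    /* candidate key values */
        int        nelems;
        int        cur;         /* index of the element currently in use */
    } ArrayCursor;

    /*
     * Advance to the next combination of array elements, odometer style.
     * Returns true if there is another combination; on a true result each
     * cursor points at the next set of values.  Returns false when all
     * combinations are exhausted.
     */
    static bool
    advance_array_cursors(ArrayCursor *cursors, int ncursors)
    {
        int i;

        for (i = ncursors - 1; i >= 0; i--)
        {
            if (++cursors[i].cur < cursors[i].nelems)
                return true;    /* this position advanced; we are done */
            cursors[i].cur = 0; /* wrapped; carry into the previous cursor */
        }
        return false;           /* every cursor wrapped: no more sets */
    }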

View File

@ -510,7 +510,7 @@ MJFillInner(MergeJoinState *node)
/*
* Check that a qual condition is constant true or constant false.
* If it is constant false (or null), set *is_const_false to TRUE.
* If it is constant false (or null), set *is_const_false to true.
*
* Constant true would normally be represented by a NIL list, but we allow an
* actual bool Const as well. We do expect that the planner will have thrown

View File

@ -220,7 +220,7 @@ ExecScanSubPlan(SubPlanState *node,
MemoryContext oldcontext;
TupleTableSlot *slot;
Datum result;
bool found = false; /* TRUE if got at least one subplan tuple */
bool found = false; /* true if got at least one subplan tuple */
ListCell *pvar;
ListCell *l;
ArrayBuildStateAny *astate = NULL;

View File

@ -1907,9 +1907,9 @@ _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
* snapshot: query snapshot to use, or InvalidSnapshot for the normal
* behavior of taking a new snapshot for each query.
* crosscheck_snapshot: for RI use, all others pass InvalidSnapshot
* read_only: TRUE for read-only execution (no CommandCounterIncrement)
* fire_triggers: TRUE to fire AFTER triggers at end of query (normal case);
* FALSE means any AFTER triggers are postponed to end of outer query
* read_only: true for read-only execution (no CommandCounterIncrement)
* fire_triggers: true to fire AFTER triggers at end of query (normal case);
* false means any AFTER triggers are postponed to end of outer query
* tcount: execution tuple-count limit, or 0 for none
*/
static int

View File

@ -48,7 +48,7 @@ struct TupleQueueReader
/*
* Receive a tuple from a query, and send it to the designated shm_mq.
*
* Returns TRUE if successful, FALSE if shm_mq has been detached.
* Returns true if successful, false if shm_mq has been detached.
*/
static bool
tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)

View File

@ -428,7 +428,7 @@ GetFdwRoutineForRelation(Relation relation, bool makecopy)
/*
* IsImportableForeignTable - filter table names for IMPORT FOREIGN SCHEMA
*
* Returns TRUE if given table name should be imported according to the
* Returns true if given table name should be imported according to the
* statement's import filter options.
*/
bool

View File

@ -187,9 +187,9 @@ pg_isblank(const char c)
* set *err_msg to a string describing the error. Currently the only
* possible error is token too long for buf.
*
* If successful: store null-terminated token at *buf and return TRUE.
* If no more tokens on line: set *buf = '\0' and return FALSE.
* If error: fill buf with truncated or misformatted token and return FALSE.
* If successful: store null-terminated token at *buf and return true.
* If no more tokens on line: set *buf = '\0' and return false.
* If error: fill buf with truncated or misformatted token and return false.
*/
static bool
next_token(char **lineptr, char *buf, int bufsz,
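
The three-outcome contract spelled out for next_token (true with a token, false with an empty buffer at end of line, false with a truncated token plus an error message) is a common shape for configuration-file tokenizers. A stand-alone sketch of that contract for whitespace-separated tokens, with invented names (scan_token, is_blank) and none of pg_hba.conf's quoting or comment handling:

    #include <stdbool.h>
    #include <stddef.h>

    static bool
    is_blank(char c)
    {
        return c == ' ' || c == '\t' || c == '\r';
    }

    /*
     * Copy the next whitespace-separated token from *lineptr into buf
     * (bufsz must be at least 1).
     * On success: advance *lineptr past the token and return true.
     * At end of line: set buf[0] = '\0' and return false.
     * On overflow: store the truncated token, set *err_msg, return false.
     */
    static bool
    scan_token(const char **lineptr, char *buf, size_t bufsz,
               const char **err_msg)
    {
        const char *p = *lineptr;
        size_t      n = 0;

        *err_msg = NULL;
        while (is_blank(*p))
            p++;

        if (*p == '\0' || *p == '\n')
        {
            buf[0] = '\0';
            *lineptr = p;
            return false;       /* no more tokens on this line */
        }

        while (*p != '\0' && *p != '\n' && !is_blank(*p))
        {
            if (n + 1 >= bufsz)
            {
                buf[n] = '\0';
                *err_msg = "token too long";
                *lineptr = p;
                return false;   /* error: token truncated */
            }
            buf[n++] = *p++;
        }

        buf[n] = '\0';
        *lineptr = p;
        return true;
    }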

View File

@ -914,7 +914,7 @@ RemoveSocketFiles(void)
/* --------------------------------
* socket_set_nonblocking - set socket blocking/non-blocking
*
* Sets the socket non-blocking if nonblocking is TRUE, or sets it
* Sets the socket non-blocking if nonblocking is true, or sets it
* blocking otherwise.
* --------------------------------
*/

View File

@ -558,8 +558,8 @@ bms_singleton_member(const Bitmapset *a)
* bms_get_singleton_member
*
* Test whether the given set is a singleton.
* If so, set *member to the value of its sole member, and return TRUE.
* If not, return FALSE, without changing *member.
* If so, set *member to the value of its sole member, and return true.
* If not, return false, without changing *member.
*
* This is more convenient and faster than calling bms_membership() and then
* bms_singleton_member(), if we don't care about distinguishing empty sets
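
The bms_get_singleton_member contract above (fill *member and return true only when the set has exactly one element, leaving *member untouched otherwise) can be illustrated with a single machine word standing in for a Bitmapset; the power-of-two test below is a generic trick, not the multi-word implementation in bitmapset.c:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * If exactly one bit of 'set' is set, store its position in *member and
     * return true.  Otherwise return false without changing *member.
     */
    static bool
    get_singleton_member(uint64_t set, int *member)
    {
        int pos = 0;

        if (set == 0 || (set & (set - 1)) != 0)
            return false;       /* empty, or more than one member */

        while ((set & 1) == 0)
        {
            set >>= 1;
            pos++;
        }
        *member = pos;
        return true;
    }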

View File

@ -663,7 +663,7 @@ strip_implicit_coercions(Node *node)
* Test whether an expression returns a set result.
*
* Because we use expression_tree_walker(), this can also be applied to
* whole targetlists; it'll produce TRUE if any one of the tlist items
* whole targetlists; it'll produce true if any one of the tlist items
* returns a set.
*/
bool
@ -1632,9 +1632,9 @@ set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
* check_functions_in_node -
* apply checker() to each function OID contained in given expression node
*
* Returns TRUE if the checker() function does; for nodes representing more
* than one function call, returns TRUE if the checker() function does so
* for any of those functions. Returns FALSE if node does not invoke any
* Returns true if the checker() function does; for nodes representing more
* than one function call, returns true if the checker() function does so
* for any of those functions. Returns false if node does not invoke any
* SQL-visible function. Caller must not pass node == NULL.
*
* This function examines only the given node; it does not recurse into any

View File

@ -593,7 +593,7 @@ tbm_intersect(TIDBitmap *a, const TIDBitmap *b)
/*
* Process one page of a during an intersection op
*
* Returns TRUE if apage is now empty and should be deleted from a
* Returns true if apage is now empty and should be deleted from a
*/
static bool
tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b)
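
The page-level intersection step described above (AND one page of bitmap a with the matching page of b, and report whether the page became empty so the caller can delete it) has a very small generic core; a sketch over plain word arrays, with invented names and none of the TIDBitmap page-table handling:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * AND apage with bpage in place.  Returns true if apage is now empty
     * and should be deleted by the caller.
     */
    static bool
    intersect_page(uint64_t *apage, const uint64_t *bpage, int nwords)
    {
        uint64_t any = 0;
        int      i;

        for (i = 0; i < nwords; i++)
        {
            apage[i] &= bpage[i];
            any |= apage[i];
        }
        return any == 0;
    }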

View File

@ -1884,7 +1884,7 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
* Zero out result area for subquery_is_pushdown_safe, so that it can set
* flags as needed while recursing. In particular, we need a workspace
* for keeping track of unsafe-to-reference columns. unsafeColumns[i]
* will be set TRUE if we find that output column i of the subquery is
* will be set true if we find that output column i of the subquery is
* unsafe to use in a pushed-down qual.
*/
memset(&safetyInfo, 0, sizeof(safetyInfo));
@ -2566,7 +2566,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* In addition, we make several checks on the subquery's output columns to see
* if it is safe to reference them in pushed-down quals. If output column k
* is found to be unsafe to reference, we set safetyInfo->unsafeColumns[k]
* to TRUE, but we don't reject the subquery overall since column k might not
* to true, but we don't reject the subquery overall since column k might not
* be referenced by some/all quals. The unsafeColumns[] array will be
* consulted later by qual_is_pushdown_safe(). It's better to do it this way
* than to make the checks directly in qual_is_pushdown_safe(), because when
@ -2688,7 +2688,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
*
* There are several cases in which it's unsafe to push down an upper-level
* qual if it references a particular output column of a subquery. We check
* each output column of the subquery and set unsafeColumns[k] to TRUE if
* each output column of the subquery and set unsafeColumns[k] to true if
* that column is unsafe for a pushed-down qual to reference. The conditions
* checked here are:
*

View File

@ -72,7 +72,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root,
* any delay by an outer join, so its two sides can be considered equal
* anywhere they are both computable; moreover that equality can be
* extended transitively. Record this knowledge in the EquivalenceClass
* data structure, if applicable. Returns TRUE if successful, FALSE if not
* data structure, if applicable. Returns true if successful, false if not
* (in which case caller should treat the clause as ordinary, not an
* equivalence).
*
@ -602,8 +602,8 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
* so for now we live with just reporting the first match. See also
* generate_implied_equalities_for_column and match_pathkeys_to_index.)
*
* If create_it is TRUE, we'll build a new EquivalenceClass when there is no
* match. If create_it is FALSE, we just return NULL when no match.
* If create_it is true, we'll build a new EquivalenceClass when there is no
* match. If create_it is false, we just return NULL when no match.
*
* This can be used safely both before and after EquivalenceClass merging;
* since it never causes merging it does not invalidate any existing ECs
@ -1675,7 +1675,7 @@ reconsider_outer_join_clauses(PlannerInfo *root)
/*
* reconsider_outer_join_clauses for a single LEFT/RIGHT JOIN clause
*
* Returns TRUE if we were able to propagate a constant through the clause.
* Returns true if we were able to propagate a constant through the clause.
*/
static bool
reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
@ -1800,7 +1800,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
/*
* reconsider_outer_join_clauses for a single FULL JOIN clause
*
* Returns TRUE if we were able to propagate a constant through the clause.
* Returns true if we were able to propagate a constant through the clause.
*/
static bool
reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo)

View File

@ -838,12 +838,12 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
*
* If skip_nonnative_saop is non-NULL, we ignore ScalarArrayOpExpr clauses
* unless the index AM supports them directly, and we set *skip_nonnative_saop
* to TRUE if we found any such clauses (caller must initialize the variable
* to FALSE). If it's NULL, we do not ignore ScalarArrayOpExpr clauses.
* to true if we found any such clauses (caller must initialize the variable
* to false). If it's NULL, we do not ignore ScalarArrayOpExpr clauses.
*
* If skip_lower_saop is non-NULL, we ignore ScalarArrayOpExpr clauses for
* non-first index columns, and we set *skip_lower_saop to TRUE if we found
* any such clauses (caller must initialize the variable to FALSE). If it's
* non-first index columns, and we set *skip_lower_saop to true if we found
* any such clauses (caller must initialize the variable to false). If it's
* NULL, we do not ignore non-first ScalarArrayOpExpr clauses, but they will
* result in considering the scan's output to be unordered.
*

View File

@ -336,7 +336,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* across joins unless there's a join-order-constraint-based reason to do so.
* So we ignore the param_source_rels restriction when this case applies.
*
* allow_star_schema_join() returns TRUE if the param_source_rels restriction
* allow_star_schema_join() returns true if the param_source_rels restriction
* should be overridden, ie, it's okay to perform this join.
*/
static inline bool
@ -1880,7 +1880,7 @@ hash_inner_and_outer(PlannerInfo *root,
* Select mergejoin clauses that are usable for a particular join.
* Returns a list of RestrictInfo nodes for those clauses.
*
* *mergejoin_allowed is normally set to TRUE, but it is set to FALSE if
* *mergejoin_allowed is normally set to true, but it is set to false if
* this is a right/full join and there are nonmergejoinable join clauses.
* The executor's mergejoin machinery cannot handle such cases, so we have
* to avoid generating a mergejoin plan. (Note that this flag does NOT

Some files were not shown because too many files have changed in this diff.