Remove tabs after spaces in C comments

This was not changed in HEAD, but will be done later as part of a
pgindent run.  Future pgindent runs will also do this.

Report by Tom Lane

Backpatch through all supported branches, but not HEAD
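For illustration only (a constructed sketch adapted from the first hunk below, not a line taken verbatim from the diff): each hunk is a whitespace-only change in which a literal tab following a sentence-ending period inside a C comment is replaced by spaces, so the comment text itself is unchanged:

/* Before: a tab (shown here as <TAB>) follows the period */
/*
 * INTERVALSIZE should be the actual size-on-disk.<TAB>This might be less
 * than sizeof(Interval).
 */

/* After: the tab is replaced by spaces; the rendered text is identical */
/*
 * INTERVALSIZE should be the actual size-on-disk.  This might be less
 * than sizeof(Interval).
 */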
Bruce Momjian 2014-05-06 11:26:24 -04:00
parent 3ada1fab8b
commit 99058cfc63
561 changed files with 2793 additions and 2793 deletions

View File

@ -82,7 +82,7 @@ intr2num(const Interval *i)
/*
* INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
* in pg_type. This might be less than sizeof(Interval) if the compiler
* insists on adding alignment padding at the end of the struct.
*/
#define INTERVALSIZE 16

View File

@ -564,7 +564,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
rt_cube_size(datum_r, &size_r);
/*
* Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@ -580,7 +580,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
{
/*
* If we've already decided where to place this item, just put it on
* the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/

View File

@ -636,7 +636,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
"the specified FROM clause rowtype")));
/*
* fast track when no results. We could exit earlier, but then we'd
* not report error if the result tuple type is wrong.
*/
if (funcctx->max_calls < 1)
@ -2257,7 +2257,7 @@ dblink_security_check(PGconn *conn, remoteConn *rconn)
}
/*
* For non-superusers, insist that the connstr specify a password. This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
* to be accessible to non-superusers.

View File

@ -91,7 +91,7 @@ geo_distance_internal(Point *pt1, Point *pt2)
* distance between the points in miles on earth's surface
*
* If float8 is passed-by-value, the oldstyle version-0 calling convention
* is unportable, so we use version-1. However, if it's passed-by-reference,
* continue to use oldstyle. This is just because we'd like earthdistance
* to serve as a canary for any unintentional breakage of version-0 functions
* with float8 results.

View File

@ -487,7 +487,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
/*
* Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@ -505,7 +505,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
/*
* If we've already decided where to place this item, just put it on
* the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/

View File

@ -612,7 +612,7 @@ ltreeparentsel(PG_FUNCTION_ARGS)
/*
* If the histogram is large enough, see what fraction of it the
* constant is "<@" to, and assume that's representative of the
* non-MCV population. Otherwise use the default selectivity for the
* non-MCV population.
*/
selec = histogram_selectivity(&vardata, &contproc,

View File

@ -430,7 +430,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
}
/*
* Dump all databases. There are no system objects to worry about.
*/
void
sql_exec_dumpalldbs(PGconn *conn, struct options * opts)

View File

@ -791,7 +791,7 @@ pgss_memsize(void)
* caller must hold an exclusive lock on pgss->lock
*
* Note: despite needing exclusive lock, it's not an error for the target
* entry to already exist. This is because pgss_store releases and
* reacquires lock after failing to find a match; so someone else could
* have made the entry while we waited to get exclusive lock.
*/

View File

@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -9,7 +9,7 @@
* entirely in crypt_blowfish.c.
*
* Put bcrypt generator also here as crypt-blowfish.c
* may not be compiled always. -- marko
*/
#include "postgres.h"

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -53,7 +53,7 @@
/*
* There is some confusion about whether and how to carry forward
* the state of the pools. Seems like original Fortuna does not
* do it, resetting hash after each request. I guess expecting
* feeding to happen more often than requesting. This is absolutely
* unsuitable for pgcrypto, as nothing asynchronous happens here.
@ -77,7 +77,7 @@
* How many pools.
*
* Original Fortuna uses 32 pools, that means 32'th pool is
* used not earlier than in 13th year. This is a waste in
* pgcrypto, as we have very low-frequency seeding. Here it
* is preferable to have all entropy usable in reasonable time.
*
@ -296,7 +296,7 @@ reseed(FState *st)
}
/*
* Pick a random pool. This uses key bytes as random source.
*/
static unsigned
get_rand_pool(FState *st)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -21,7 +21,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
@ -211,7 +211,7 @@ static int s_vcmp(mp_int a, int v);
static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
/* Unsigned magnitude subtraction. Assumes dc is big enough. */
static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
@ -2275,7 +2275,7 @@ mp_error_string(mp_result res)
/* }}} */
/*------------------------------------------------------------------------*/
/* Private functions for internal use. These make assumptions. */
/* {{{ s_alloc(num) */

View File

@ -20,7 +20,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -90,7 +90,7 @@ pgp_cfb_free(PGP_CFB *ctx)
}
/*
* Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
*/
static int
mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -147,7 +147,7 @@ bn_to_mpi(mpz_t *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
* above that it uses 'arbitrary high number'. Following
* algorithm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorithm.
*

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -82,7 +82,7 @@ bn_to_mpi(BIGNUM *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
* above that it uses 'arbitrary high number'. Following
* algorithm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorithm.
*

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@ -93,7 +93,7 @@ add_block_entropy(PX_MD *md, text *data)
}
/*
* Mix user data into RNG. It is in the user's own interest to have
* RNG state shuffled.
*/
static void
@ -310,7 +310,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
}
/*
* Find next word. Handle ',' and '=' as words. Skip whitespace.
* Put word info into res_p, res_len.
* Returns ptr to next word.
*/

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -7,12 +7,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
/* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
@ -188,7 +188,7 @@ gen_tabs(void)
/* rijndael specification is in big endian format with */
/* bit 0 as the most significant bit. In the remainder */
/* of the specification the bits are numbered from the */
/* least significant end of a byte. */
for (i = 0; i < 256; ++i)
{

View File

@ -8,12 +8,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
/* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */

View File

@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

View File

@ -296,7 +296,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
/*
* To avoid physically reading the table twice, try to do the
* free-space scan in parallel with the heap scan. However,
* heap_getnext may find no tuples on a given page, so we cannot
* simply examine the pages returned by the heap scan.
*/

View File

@ -381,7 +381,7 @@ gseg_picksplit(GistEntryVector *entryvec,
rt_seg_size(datum_r, &size_r);
/*
* Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@ -397,7 +397,7 @@ gseg_picksplit(GistEntryVector *entryvec,
{
/*
* If we've already decided where to place this item, just put it on
* the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/

View File

@ -89,7 +89,7 @@ moddatetime(PG_FUNCTION_ARGS)
/*
* This is where we check to see if the field we are supposed to update
* even exists. The above function must return -1 if name not found?
*/
if (attnum < 0)
ereport(ERROR,

View File

@ -47,17 +47,17 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
/*
* timetravel () --
* 1. IF an update affects tuple with stop_date eq INFINITY
* then form (and return) new tuple with start_date eq current date
* and stop_date eq INFINITY [ and update_user eq current user ]
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
* ELSE - skip update of tuple.
* 2. IF a delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]
* ELSE - skip deletion of tuple.
* 3. On INSERT, if start_date is NULL then current date will be
* inserted, if stop_date is NULL then INFINITY will be inserted.
* [ and insert_user eq current user, update_user and delete_user
* eq NULL ]

View File

@ -104,7 +104,7 @@ ssl_client_serial(PG_FUNCTION_ARGS)
* current database encoding if possible. Any invalid characters are
* replaced by question marks.
*
* Parameter: str - OpenSSL ASN1_STRING structure. Memory management
* of this structure is responsibility of caller.
*
* Returns Datum, which can be directly returned from a C language SQL

View File

@ -241,7 +241,7 @@ vacuumlo(char *database, struct _param * param)
PQclear(res);
/*
* Run the actual deletes in a single transaction. Note that this would
* be a bad idea in pre-7.1 Postgres releases (since rolling back a table
* delete used to cause problems), but it should be safe now.
*/

View File

@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
* that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@ -427,7 +427,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
* target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@ -494,7 +494,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
* then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@ -590,7 +590,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
* can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@ -750,7 +750,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
* Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);

View File

@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
* on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@ -314,7 +314,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
* target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@ -381,7 +381,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
* then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/

View File

@ -166,7 +166,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
* targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/

View File

@ -432,7 +432,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
* validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@ -499,7 +499,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val,
* Note that this is not responsible for determining whether the options
* are valid, but it does check that namespaces for all the options given are
* listed in validnsps. The NULL namespace is always valid and needs not be
* explicitly listed. Passing a NULL pointer means that only the NULL
* namespace is valid.
*
* Both oldOptions and the result are text arrays (or NULL for "default"),
@ -772,7 +772,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
* returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *

View File

@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
* of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*

View File

@ -507,7 +507,7 @@ TupleDescInitEntry(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
* TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc

View File

@ -662,7 +662,7 @@ processPendingPage(BuildAccumulator *accum, DatumArray *da,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
* either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@ -715,7 +715,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
* Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@ -809,7 +809,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
* added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* guarantees that inserted row(s) will not continue on next page.

View File

@ -1293,10 +1293,10 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
* case like
*
* stream 1 stream 2
* ... ...
* 42/6 42/7
* 50/1 42/0xffff
* ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@ -1368,10 +1368,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
* bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
* bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.

View File

@ -201,7 +201,7 @@ newScanKey(IndexScanDesc scan)
{
/*
* extractQueryFn signals that nothing can match, so we can just
* set isVoidRes flag. No need to examine any more keys.
*/
so->isVoidRes = true;
break;
@ -210,9 +210,9 @@ newScanKey(IndexScanDesc scan)
if (entryValues == NULL || nEntryValues == 0)
{
/*
* extractQueryFn signals that everything matches. This would
* require a full scan, which we can't do, but perhaps there is
* another scankey that provides a restriction to use. So we keep
* going and check only at the end.
*/
continue;

View File

@ -191,7 +191,7 @@ gistbuildCallback(Relation index,
/*
* Since we already have the index relation locked, we call gistdoinsert
* directly. Normal access method calls dispatch through gistinsert,
* which locks the relation for write. This is the right thing to do if
* you're inserting single tups, but not when you're initializing the
* whole index at once.
*

View File

@ -72,7 +72,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
* Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@ -161,7 +161,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
* of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@ -194,7 +194,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
* the previously-computed union keys. We need consider only columns starting
* at attno.
*/
static void
@ -292,7 +292,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
* or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@ -426,7 +426,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
* Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2(&giststate->picksplitFn[attno],

View File

@ -448,7 +448,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
* as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@ -464,7 +464,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
* best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
@ -631,7 +631,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))

View File

@ -528,7 +528,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->std.estimated_count = info->estimated_count;
/*
* XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}

View File

@ -76,7 +76,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
* tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@ -510,7 +510,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
* hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@ -641,7 +641,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
* double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/

View File

@ -11,7 +11,7 @@
* $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.59 2009/06/11 14:48:53 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@ -163,7 +163,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
* different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@ -241,7 +241,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
* direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@ -275,7 +275,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but it does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
* bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@ -296,7 +296,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
* Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).

View File

@ -81,7 +81,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@ -90,12 +90,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
* no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
* immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@ -158,7 +158,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
* Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@ -254,7 +254,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
* is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@ -285,7 +285,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@ -314,7 +314,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@ -495,7 +495,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
* Initialize a new bitmap page. The metapage has a write-lock upon
* Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.


@ -52,7 +52,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
* take locks. We still take buffer-level locks, but not lmgr locks.
* take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@ -139,7 +139,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
* _hash_pageinit() is applied automatically. Otherwise it has
* _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@ -347,7 +347,7 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
* as the user-settable fillfactor parameter says. We can compute it
* as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
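The computation then continues roughly as follows (a paraphrased sketch, not the verbatim source): the width of one index entry, line pointer included, divides into the page usage implied by the fillfactor setting:

    item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
                 sizeof(ItemIdData);     /* include the line pointer */
    ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) /
              item_width;
    if (ffactor < 10)                    /* keep it sane for tiny fillfactors */
        ffactor = 10;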
@ -380,7 +380,7 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
* calls to occur. This ensures that the smgr level has the right idea of
* calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
@ -516,9 +516,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Note: deadlock should be impossible here. Our own backend could only be
* holding bucket sharelocks due to stopped indexscans; those will not
* block other holders of the page-zero lock, who are only interested in
* acquiring bucket sharelocks themselves. Exclusive bucket locks are
* acquiring bucket sharelocks themselves. Exclusive bucket locks are
* only taken here and in hashbulkdelete, and neither of these operations
* needs any additional locks to complete. (If, due to some flaw in this
* needs any additional locks to complete. (If, due to some flaw in this
* reasoning, we manage to deadlock anyway, it's okay to error out; the
* index will be left in a consistent state.)
*/
@ -560,7 +560,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
* bucket. If we can't get the lock, give up.
* bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@ -618,7 +618,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
* Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@ -656,7 +656,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
* date before _hash_splitbucket finishes. That's okay, since all it
* date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
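For reference, the mapping these copied fields drive looks like this (cf. _hash_hashkey2bucket in hashutil.c, lightly paraphrased): mask with the larger mask first, and remap with the smaller one when the result points at a bucket that does not exist yet:

    Bucket
    _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
                         uint32 highmask, uint32 lowmask)
    {
        Bucket      bucket;

        bucket = hashkey & highmask;
        if (bucket > maxbucket)
            bucket = bucket & lowmask;
        return bucket;
    }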
@ -897,7 +897,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/


@ -251,7 +251,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
* false. Else, return true and set the hashso_curpos for the
* false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.


@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
* bucket splits may occur during the index build. In that case we'd
* bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@ -52,7 +52,7 @@ _h_spoolinit(Relation index, uint32 num_buckets)
hspool->index = index;
/*
* Determine the bitmask for hash code values. Since there are currently
* Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
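The "as follows" refers to code cut off by this hunk; it is presumably along these lines (sketch): round num_buckets up to the next power of two and subtract one:

    hspool->hash_mask = (((uint32) 1) << _hash_log2(num_buckets)) - 1;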


@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
* existing hash keys in the page. This is the appropriate place to start
* existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
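The function body is truncated here; the search it describes is an ordinary lower-bound binary search, sketched below under the assumption that _hash_get_indextuple_hashkey() extracts the hash key from an index tuple:

    lower = FirstOffsetNumber;
    upper = PageGetMaxOffsetNumber(page) + 1;
    while (upper > lower)
    {
        OffsetNumber off = lower + (upper - lower) / 2;
        IndexTuple   itup = (IndexTuple) PageGetItem(page,
                                             PageGetItemId(page, off));

        if (_hash_get_indextuple_hashkey(itup) < hash_value)
            lower = off + 1;        /* everything below off is too small */
        else
            upper = off;            /* off might be the answer */
    }
    return lower;   /* first entry >= hash_value, or maxoff + 1 */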


@ -106,7 +106,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
* might go into pages we already scanned. To guarantee consistent
* might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@ -114,7 +114,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
* strategy and enable synchronized scanning (see syncscan.c). Although
* strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
@ -243,7 +243,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
* visibility. Afterwards, however, the tuples we have found to be
* visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@ -1640,7 +1640,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
* show the latest version of the row. Note that it will not be changed
* show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@ -1758,7 +1758,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
* be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@ -1831,7 +1831,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
* TID where the tuple was stored. But note that any toasting of fields
* TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@ -1852,7 +1852,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If the object id of this tuple has already been assigned, trust the
* caller. There are a couple of ways this can happen. At initial db
* caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@ -2138,10 +2138,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
* other subxacts of this backend. It is legal for us to delete
* other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
* exclusive). We don't bother changing the on-disk hint bits
* exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@ -2205,7 +2205,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
* falls below the OldestXmin horizon. If the transaction finally aborts,
* falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@ -2306,7 +2306,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
* on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@ -2402,7 +2402,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
* different page. But we must compute the list before obtaining buffer
* different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@ -2490,10 +2490,10 @@ l2:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
* other subxacts of this backend. It is legal for us to update
* other subxacts of this backend. It is legal for us to update
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
* exclusive). We don't bother changing the on-disk hint bits
* exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@ -2583,7 +2583,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
* TOAST and/or table-file-extension work. We must mark the old tuple to
* TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@ -2648,7 +2648,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
* buffer locks on both old and new pages. To avoid deadlock against
* buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
@ -2696,7 +2696,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
* has enough space for the new tuple. If they are the same buffer, only
* has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
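A minimal sketch of the lock-ordering rule mentioned above ("lock the lower-numbered page of the relation first"), using the standard buffer-manager calls:

    if (BufferGetBlockNumber(buffer) < BufferGetBlockNumber(newbuf))
    {
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);   /* lower block first */
        LockBuffer(newbuf, BUFFER_LOCK_EXCLUSIVE);
    }
    else
    {
        LockBuffer(newbuf, BUFFER_LOCK_EXCLUSIVE);
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
    }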
@ -2704,7 +2704,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
* to do a HOT update. Check if any of the index columns have been
* to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
@ -2722,13 +2722,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
* falls below the OldestXmin horizon. If the transaction finally aborts,
* falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
* not to optimize for aborts. Note that heap_xlog_update must be kept in
* not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
@ -2892,7 +2892,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
* there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* single heap_deform_tuple call on each tuple, instead? But that doesn't
* work for system columns ...
*/
@ -2915,7 +2915,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
* same logical value. But we should be OK as long as there are no false
* same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@ -2971,7 +2971,7 @@ HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
* on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@ -3053,7 +3053,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* waiter gets the tuple, potentially leading to indefinite starvation of
* some waiters. The possibility of share-locking makes the problem much
* worse --- a steady stream of share-lockers can easily block an exclusive
* locker forever. To provide more reliable semantics about who gets a
* locker forever. To provide more reliable semantics about who gets a
* tuple-level lock first, we use the standard lock manager. The protocol
* for waiting for a tuple-level lock is really
* LockTuple()
@ -3061,7 +3061,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* mark tuple as locked by me
* UnlockTuple()
* When there are multiple waiters, arbitration of who is to get the lock next
* is provided by LockTuple(). However, at most one tuple-level lock will
* is provided by LockTuple(). However, at most one tuple-level lock will
* be held or awaited per backend at any time, so we don't risk overflow
* of the lock table. Note that incoming share-lockers are required to
* do LockTuple as well, if there is any conflict, to ensure that they don't
@ -3203,7 +3203,7 @@ l3:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
* other subxacts of this backend. It is legal for us to lock the
* other subxacts of this backend. It is legal for us to lock the
* tuple in either case, however. We don't bother changing the
* on-disk hint bits since we are about to overwrite the xmax
* altogether.
@ -3361,7 +3361,7 @@ l3:
/*
* Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
* as running, but it finished before
* TransactionIdIsInProgress() got to run. Treat it like
* TransactionIdIsInProgress() got to run. Treat it like
* there's no locker in the tuple.
*/
}
@ -3397,8 +3397,8 @@ l3:
MarkBufferDirty(*buffer);
/*
* XLOG stuff. You might think that we don't need an XLOG record because
* there is no state change worth restoring after a crash. You would be
* XLOG stuff. You might think that we don't need an XLOG record because
* there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@ -3460,7 +3460,7 @@ l3:
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
* of this function in Postgres are extremely limited. Nonetheless we
* of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@ -3614,7 +3614,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
* exclusive lock. (We don't need this pushup for xmin, because only
* exclusive lock. (We don't need this pushup for xmin, because only
* VACUUM could be interested in changing an existing tuple's xmin, and
* there's only one VACUUM allowed on a table at a time.)
*/
@ -3755,7 +3755,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
* If we reached end of scan, rs_inited will now be false. We must
* If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@ -3866,7 +3866,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-freeze operation. Caller must already
* Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@ -3909,7 +3909,7 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-update operation. Caller must already
* Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@ -4032,7 +4032,7 @@ log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
* directly to smgr, rather than using bufmgr. Therefore there is no need
* directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*


@ -116,7 +116,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
* enough there). In that case, the page will be pinned and locked only once.
* enough there). In that case, the page will be pinned and locked only once.
*
* We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
@ -133,7 +133,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
* insertions into the same relation. This keeps a pin on the current
* insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@ -186,7 +186,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
* on, as cached in the BulkInsertState or relcache entry. If that
* on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To ensure this isn't an infinite
@ -218,7 +218,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
* give up and extend. This avoids one-tuple-per-page syndrome during
* give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@ -279,7 +279,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
* any) and prepare to look elsewhere. We don't care which order we
* any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@ -321,7 +321,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is
* only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@ -341,7 +341,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
* the relation some more. Note that we cannot release this lock before
* the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/


@ -92,7 +92,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
* or pd_upper is probably atomic. Avoiding taking a lock seems more
* or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
@ -134,7 +134,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*
* If redirect_move is set, we remove redirecting line pointers by
* updating the root line pointer to point directly to the first non-dead
* tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
* The only reason we support this capability at all is that by using it,
* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
@ -333,8 +333,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
* caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
* prstate showing the changes to be made. Items to be redirected are added
* caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
* prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
@ -381,7 +381,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
* any pruning occurs. So we have to be able to reap them
* any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@ -562,7 +562,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
* count it in the result. But changing a redirect (even to DEAD
* count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@ -692,7 +692,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
* to replay the WAL record when needed after a crash. Note that the
* to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void


@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
* rebuilding indexes. The caller must hold AccessExclusiveLock on the
* rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
* or B first. (Note that a tuple in the middle of a chain is both A and B
* or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
* is encountered. That helps to keep the memory usage down. At the end,
* is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
* for deadness using OldestXmin is not exact. In such a case we might
* for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
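A sketch of the bookkeeping this implies: both tables are hashed on the old tuple's identity. The names below follow the comment; the exact struct layout is an assumption:

    typedef struct
    {
        TransactionId   xmin;   /* xmin of B (equals xmax of A) */
        ItemPointerData tid;    /* B's old TID */
    } TidHashKey;

    /*
     * unresolved_tups:  saw A first; holds A's tuple until B arrives.
     * old_new_tid_map:  saw B first; holds B's new TID until A arrives.
     * Entries are removed as soon as the partner tuple is encountered.
     */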
@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
* in the whole table. Note that if we do fail halfway through a CLUSTER,
* in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@ -339,7 +339,7 @@ rewrite_heap_tuple(RewriteState state,
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
* Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
* be given a pointer to a tuple in a disk buffer. It happens though that
* be given a pointer to a tuple in a disk buffer. It happens though that
* we can get the right things to happen by passing InvalidBuffer for the
* buffer.
*/
@ -543,7 +543,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
* Insert a tuple into the new relation. This has to track heap_insert
* Insert a tuple into the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the


@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
* to keep them synchronized to reduce the overall I/O needed. The goal is
* to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
* tables in progress at any time. Therefore we just keep the scan positions
* tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
@ -245,7 +245,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
* so that number is passed in rather than computing it again. The result
* so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber


@ -545,7 +545,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
* PLAIN storage). If necessary, we'll push it out as a new
* PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@ -686,7 +686,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
* inline. But skip this if there's no toast table to push them to.
* inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@ -1310,7 +1310,7 @@ toast_save_datum(Relation rel, Datum value, int options)
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
* Create the index entry. We cheat a little here by not using
* Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*


@ -41,7 +41,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
* routine does that. (We can't do it in the AM because index_endscan
* routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@ -336,7 +336,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
* Currently we do not support non-index-based scans here. (In principle
* Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of


@ -126,7 +126,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
* obtained on the index. (Generally, NoLock should only be
* obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@ -287,7 +287,7 @@ index_beginscan_internal(Relation indexRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
* cannot change). To restart the scan without changing keys, pass NULL
* cannot change). To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@ -375,7 +375,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
* that merge-join plans only work for MVCC snapshots. This could be fixed
* that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@ -400,7 +400,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
* snapshot, or NULL if no more matching tuples exist. On success,
* snapshot, or NULL if no more matching tuples exist. On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan).
*
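A sketch of the calling pattern this implies, assuming a caller that already has a heap/index relation pair, a snapshot, and scan keys (signatures as in the genam API of this vintage; error handling omitted):

    IndexScanDesc scan;
    HeapTuple     tuple;

    scan = index_beginscan(heapRel, indexRel, snapshot, nkeys, key);
    while ((tuple = index_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple's buffer is pinned; the pin is dropped on the next call */
        /* ... examine tuple ... */
    }
    index_endscan(scan);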
@ -438,7 +438,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
* earlier member. Must still hold pin on current heap page.
* earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@ -556,7 +556,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* The xmin should match the previous xmax value, else chain is
* broken. (Note: this test is not optional because it protects
* broken. (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax aborted
* since we looked at it.)
*/
@ -758,7 +758,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
* type instead). Only the default functions are stored in relcache
* type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@ -792,7 +792,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
* support procs in the relcache. As above, only the "default"
* support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during


@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
* That could overflow and give the wrong answer. Also, one must not
* That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
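A minimal example of an overflow-safe comparator in the style this file expects (the function name is illustrative, not from the source):

    static int
    my_int4_cmp(int32 a, int32 b)
    {
        if (a > b)
            return 1;           /* any positive value reports ">" */
        if (a < b)
            return -1;          /* never INT_MIN, since callers may negate */
        return 0;
    }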


@ -112,7 +112,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
* move right in the tree. See Lehman and Yao for an excruciatingly
* move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
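The core of the move-right step looks roughly like this (cf. _bt_moveright, lightly paraphrased; cmpval is 1, or 0 when "nextkey" semantics are wanted): while the page is ignorable or the scan key is at or beyond the page's high key, the page must have split, so follow the right-link:

    page = BufferGetPage(buf);
    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    while (!P_RIGHTMOST(opaque) &&
           (P_IGNORE(opaque) ||
            _bt_compare(rel, keysz, scankey, page, P_HIKEY) >= cmpval))
    {
        /* step right one page, releasing the current buffer */
        buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, access);
        page = BufferGetPage(buf);
        opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    }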
@ -169,7 +169,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
* we must wait for to see if it commits a conflicting tuple. If an actual
* we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*/
static TransactionId
@ -361,7 +361,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
* the next page. If it is equal to the high key, and there's not room
* the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@ -373,7 +373,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
* where the new tuple could be inserted. The caller should hold an
* where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@ -438,7 +438,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
* pages). Currently the probability of moving right is set at 0.99,
* pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
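In code, the "get tired" choice reduces to one random test per step, roughly as in the _bt_findinsertloc loop:

    if (P_RIGHTMOST(lpageop) ||
        _bt_compare(rel, keysz, itup_scankey, page, P_HIKEY) != 0 ||
        random() <= (MAX_RANDOM_VALUE / 100))
        break;          /* ~1% chance: stop here and split this page */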
@ -539,7 +539,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
* insertion, and the buffer must be pinned and write-locked. On return,
* insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* The locking interactions in this code are critical. You should
@ -1018,7 +1018,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
* examine both pages. But if D, our right sibling, has a different
* examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@ -1240,7 +1240,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
* the left or right page. The bool is necessary to disambiguate the case
* the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@ -1476,7 +1476,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
* write locks once we have write lock on the parent page. (Any sooner,
* write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@ -1499,7 +1499,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
* descended the tree. If it was still the root then we perform a
* descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@ -1649,7 +1649,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
* is. Scan to the right first, then to the left.
* is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;


@ -12,7 +12,7 @@
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.113.2.1 2010/08/29 19:33:29 tgl Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
* Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@ -56,7 +56,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
* Set pd_lower just past the end of the metadata. This is not essential
* Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
@ -74,7 +74,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
* and no root page exists, we just return InvalidBuffer. For
* and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@ -191,7 +191,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
* over again. (Is that really true? But it's hardly worth trying
* over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@ -256,7 +256,7 @@ _bt_getroot(Relation rel, int access)
CacheInvalidateRelcache(rel);
/*
* swap root write lock for read lock. There is no danger of anyone
* swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@ -324,7 +324,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
* from whatever non-root page we were at. If we ever do need to lock the
* from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@ -423,7 +423,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@ -449,7 +449,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
* blkno == P_NEW means to get an unallocated index page. The page
* blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@ -480,7 +480,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
* the page is still free. (For example, an already-free page could
* the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
@ -647,7 +647,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page.
*
* This must only be used for deleting leaf items. Deleting an item on a
* This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@ -704,7 +704,7 @@ _bt_delitems(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
* is. When XLogInsert stores the whole buffer, the offsets array
* is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
@ -869,7 +869,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
BTPageOpaque opaque;
/*
* We can never delete rightmost pages nor root pages. While at it, check
* We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
@ -941,7 +941,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
* facilities not available in recovery mode). Instead, just set
* facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
@ -976,7 +976,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
* if so, give up. (Right now, that should never happen, since page
* if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
@ -1005,7 +1005,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
lbuf = InvalidBuffer;
/*
* Next write-lock the target page itself. It should be okay to take just
* Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/


@ -151,7 +151,7 @@ btbuild(PG_FUNCTION_ARGS)
/*
* If we are reindexing a pre-existing index, it is critical to send out a
* relcache invalidation SI message to ensure all backends re-read the
* index metapage. We expect that the caller will ensure that happens
* index metapage. We expect that the caller will ensure that happens
* (typically as a side effect of updating index stats, but it must happen
* even if the stats don't change!)
*/
@ -258,11 +258,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
* Yes, remember it for later. (We'll deal with all such tuples
* Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
* multiple times. It's not worth trying to optimize that, so we
* multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
@ -814,7 +814,7 @@ restart:
LockBufferForCleanup(buf);
/*
* Check whether we need to recurse back to earlier pages. What we
* Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
@ -913,7 +913,7 @@ restart:
* since there can be no other transactions scanning the index. Note
* that we will only recycle the current page and not any parent pages
* that _bt_pagedel might have recursed to; this seems reasonable in
* the name of simplicity. (Trying to do otherwise would mean we'd
* the name of simplicity. (Trying to do otherwise would mean we'd
* have to sort the list of recyclable pages we're building.)
*/
if (ndel && info->vacuum_full)


@ -49,7 +49,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
* to be created and returned. When access = BT_READ, an empty index
* to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
@ -226,7 +226,7 @@ _bt_moveright(Relation rel,
* (or leaf keys > given scankey when nextkey is true).
*
* This procedure is not responsible for walking right, it just examines
* the given page. _bt_binsrch() has no lock or refcount side effects
* the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@ -358,7 +358,7 @@ _bt_compare(Relation rel,
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key, the
* scan contain the first k key attributes, and that they be in order. If
* scan contain the first k key attributes, and that they be in order. If
* you think about how multi-key ordering works, you'll understand why
* this is.
*
@ -397,7 +397,7 @@ _bt_compare(Relation rel,
/*
* The sk_func needs to be passed the index value as left arg and
* the sk_argument as right arg (they might be of different
* types). Since it is convenient for callers to think of
* types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item, we have
* to flip the sign of the comparison result. (Unless it's a DESC
* column, in which case we *don't* flip the sign.)
@ -425,7 +425,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
* conditions, and the tree ordering. We find the first item (or,
* conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@ -478,7 +478,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
* the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@ -641,7 +641,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
* scheme. But, by the same token, if we aren't able to use all
* scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
@ -758,7 +758,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Find first item >= scankey, then back up one to arrive at last
* item < scankey. (Note: this positioning strategy is only used
* item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
*/
@ -807,7 +807,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterEqualStrategyNumber:
/*
* Find first item >= scankey. (This is only used for forward
* Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
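For orientation, a sketch of the full strategy-to-flags mapping built by these cases (see the neighboring cases in _bt_first):

    /*
     *   strategy          nextkey   goback   starting position
     *   BTLess            false     true     first item >= key, back up one
     *   BTLessEqual       true      true     first item >  key, back up one
     *   BTGreaterEqual    false     false    first item >= key
     *   BTGreater         true      false    first item >  key
     *   (BTEqual picks one of the above depending on scan direction)
     */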
@ -878,7 +878,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
* the last item on this page. Adjust the starting offset if needed. (If
* the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
@ -1161,7 +1161,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
* it. See nbtree/README for details.
* it. See nbtree/README for details.
*/
for (;;)
{
@ -1255,7 +1255,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
* because half-dead pages are still in the sibling chain. Caller
* because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
@ -1281,7 +1281,7 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
* It was deleted. Move right to first nondeleted page (there
* It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
@ -1325,7 +1325,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
* condition causes ereport(). We will not return a dead page.
* condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/

View File

@ -7,7 +7,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
* for each level. We load source tuples into leaf-level pages.
* for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@ -42,11 +42,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
* steps of the build. After completing the index build, we can just sync
* steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
* just after the index build. However, it is clearly not sufficient if the
* just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
@ -87,7 +87,7 @@ struct BTSpool
};
/*
* Status record for a btree page being built. We have one of these
* Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@ -156,7 +156,7 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead)
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
* unique index actually requires two BTSpool objects. We expect that the
* unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
*/
@ -295,7 +295,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
}
/*
* Now write the page. We say isTemp = true even if it's not a temp
* Now write the page. We say isTemp = true even if it's not a temp
* index, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves before ending the build.
*/
@ -421,14 +421,14 @@ _bt_sortaddtup(Page page,
* A leaf page being built looks like:
*
* +----------------+---------------------------------+
* | PageHeaderData | linp0 linp1 linp2 ... |
* | PageHeaderData | linp0 linp1 linp2 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
* | ^ last |
* | |
* +-------------+------------------------------------+
* | | itemN ... |
* | | itemN ... |
* +-------------+------------------+-----------------+
* | ... item3 item2 item1 | "special space" |
* +--------------------------------+-----------------+
@ -489,9 +489,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
"or use full text indexing.")));
/*
* Check to see if page is "full". It's definitely full if the item won't
* Check to see if page is "full". It's definitely full if the item won't
* fit. Otherwise, compare to the target freespace derived from the
* fillfactor. However, we must put at least two items on each page, so
* fillfactor. However, we must put at least two items on each page, so
* disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
@ -564,7 +564,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
}
/*
* Write out the old page. We never need to touch it again, so we can
* Write out the old page. We never need to touch it again, so we can
* free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
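
The page-full test in the _bt_buildadd hunk above boils down to two conditions. A minimal sketch with invented names (the real test compares pgspc against itupsz and the fillfactor-derived btps_full target):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * A page is closed out when the incoming item simply won't fit, or when
 * free space has dropped below the fillfactor-derived target -- but only
 * if the page already holds at least two items, since every page must
 * accept a minimum of two.
 */
static bool
page_is_full(size_t free_space, size_t item_size,
             size_t target_free_space, int items_on_page)
{
    if (free_space < item_size)
        return true;            /* definitely full */
    if (free_space < target_free_space && items_on_page >= 2)
        return true;            /* soft limit from the fillfactor */
    return false;
}

int
main(void)
{
    /* a 90% fillfactor on an 8192-byte page leaves ~819 bytes as target */
    printf("%d\n", page_is_full(700, 40, 819, 50));     /* 1: below target */
    printf("%d\n", page_is_full(700, 40, 819, 1));      /* 0: too few items */
    return 0;
}
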

View File

@ -94,7 +94,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
* data is first stored into the key entries. Currently this
* data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@ -164,7 +164,7 @@ _bt_freestack(BTStack stack)
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The caller-supplied search-type keys (in scan->keyData[]) are copied to
* so->keyData[] with possible transformation. scan->numberOfKeys is
* so->keyData[] with possible transformation. scan->numberOfKeys is
* the number of input keys, so->numberOfKeys gets the number of output
* keys (possibly less, never greater).
*
@ -175,7 +175,7 @@ _bt_freestack(BTStack stack)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover how many scan keys
* must be satisfied to continue the scan. It also attempts to eliminate
* must be satisfied to continue the scan. It also attempts to eliminate
* redundant keys and detect contradictory keys. (If the index opfamily
* provides incomplete sets of cross-type operators, we may fail to detect
* redundant or contradictory keys, but we can survive that.)
@ -207,7 +207,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
* we cannot eliminate either. If there are two such keys of the same
* we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@ -267,12 +267,12 @@ _bt_preprocess_keys(IndexScanDesc scan)
{
/*
* We treat all btree operators as strict (even if they're not so
* marked in pg_proc). This means that it is impossible for an
* marked in pg_proc). This means that it is impossible for an
* operator condition with a NULL comparison constant to succeed, and
* we can reject it right away.
*
* However, we now also support "x IS NULL" clauses as search
* conditions, so in that case keep going. The planner has not filled
* conditions, so in that case keep going. The planner has not filled
* in any particular strategy in this case, so set it to
* BTEqualStrategyNumber --- we can treat IS NULL as an equality
* operator for purposes of search strategy.
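
A toy version of the per-attribute key reduction these comments describe, assuming invented names and only the = and > strategies (the real code handles all five strategies plus cross-type operators): redundant bounds collapse to the tightest one, and incompatible combinations are reported as contradictory.

#include <stdio.h>
#include <stdbool.h>

typedef enum { KEY_GT, KEY_EQ } toy_strategy;

typedef struct
{
    toy_strategy strat;
    int          arg;
} toy_key;

/*
 * Reduce the keys on one attribute: keep only the tightest ">" bound,
 * let "=" subsume any compatible ">", and detect contradictions.
 * Returns the number of output keys, or -1 if the quals can't all hold.
 */
static int
preprocess(const toy_key *in, int nin, toy_key *out)
{
    bool have_gt = false, have_eq = false;
    int  best_gt = 0, eq_val = 0;

    for (int i = 0; i < nin; i++)
    {
        if (in[i].strat == KEY_EQ)
        {
            if (have_eq && in[i].arg != eq_val)
                return -1;          /* x = 5 AND x = 7 */
            have_eq = true;
            eq_val = in[i].arg;
        }
        else if (!have_gt || in[i].arg > best_gt)
        {
            have_gt = true;
            best_gt = in[i].arg;    /* x > 4 AND x > 9  =>  x > 9 */
        }
    }

    if (have_eq && have_gt && eq_val <= best_gt)
        return -1;                  /* x = 5 AND x > 9 */

    int n = 0;
    if (have_eq)
        out[n++] = (toy_key) {KEY_EQ, eq_val};
    else if (have_gt)
        out[n++] = (toy_key) {KEY_GT, best_gt};
    return n;
}

int
main(void)
{
    toy_key in[] = {{KEY_GT, 4}, {KEY_GT, 9}, {KEY_EQ, 12}};
    toy_key out[3];
    int     n = preprocess(in, 3, out);

    printf("kept %d key(s); strat=%d arg=%d\n", n, out[0].strat, out[0].arg);
    return 0;                       /* kept 1 key(s); strat=1 arg=12 */
}
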
@ -426,7 +426,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
* mark them if they are required. They are required (possibly
* mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@ -545,7 +545,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
* may not be able to make the comparison. If we can make the comparison
* may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@ -686,7 +686,7 @@ _bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
* directions or just one. Also, if the key is a row comparison header,
* directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns
@ -698,7 +698,7 @@ _bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
* anyway on a rescan. Something to keep an eye on though.
* anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
@ -877,7 +877,7 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
@ -890,8 +890,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@ -981,7 +981,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
* one of the "must match" subset. On a forward scan,
* one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@ -994,7 +994,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@ -1012,7 +1012,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
* But it can never match. If all the earlier row comparison
* But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
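
The direction-dependent NULL logic above condenses to one predicate. A sketch under invented names, where the "required" flags mirror SK_BT_REQFWD and SK_BT_REQBKWD:

#include <stdio.h>
#include <stdbool.h>

typedef enum { FORWARD, BACKWARD } scan_dir;

/*
 * Decide whether an index scan may stop when it hits a NULL in this
 * column.  With NULLS FIRST ordering a NULL marks the lower limit of
 * the column's values, so only a backward scan holding a required key
 * can stop; with NULLS LAST it marks the upper limit, so only a
 * forward scan can.  Returns true if the scan should continue.
 */
static bool
continue_on_null(bool nulls_first, bool req_fwd, bool req_bkwd, scan_dir dir)
{
    if (nulls_first)
        return !(dir == BACKWARD && req_bkwd);
    else
        return !(dir == FORWARD && req_fwd);
}

int
main(void)
{
    /* NULLS LAST, key required in the forward direction, forward scan */
    printf("%d\n", continue_on_null(false, true, false, FORWARD));  /* 0 */
    /* same key, backward scan: must keep going */
    printf("%d\n", continue_on_null(false, true, false, BACKWARD)); /* 1 */
    return 0;
}
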
@ -1076,7 +1076,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
* either. Note we have to look at the deciding column, not
* either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@ -1102,7 +1102,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
* delete. We cope with cases where items have moved right due to insertions.
* delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@ -1188,8 +1188,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
* operations. There is a single counter which increments each time we
* start a vacuum to assign it a cycle ID. Since multiple vacuums could
* operations. There is a single counter which increments each time we
* start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
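
A toy model of this cycle-ID area, with no locking and invented names; the real code also reserves cycle ID zero to mean "no active vacuum", which is assumed here:

#include <stdio.h>

#define MAX_VACUUMS 8

typedef struct
{
    unsigned       relid;       /* which index is being vacuumed */
    unsigned short cycleid;     /* its assigned vacuum cycle ID */
} vac_entry;

static unsigned short cycle_ctr = 0;
static vac_entry active[MAX_VACUUMS];
static int       n_active = 0;

/* Hand out the next cycle ID and record the vacuum as active. */
static unsigned short
start_vacuum(unsigned relid)
{
    if (++cycle_ctr == 0)       /* zero is reserved for "no vacuum" */
        cycle_ctr = 1;
    active[n_active].relid = relid;
    active[n_active].cycleid = cycle_ctr;
    n_active++;
    return cycle_ctr;
}

int
main(void)
{
    printf("first vacuum gets cycle id %u\n", (unsigned) start_vacuum(16384));
    return 0;
}
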

View File

@ -126,7 +126,7 @@ forget_matching_deletion(RelFileNode node, BlockNumber delblk)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
* eye on. Is it worth changing just on general principles? See also the
* eye on. Is it worth changing just on general principles? See also the
* notes in btree_xlog_split().
*/
static void
@ -177,7 +177,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
* Set pd_lower just past the end of the metadata. This is not essential
* Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
@ -359,7 +359,7 @@ btree_xlog_split(bool onleft, bool isroot,
* Reconstruct left (original) sibling if needed. Note that this code
* ensures that the items remaining on the left page are in the correct
* item number order, but it does not reproduce the physical order they
* would have had. Is this worth changing? See also _bt_restore_page().
* would have had. Is this worth changing? See also _bt_restore_page().
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
@ -379,7 +379,7 @@ btree_xlog_split(bool onleft, bool isroot,
/*
* Remove the items from the left page that were copied to the
* right page. Also remove the old high key, if any. (We must
* right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/

View File

@ -11,15 +11,15 @@
* log can be broken into relatively small, independent segments.
*
* XLOG interactions: this module generates an XLOG record whenever a new
* CLOG page is initialized to zeroes. Other writes of CLOG come from
* CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
* on redo; so we need make no additional XLOG entry here. For synchronous
* on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
* XLOG that far and satisfy the WAL rule. We don't have to worry about this
* XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
@ -104,7 +104,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* in the tree of xid. In various cases nsubxids may be zero.
*
* lsn must be the WAL location of the commit record when recording an async
* commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
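
The async-commit rule described above amounts to tracking one LSN per buffered CLOG page and flushing WAL that far before the page is written. A sketch with invented names and a stubbed XLogFlush:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;            /* stand-in for the real type */

static XLogRecPtr page_lsn[64];         /* largest commit LSN per buffer */

static void
XLogFlush(XLogRecPtr upto)              /* stub: flush WAL through 'upto' */
{
    printf("flush WAL through %llu\n", (unsigned long long) upto);
}

/*
 * Record a commit's status bit; async commits pass their commit-record
 * LSN, sync commits pass 0 because their WAL is already flushed.
 */
static void
clog_set_status(int slot, XLogRecPtr commit_lsn)
{
    if (commit_lsn > page_lsn[slot])
        page_lsn[slot] = commit_lsn;
}

/*
 * Before a dirty CLOG page goes to disk, satisfy "write xlog before
 * data" by flushing WAL up to the page's remembered LSN.
 */
static void
clog_write_page(int slot)
{
    if (page_lsn[slot] != 0)
        XLogFlush(page_lsn[slot]);
    /* ... then physically write the page ... */
}

int
main(void)
{
    clog_set_status(3, 0);              /* sync commit: nothing to track */
    clog_set_status(3, 123456);         /* async commit */
    clog_write_page(3);                 /* flushes WAL through 123456 */
    return 0;
}
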

View File

@ -4,15 +4,15 @@
* PostgreSQL multi-transaction-log manager
*
* The pg_multixact manager is a pg_clog-like manager that stores an array
* of TransactionIds for each MultiXactId. It is a fundamental part of the
* shared-row-lock implementation. A share-locked tuple stores a
* of TransactionIds for each MultiXactId. It is a fundamental part of the
* shared-row-lock implementation. A share-locked tuple stores a
* MultiXactId in its Xmax, and a transaction that needs to wait for the
* tuple to be unlocked can sleep on the potentially-several TransactionIds
* that compose the MultiXactId.
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
* store variable length arrays of TransactionIds. (We could alternatively
* store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@ -32,7 +32,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
* across a crash. We do this in the same way as it's done for transaction
* across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@ -63,13 +63,13 @@
/*
* Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because both MultiXactOffsets and TransactionIds are 32 bits and
* wrap around at 0xFFFFFFFF, MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_*_PER_PAGE, and segment numbering at
* 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
* 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
* explicit notice of that fact in this module, except when comparing segment
* and page numbers in TruncateMultiXact
* (see MultiXact{Offset,Member}PagePrecedes).
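
A toy model of the offsets/members scheme described above, using plain arrays in place of the two SLRU areas (invented names, no locking, no wraparound): a multixact's member count is the next multixact's starting offset minus its own, and for the latest multixact the saved nextOffset serves as the endpoint, matching corner case 1 discussed further down.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;
typedef uint32_t MultiXactId;
typedef uint32_t MultiXactOffset;

static MultiXactOffset offsets[16];     /* where each multi's members start */
static TransactionId   members[64];     /* the member xids themselves */
static MultiXactId     next_multi  = 1;
static MultiXactOffset next_offset = 1; /* offset 0 never starts a multi */

static MultiXactId
create_multi(const TransactionId *xids, int n)
{
    MultiXactId multi = next_multi++;

    offsets[multi] = next_offset;
    for (int i = 0; i < n; i++)
        members[next_offset + i] = xids[i];
    next_offset += n;
    return multi;
}

static int
get_members(MultiXactId multi, const TransactionId **xids)
{
    MultiXactOffset start = offsets[multi];
    /* the latest multi has no successor; use next_offset as the endpoint */
    MultiXactOffset end = (multi + 1 < next_multi) ? offsets[multi + 1]
                                                   : next_offset;

    *xids = &members[start];
    return (int) (end - start);
}

int
main(void)
{
    TransactionId a[] = {100, 101}, b[] = {102, 103, 104};
    const TransactionId *m;
    MultiXactId ma = create_multi(a, 2);
    MultiXactId mb = create_multi(b, 3);
    int n = get_members(mb, &m);

    printf("multi %u has %d members, first %u\n", mb, n, m[0]);
    (void) ma;
    return 0;   /* prints: multi 2 has 3 members, first 102 */
}
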
@ -100,7 +100,7 @@ static SlruCtlData MultiXactMemberCtlData;
#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
* MultiXact state shared across all backends. All this state is protected
* MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@ -343,7 +343,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid)
/*
* Determine which of the members of the MultiXactId are still running,
* and use them to create a new one. (Removing dead members is just an
* optimization, but a useful one. Note we have the same race condition
* optimization, but a useful one. Note we have the same race condition
* here as above: j could be 0 at the end of the loop.)
*/
newMembers = (TransactionId *)
@ -408,7 +408,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
* walking the PGPROC array only once for all the members. But in most
* walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@ -527,7 +527,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
* an OldestMemberMXactId[] entry older than what we compute here. Therefore
* an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@ -698,7 +698,7 @@ CreateMultiXactId(int nxids, TransactionId *xids)
* heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
* an XLOG record that must follow ours. The normal LSN interlock between
* the data page and that XLOG record will ensure that our XLOG record
* reaches disk first. If the SLRU members/offsets data reaches disk
* reaches disk first. If the SLRU members/offsets data reaches disk
* sooner than the XLOG record, we do not care because we'll overwrite it
* with zeroes unless the XLOG record is there too; see notes at top of
* this file.
@ -805,7 +805,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
* Also, reserve the needed amount of space in the "members" area. The
* Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@ -870,7 +870,7 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* until after file extension has succeeded!
*
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
* be prepared to deal with that. Similarly, nextOffset may be zero, but
* we won't use that as the actual start offset of the next multixact.
@ -934,7 +934,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. We just
* seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@ -950,7 +950,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
* values. We may need both nextMXact and nextOffset; see below.
* values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@ -968,12 +968,12 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
* and the number of members in the multixact. We determine the latter as
* and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there is
* no next one to look at. In this case the nextOffset value we just
* no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
@ -984,11 +984,11 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
* have this case. We sleep for a bit and try again.
* have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
* wraparound. If we see next multixact's offset is one, is that our
* wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
@ -1401,7 +1401,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
* thus double memory. Also, reserve space for the shared MultiXactState
* thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@ -1461,7 +1461,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
* MultiXact segments. (The MultiXacts directories are assumed to have been
* MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@ -1534,7 +1534,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
* This must be called ONCE during postmaster or standalone-backend startup.
*
* StartupXLOG has already established nextMXact/nextOffset by calling
* MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we
* MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we
* may already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
@ -1558,7 +1558,7 @@ StartupMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
* Zero out the remainder of the current offsets page. See notes in
* Zero out the remainder of the current offsets page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
@ -1588,7 +1588,7 @@ StartupMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
* Zero out the remainder of the current members page. See notes in
* Zero out the remainder of the current members page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MXOffsetToMemberEntry(offset);
@ -1661,7 +1661,7 @@ CheckPointMultiXact(void)
/*
* Truncate the SLRU files. This could be done at any time, but
* checkpoint seems a reasonable place for it. There is one exception: if
* checkpoint seems a reasonable place for it. There is one exception: if
* we are called during xlog recovery, then shared->latest_page_number
* isn't valid (because StartupMultiXact hasn't been called yet) and so
* SimpleLruTruncate would get confused. It seems best not to risk
@ -1794,7 +1794,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
* Remove all MultiXactOffset and MultiXactMember segments before the oldest
* ones still of interest.
*
* This is called only during checkpoints. We assume no more than one
* This is called only during checkpoints. We assume no more than one
* backend does this at a time.
*
* XXX do we have any issues with needing to checkpoint here?
@ -1855,7 +1855,7 @@ TruncateMultiXact(void)
return;
/*
* We need to determine where to truncate MultiXactMember. If we found a
* We need to determine where to truncate MultiXactMember. If we found a
* valid oldest MultiXactId, read its starting offset; otherwise we use
* the nextOffset value we saved above.
*/

View File

@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
* must be held to examine or modify any shared state. A process that is
* must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@ -34,7 +34,7 @@
* could have happened while we didn't have the lock).
*
* As with the regular buffer manager, it is possible for another process
* to re-dirty a page that is currently being written out. This is handled
* to re-dirty a page that is currently being written out. This is handled
* by re-setting the page's page_dirty flag.
*
*
@ -73,7 +73,7 @@
* segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
*
* Note: this file currently assumes that segment file names will be four
* hex digits. This sets a lower bound on the segment size (64K transactions
* hex digits. This sets a lower bound on the segment size (64K transactions
* for 32-bit TransactionIds).
*/
#define SLRU_PAGES_PER_SEGMENT 32
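
With four-hex-digit names, segment numbers run only to 0xFFFF, hence the 64K-transaction lower bound on segment size. A sketch of the xid-to-page-to-segment arithmetic, assuming CLOG-like constants (two status bits per transaction, 8 kB pages) that are not part of this hunk:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

#define BLCKSZ                  8192
#define CLOG_XACTS_PER_BYTE     4
#define CLOG_XACTS_PER_PAGE     (BLCKSZ * CLOG_XACTS_PER_BYTE)
#define SLRU_PAGES_PER_SEGMENT  32

int
main(void)
{
    TransactionId xid = 3000000;
    int pageno = xid / CLOG_XACTS_PER_PAGE;
    int segno  = pageno / SLRU_PAGES_PER_SEGMENT;

    printf("xid %u -> page %d, segment file \"%04X\"\n",
           xid, pageno, (unsigned) segno);
    return 0;   /* xid 3000000 -> page 91, segment file "0002" */
}
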
@ -113,7 +113,7 @@ typedef struct SlruFlushData
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
* worst possible consequence is a nonoptimal choice of page to evict. The
* worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
@ -499,7 +499,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
* the write). However, we *do* attempt a fresh write even if the page
* the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@ -597,7 +597,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
* SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* where the file doesn't exist, and return zeroes instead.
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@ -1128,7 +1128,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
* the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/

View File

@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
* nested transactions implementation. A main transaction has a parent
* nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
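
The child-to-parent walk is the operation this layout is built for. A toy version with an array in place of the SLRU area (invented names):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId ((TransactionId) 0)

/* stand-in for the pg_subtrans area: each slot holds the parent xid */
static TransactionId parent_of[1000];

/* Follow parents upward; a top-level xact has InvalidTransactionId. */
static TransactionId
toplevel_of(TransactionId xid)
{
    while (parent_of[xid] != InvalidTransactionId)
        xid = parent_of[xid];
    return xid;
}

int
main(void)
{
    parent_of[101] = 100;       /* 101 is a subxact of 100 */
    parent_of[102] = 101;       /* 102 is nested inside 101 */
    printf("%u\n", toplevel_of(102));   /* prints 100 */
    return 0;
}
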
@ -186,7 +186,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
* since slru.c would create it on first write anyway. But we may as well
* since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void

View File

@ -149,7 +149,7 @@ TransactionIdDidCommit(TransactionId transactionId)
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
* zeroed. Since this case should not happen under normal conditions, it
* zeroed. Since this case should not happen under normal conditions, it
* seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
@ -305,7 +305,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
* comparison. If both are normal, do a modulo-2^32 comparison.
* comparison. If both are normal, do a modulo-2^32 comparison.
*/
int32 diff;
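
The modulo-2^32 trick is worth spelling out: cast the unsigned difference to signed, and id1 precedes id2 iff the result is negative, which stays correct across the 32-bit wraparound as long as the two XIDs lie within 2^31 of each other. A sketch (the permanent-XID case uses plain unsigned comparison, as the comment above says):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint32_t TransactionId;

#define FirstNormalTransactionId ((TransactionId) 3)
#define TransactionIdIsNormal(xid) ((xid) >= FirstNormalTransactionId)

static bool
xid_precedes(TransactionId id1, TransactionId id2)
{
    if (!TransactionIdIsNormal(id1) || !TransactionIdIsNormal(id2))
        return id1 < id2;           /* permanent XIDs: unsigned compare */

    int32_t diff = (int32_t) (id1 - id2);

    return diff < 0;                /* modulo-2^32 comparison */
}

int
main(void)
{
    /* 4294967290 was assigned before 10 if the counter wrapped between */
    printf("%d\n", xid_precedes(4294967290u, 10));  /* prints 1 */
    printf("%d\n", xid_precedes(10, 4294967290u));  /* prints 0 */
    return 0;
}
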

View File

@ -425,7 +425,7 @@ LockGXact(const char *gid, Oid user)
/*
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
* there may be some other issues as well. Hence disallow until
* there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != gxact->proc.databaseId)
@ -983,7 +983,7 @@ EndPrepare(GlobalTransaction gxact)
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
* the write --- then, WAL replay should repair the inconsistency. The
* the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
@ -1021,7 +1021,7 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
* Mark the prepared transaction as valid. As soon as xact.c marks MyProc
* Mark the prepared transaction as valid. As soon as xact.c marks MyProc
* as not running our XID (which it will do immediately after this
* function returns), others can commit/rollback the xact.
*
@ -1241,7 +1241,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it can be recycled
* properly later. It is still locked by our XID so it won't go away yet.
* properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
@ -1435,7 +1435,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
* to open its state file. We handle this by special-casing ENOENT
* to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
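
A sketch of that ENOENT special-casing, with invented names and a stubbed validity check:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stub: in reality this re-checks the gxact under the shared lock. */
static bool
gxact_still_valid(int gxact_id)
{
    (void) gxact_id;
    return false;
}

static void
process_state_file(int gxact_id, const char *path)
{
    FILE *fp = fopen(path, "rb");

    if (fp == NULL)
    {
        if (errno == ENOENT && !gxact_still_valid(gxact_id))
            return;             /* lost the race to a concurrent commit */
        perror("could not open two-phase state file");
        return;
    }
    /* ... read, verify CRC, sync ... */
    fclose(fp);
}

int
main(void)
{
    process_state_file(42, "pg_twophase/0000002A");     /* path invented */
    return 0;
}
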
@ -1515,7 +1515,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
* earlier point in time without cleaning out pg_twophase. We dare not
* earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*

View File

@ -114,20 +114,20 @@ GetNewTransactionId(bool isSubXact)
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
* want the next incoming transaction to try it again. We cannot assign
* want the next incoming transaction to try it again. We cannot assign
* more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
* We must store the new XID into the shared ProcArray before releasing
* XidGenLock. This ensures that every active XID older than
* XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
* Note that readers of PGPROC xid fields should be careful to fetch the
@ -238,7 +238,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
/*
* We'll start complaining loudly when we get within 10M transactions of
* the stop point. This is kind of arbitrary, but if you let your gas
* the stop point. This is kind of arbitrary, but if you let your gas
* gauge get down to 1% of full, would you be looking for the next gas
* station? We need to be fairly liberal about this number because there
* are lots of scenarios where most transactions are done by automatic
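
A sketch of the warn-limit arithmetic, with an assumed stop point and invented names (the real limits are computed in SetTransactionIdLimit from the oldest datfrozenxid, and the wraparound-safe comparison is the same signed-difference trick used by TransactionIdPrecedes):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

int
main(void)
{
    TransactionId xidStopLimit = 4000000000u;   /* assumed stop point */
    TransactionId xidWarnLimit = xidStopLimit - 10000000;
    TransactionId next = 3995000000u;           /* hypothetical nextXid */

    if ((int32_t) (next - xidWarnLimit) >= 0)
        printf("WARNING: database must be vacuumed within %u transactions\n",
               xidStopLimit - next);
    return 0;   /* warns: within 5000000 transactions */
}
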

Some files were not shown because too many files have changed in this diff.