diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index d750f0800b..c7d84b59ce 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -223,7 +223,7 @@ include 'filename'
in configuration file
The postgresql.conf> file can also contain
- include_dir directives>, which specify an entire directory
+ include_dir directives, which specify an entire directory
of configuration files to include. It is used similarly:
include_dir 'directory'
@@ -234,7 +234,7 @@ include 'filename'
names end with the suffix .conf will be included. File
names that start with the . character are also excluded,
to prevent mistakes as they are hidden on some platforms. Multiple files
- within an include directory are processed in filename order. The filenames
+ within an include directory are processed in file name order. The file names
are ordered by C locale rules, ie. numbers before letters, and uppercase
letters before lowercase ones.
@@ -1211,7 +1211,7 @@ include 'filename'
Specifies the maximum amount of disk space that a session can use
for temporary files, such as sort and hash temporary files, or the
storage file for a held cursor. A transaction attempting to exceed
- this limit will be cancelled.
+ this limit will be canceled.
The value is specified in kilobytes, and -1> (the
default) means no limit.
Only superusers can change this setting.
@@ -3358,7 +3358,7 @@ local0.* /var/log/postgresql
When logging_collector is enabled,
this parameter sets the file names of the created log files. The value
- is treated as a strftime pattern,
+ is treated as a strftime pattern,
so %-escapes can be used to specify time-varying
file names. (Note that if there are
any time-zone-dependent %-escapes, the computation
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml
index a5e03c6fc9..f73e6b2e3a 100644
--- a/doc/src/sgml/datatype.sgml
+++ b/doc/src/sgml/datatype.sgml
@@ -4098,7 +4098,7 @@ SET xmloption TO { DOCUMENT | CONTENT };
representations of XML values, such as in the above examples.
This would ordinarily mean that encoding declarations contained in
XML data can become invalid as the character data is converted
- to other encodings while travelling between client and server,
+ to other encodings while traveling between client and server,
because the embedded encoding declaration is not changed. To cope
with this behavior, encoding declarations contained in
character strings presented for input to the xml type
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index c94988a2f9..912ca8663e 100644
--- a/doc/src/sgml/fdwhandler.sgml
+++ b/doc/src/sgml/fdwhandler.sgml
@@ -450,7 +450,7 @@ ExecForeignInsert (EState *estate,
query has a RETURNING> clause. Hence, the FDW could choose
to optimize away returning some or all columns depending on the contents
of the RETURNING> clause. However, some slot must be
- returned to indicate success, or the query's reported rowcount will be
+ returned to indicate success, or the query's reported row count will be
wrong.
@@ -495,7 +495,7 @@ ExecForeignUpdate (EState *estate,
query has a RETURNING> clause. Hence, the FDW could choose
to optimize away returning some or all columns depending on the contents
of the RETURNING> clause. However, some slot must be
- returned to indicate success, or the query's reported rowcount will be
+ returned to indicate success, or the query's reported row count will be
wrong.
@@ -538,7 +538,7 @@ ExecForeignDelete (EState *estate,
query has a RETURNING> clause. Hence, the FDW could choose
to optimize away returning some or all columns depending on the contents
of the RETURNING> clause. However, some slot must be
- returned to indicate success, or the query's reported rowcount will be
+ returned to indicate success, or the query's reported row count will be
wrong.
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index 5d13ba3136..2c02fd1e1b 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -9928,7 +9928,7 @@ table2-mapping
array_to_json(anyarray [, pretty_bool])
- json
+ json
Returns the array as JSON. A PostgreSQL multidimensional array
becomes a JSON array of arrays. Line feeds will be added between
@@ -9944,7 +9944,7 @@ table2-mapping
row_to_json(record [, pretty_bool])
- json
+ json
Returns the row as JSON. Line feeds will be added between level
1 elements if pretty_bool is true.
@@ -9959,12 +9959,12 @@ table2-mapping
to_json(anyelement)
- json
+ json
- Returns the value as JSON. If the data type is not builtin, and there
- is a cast from the type to json, the cast function will be used to
+ Returns the value as JSON. If the data type is not built in, and there
+ is a cast from the type to json, the cast function will be used to
perform the conversion. Otherwise, for any value other than a number,
- a boolean or NULL, the text representation will be used, escaped and
+ a Boolean, or a null value, the text representation will be used, escaped and
quoted so that it is legal JSON.
to_json('Fred said "Hi."'::text)
@@ -9977,9 +9977,9 @@ table2-mapping
json_array_length(json)
- int
+ int
- Returns the number of elements in the outermost json array.
+ Returns the number of elements in the outermost JSON array.
json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]')5
@@ -9991,9 +9991,9 @@ table2-mapping
json_each(json)
- SETOF key text, value json
+ SETOF key text, value json
- Expands the outermost json object into a set of key/value pairs.
+ Expands the outermost JSON object into a set of key/value pairs.
select * from json_each('{"a":"foo", "b":"bar"}')
@@ -10012,9 +10012,9 @@ table2-mapping
json_each_text(from_json json)
- SETOF key text, value text
+ SETOF key text, value text
- Expands the outermost json object into a set of key/value pairs. The
+ Expands the outermost JSON object into a set of key/value pairs. The
returned value will be of type text.
select * from json_each_text('{"a":"foo", "b":"bar"}')
@@ -10034,9 +10034,9 @@ table2-mapping
json_extract_path(from_json json, VARIADIC path_elems text[])
- json
+ json
- Returns json object pointed to by path_elems.
+ Returns JSON object pointed to by path_elems.
json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}','f4'){"f5":99,"f6":"foo"}
@@ -10048,9 +10048,9 @@ table2-mapping
json_extract_path_text(from_json json, VARIADIC path_elems text[])
- text
+ text
- Returns json object pointed to by path_elems.
+ Returns JSON object pointed to by path_elems.
json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}','f4', 'f6')foo
@@ -10062,9 +10062,9 @@ table2-mapping
json_object_keys(json)
- SETOF text
+ SETOF text
- Returns set of keys in the json object. Only the "outer" object will be displayed.
+ Returns set of keys in the JSON object. Only the outer object will be displayed.
json_object_keys('{"f1":"abc","f2":{"f3":"a", "f4":"b"}}')
@@ -10083,11 +10083,11 @@ table2-mapping
json_populate_record(base anyelement, from_json json, [, use_json_as_text bool=false]
- anyelement
+ anyelement
- Expands the object in from_json to a row whose columns match
+ Expands the object in from_json to a row whose columns match
the record type defined by base. Conversion will be best
- effort; columns in base with no corresponding key in from_json
+ effort; columns in base with no corresponding key in from_json
will be left null. A column may only be specified once.
select * from json_populate_record(null::x, '{"a":1,"b":2}')
@@ -10106,12 +10106,12 @@ table2-mapping
json_populate_recordset(base anyelement, from_json json, [, use_json_as_text bool=false]
- SETOF anyelement
+ SETOF anyelement
- Expands the outermost set of objects in from_json to a set
+ Expands the outermost set of objects in from_json to a set
whose columns match the record type defined by base.
Conversion will be best effort; columns in base with no
- corresponding key in from_json will be left null. A column
+ corresponding key in from_json will be left null. A column
may only be specified once.
select * from json_populate_recordset(null::x, '[{"a":1,"b":2},{"a":3,"b":4}]')
@@ -10131,9 +10131,9 @@ table2-mapping
json_array_elements(json)
- SETOF json
+ SETOF json
- Expands a json array to a set of json elements.
+ Expands a JSON array to a set of JSON elements.
json_array_elements('[1,true, [2,false]]')
@@ -10152,8 +10152,8 @@ table2-mapping
- The extension has a cast from hstore to
- json, so that converted hstore values are represented as json objects,
+ The extension has a cast from hstore to
+ json, so that converted hstore values are represented as JSON objects,
not as string values.
@@ -10161,7 +10161,7 @@ table2-mapping
See also about the aggregate
function json_agg which aggregates record
- values as json efficiently.
+ values as JSON efficiently.
@@ -11546,7 +11546,7 @@ SELECT NULLIF(value, '(none)') ...
json
- aggregates records as a json array of objects
+ aggregates records as a JSON array of objects
@@ -14904,7 +14904,7 @@ SELECT set_config('log_statement_stats', 'off', false);
- Server Signalling Functions
+ Server Signaling Functionspg_cancel_backend
@@ -14932,7 +14932,7 @@ SELECT set_config('log_statement_stats', 'off', false);
- Server Signalling Functions
+ Server Signaling FunctionsNameReturn TypeDescription
diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml
index ca7641a839..9ffa8be7bc 100644
--- a/doc/src/sgml/gin.sgml
+++ b/doc/src/sgml/gin.sgml
@@ -105,7 +105,7 @@
Returns a palloc'd array of keys given an item to be indexed. The
number of returned keys must be stored into *nkeys>.
If any of the keys can be null, also palloc an array of
- *nkeys> booleans, store its address at
+ *nkeys> bool fields, store its address at
*nullFlags>, and set these null flags as needed.
*nullFlags> can be left NULL (its initial value)
if all keys are non-null.
@@ -130,11 +130,11 @@
query> and the method it should use to extract key values.
The number of returned keys must be stored into *nkeys>.
If any of the keys can be null, also palloc an array of
- *nkeys> booleans, store its address at
+ *nkeys> bool fields, store its address at
*nullFlags>, and set these null flags as needed.
- *nullFlags> can be left NULL (its initial value)
+ *nullFlags> can be left NULL (its initial value)
if all keys are non-null.
- The return value can be NULL if the query> contains no keys.
+ The return value can be NULL if the query> contains no keys.
@@ -168,8 +168,8 @@
an array of *nkeys> booleans and store its address at
*pmatch>. Each element of the array should be set to TRUE
if the corresponding key requires partial match, FALSE if not.
- If *pmatch> is set to NULL then GIN assumes partial match
- is not required. The variable is initialized to NULL before call,
+ If *pmatch> is set to NULL then GIN assumes partial match
+ is not required. The variable is initialized to NULL before call,
so this argument can simply be ignored by operator classes that do
not support partial match.
@@ -181,7 +181,7 @@
To use it, extractQuery> must allocate
an array of *nkeys> Pointers and store its address at
*extra_data>, then store whatever it wants to into the
- individual pointers. The variable is initialized to NULL before
+ individual pointers. The variable is initialized to NULL before
call, so this argument can simply be ignored by operator classes that
do not require extra data. If *extra_data> is set, the
whole array is passed to the consistent> method, and
@@ -215,7 +215,7 @@
and so are the queryKeys[]> and nullFlags[]>
arrays previously returned by extractQuery>.
extra_data> is the extra-data array returned by
- extractQuery>, or NULL if none.
+ extractQuery>, or NULL if none.
@@ -261,7 +261,7 @@
that generated the partial match query is provided, in case its
semantics are needed to determine when to end the scan. Also,
extra_data> is the corresponding element of the extra-data
- array made by extractQuery>, or NULL if none.
+ array made by extractQuery>, or NULL if none.
Null keys are never passed to this function.
@@ -305,9 +305,9 @@
- As of PostgreSQL 9.1, NULL key values can be
- included in the index. Also, placeholder NULLs are included in the index
- for indexed items that are NULL or contain no keys according to
+ As of PostgreSQL 9.1, null key values can be
+ included in the index. Also, placeholder nulls are included in the index
+ for indexed items that are null or contain no keys according to
extractValue>. This allows searches that should find empty
items to do so.
@@ -471,11 +471,11 @@
GIN assumes that indexable operators are strict. This
- means that extractValue> will not be called at all on a NULL
+ means that extractValue> will not be called at all on a null
item value (instead, a placeholder index entry is created automatically),
- and extractQuery will not be called on a NULL query
+ and extractQuery will not be called on a null query
value either (instead, the query is presumed to be unsatisfiable). Note
- however that NULL key values contained within a non-null composite item
+ however that null key values contained within a non-null composite item
or query value are supported.
diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml
index b20108da79..73c421d463 100644
--- a/doc/src/sgml/hstore.sgml
+++ b/doc/src/sgml/hstore.sgml
@@ -325,7 +325,7 @@ b
hstore_to_json(hstore)json
- get hstore as a json value
+ get hstore as a json valuehstore_to_json('"a key"=>1, b=>t, c=>null, d=>12345, e=>012345, f=>1.234, g=>2.345e+4'){"a key": "1", "b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4"}
@@ -333,7 +333,7 @@ b
hstore_to_json_loose(hstore)json
- get hstore as a json value, but attempting to distinguish numerical and boolean values so they are unquoted in the json
+ get hstore as a json value, but attempting to distinguish numerical and Boolean values so they are unquoted in the JSONhstore_to_json_loose('"a key"=>1, b=>t, c=>null, d=>12345, e=>012345, f=>1.234, g=>2.345e+4'){"a key": 1, "b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4}
diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml
index e15f60fdec..570ee90dcd 100644
--- a/doc/src/sgml/indexam.sgml
+++ b/doc/src/sgml/indexam.sgml
@@ -113,8 +113,8 @@
amoptionalkey false.
One reason that an index AM might set
amoptionalkey false is if it doesn't index
- NULLs. Since most indexable operators are
- strict and hence cannot return TRUE for NULL inputs,
+ null values. Since most indexable operators are
+ strict and hence cannot return true for null inputs,
it is at first sight attractive to not store index entries for null values:
they could never be returned by an index scan anyway. However, this
argument fails when an index scan has no restriction clause for a given
diff --git a/doc/src/sgml/information_schema.sgml b/doc/src/sgml/information_schema.sgml
index ddbc56c6e4..3ac555dad3 100644
--- a/doc/src/sgml/information_schema.sgml
+++ b/doc/src/sgml/information_schema.sgml
@@ -13,7 +13,7 @@
information schema is defined in the SQL standard and can therefore
be expected to be portable and remain stable — unlike the system
catalogs, which are specific to
- PostgreSQL and are modelled after
+ PostgreSQL and are modeled after
implementation concerns. The information schema views do not,
however, contain information about
PostgreSQL-specific features; to inquire
diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml
index f29b9d153b..4c9ce5b145 100644
--- a/doc/src/sgml/install-windows.sgml
+++ b/doc/src/sgml/install-windows.sgml
@@ -233,7 +233,7 @@ $ENV{PATH}=$ENV{PATH} . ';c:\some\where\bison\bin';
spaces in the name, such as the default location on English
installations C:\Program Files\GnuWin32.
Consider installing into C:\GnuWin32 or use the
- NTFS shortname path to GnuWin32 in your PATH environment setting
+ NTFS short name path to GnuWin32 in your PATH environment setting
(e.g. C:\PROGRA~1\GnuWin32).
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index deef3be965..07db5e4d35 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -2734,9 +2734,9 @@ char *PQresultErrorField(const PGresult *res, int fieldcode);
PG_DIAG_DATATYPE_NAME>
- If the error was associated with a specific datatype, the name
- of the datatype. (When this field is present, the schema name
- field provides the name of the datatype's schema.)
+ If the error was associated with a specific data type, the name
+ of the data type. (When this field is present, the schema name
+ field provides the name of the data type's schema.)
@@ -2787,7 +2787,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode);
- The fields for schema name, table name, column name, datatype
+ The fields for schema name, table name, column name, data type
name, and constraint name are supplied only for a limited number
of error types; see .
diff --git a/doc/src/sgml/ltree.sgml b/doc/src/sgml/ltree.sgml
index 06b262bccb..f5a0ac98d4 100644
--- a/doc/src/sgml/ltree.sgml
+++ b/doc/src/sgml/ltree.sgml
@@ -33,7 +33,7 @@
a path from the root of a hierarchical tree to a particular node. The
length of a label path must be less than 65Kb, but keeping it under 2Kb is
preferable. In practice this is not a major limitation; for example,
- the longest label path in the DMOZ catalogue () is about 240 bytes.
diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml
index b2d172f725..316add70b7 100644
--- a/doc/src/sgml/mvcc.sgml
+++ b/doc/src/sgml/mvcc.sgml
@@ -263,9 +263,9 @@
Some PostgreSQL data types and functions have
- special rules regarding transactional behaviour. In particular, changes
- made to a SEQUENCE (and therefore the counter of a
- column declared using SERIAL) are immediately visible
+ special rules regarding transactional behavior. In particular, changes
+ made to a sequence (and therefore the counter of a
+ column declared using serial) are immediately visible
to all other transactions and are not rolled back if the transaction
that made the changes aborts. See
and .
diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml
index 34eace35b6..7868fe4d17 100644
--- a/doc/src/sgml/perform.sgml
+++ b/doc/src/sgml/perform.sgml
@@ -675,7 +675,7 @@ EXPLAIN ANALYZE SELECT * FROM polygon_tbl WHERE f1 @> polygon '(0.5,2.0)';
EXPLAIN> has a BUFFERS> option that can be used with
- ANALYZE> to get even more runtime statistics:
+ ANALYZE> to get even more run time statistics:
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000;
@@ -735,7 +735,7 @@ ROLLBACK;
So above, we see the same sort of bitmap table scan we've seen already,
and its output is fed to an Update node that stores the updated rows.
It's worth noting that although the data-modifying node can take a
- considerable amount of runtime (here, it's consuming the lion's share
+ considerable amount of run time (here, it's consuming the lion's share
of the time), the planner does not currently add anything to the cost
estimates to account for that work. That's because the work to be done is
the same for every correct query plan, so it doesn't affect planning
@@ -811,7 +811,7 @@ EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000
the estimated cost and row count for the Index Scan node are shown as
though it were run to completion. But in reality the Limit node stopped
requesting rows after it got two, so the actual row count is only 2 and
- the runtime is less than the cost estimate would suggest. This is not
+ the run time is less than the cost estimate would suggest. This is not
an estimation error, only a discrepancy in the way the estimates and true
values are displayed.
diff --git a/doc/src/sgml/pgtestfsync.sgml b/doc/src/sgml/pgtestfsync.sgml
index 8c58985c90..45f0919125 100644
--- a/doc/src/sgml/pgtestfsync.sgml
+++ b/doc/src/sgml/pgtestfsync.sgml
@@ -36,7 +36,7 @@
difference in real database throughput, especially since many database servers
are not speed-limited by their transaction logs.
pg_test_fsync reports average file sync operation
- time in microseconds for each wal_sync_method, which can also be used to
+ time in microseconds for each wal_sync_method, which can also be used to
inform efforts to optimize the value of .
diff --git a/doc/src/sgml/planstats.sgml b/doc/src/sgml/planstats.sgml
index 88cc7df2f9..986d0753bf 100644
--- a/doc/src/sgml/planstats.sgml
+++ b/doc/src/sgml/planstats.sgml
@@ -432,7 +432,7 @@ rows = (outer_cardinality * inner_cardinality) * selectivity
tenk2>. But this is not the case: the join relation size
is estimated before any particular join plan has been considered. If
everything is working well then the two ways of estimating the join
- size will produce about the same answer, but due to roundoff error and
+ size will produce about the same answer, but due to round-off error and
other factors they sometimes diverge significantly.
diff --git a/doc/src/sgml/plperl.sgml b/doc/src/sgml/plperl.sgml
index 6189a14d70..10eac0e243 100644
--- a/doc/src/sgml/plperl.sgml
+++ b/doc/src/sgml/plperl.sgml
@@ -201,7 +201,7 @@ select returns_array();
Perl passes PostgreSQL arrays as a blessed
- PostgreSQL::InServer::ARRAY object. This object may be treated as an array
+ PostgreSQL::InServer::ARRAY object. This object may be treated as an array
reference or a string, allowing for backward compatibility with Perl
code written for PostgreSQL versions below 9.1 to
run. For example:
@@ -228,7 +228,7 @@ SELECT concat_array_elements(ARRAY['PL','/','Perl']);
- Multi-dimensional arrays are represented as references to
+ Multidimensional arrays are represented as references to
lower-dimensional arrays of references in a way common to every Perl
programmer.
@@ -278,7 +278,7 @@ SELECT * FROM perl_row();
PL/Perl functions can also return sets of either scalar or
composite types. Usually you'll want to return rows one at a
- time, both to speed up startup time and to keep from queueing up
+ time, both to speed up startup time and to keep from queuing up
the entire result set in memory. You can do this with
return_next as illustrated below. Note that
after the last return_next, you must put
diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml
index dbea3cd280..19498c6767 100644
--- a/doc/src/sgml/plpgsql.sgml
+++ b/doc/src/sgml/plpgsql.sgml
@@ -1292,7 +1292,7 @@ EXECUTE 'UPDATE tbl SET '
- Because quote_literal is labelled
+ Because quote_literal is labeled
STRICT, it will always return null when called with a
null argument. In the above example, if newvalue> or
keyvalue> were null, the entire dynamic query string would
@@ -2107,11 +2107,11 @@ EXIT label WHEN BEGIN block, EXIT passes
control to the next statement after the end of the block.
- Note that a label must be used for this purpose; an unlabelled
+ Note that a label must be used for this purpose; an unlabeled
EXIT is never considered to match a
BEGIN block. (This is a change from
pre-8.4 releases of PostgreSQL, which
- would allow an unlabelled EXIT to match
+ would allow an unlabeled EXIT to match
a BEGIN block.)
diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml
index 4aa798ac2e..a1c3bebb09 100644
--- a/doc/src/sgml/postgres-fdw.sgml
+++ b/doc/src/sgml/postgres-fdw.sgml
@@ -236,11 +236,11 @@
When use_remote_estimate is true,
- postgres_fdw> obtains rowcount and cost estimates from the
+ postgres_fdw> obtains row count and cost estimates from the
remote server and then adds fdw_startup_cost and
fdw_tuple_cost to the cost estimates. When
use_remote_estimate is false,
- postgres_fdw> performs local rowcount and cost estimation
+ postgres_fdw> performs local row count and cost estimation
and then adds fdw_startup_cost and
fdw_tuple_cost to the cost estimates. This local
estimation is unlikely to be very accurate unless local copies of the
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index 70165115f5..f1cafa59d2 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -4813,9 +4813,9 @@ message.
- Datatype name: if the error was associated with a specific datatype,
- the name of the datatype. (When this field is present, the schema
- name field provides the name of the datatype's schema.)
+ Data type name: if the error was associated with a specific data type,
+ the name of the data type. (When this field is present, the schema
+ name field provides the name of the data type's schema.)
@@ -4874,7 +4874,7 @@ message.
- The fields for schema name, table name, column name, datatype name, and
+ The fields for schema name, table name, column name, data type name, and
constraint name are supplied only for a limited number of error types;
see .
diff --git a/doc/src/sgml/ref/copy.sgml b/doc/src/sgml/ref/copy.sgml
index 472033dd1a..e67bd68acd 100644
--- a/doc/src/sgml/ref/copy.sgml
+++ b/doc/src/sgml/ref/copy.sgml
@@ -121,8 +121,8 @@ COPY { table_name [ ( filename
- The path name of the input or output file. An input filename can be
- an absolute or relative path, but an output filename must be an absolute
+ The path name of the input or output file. An input file name can be
+ an absolute or relative path, but an output file name must be an absolute
path. Windows users might need to use an E''> string and
double any backslashes used in the path name.
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index af11eb05a6..2f0fa53a41 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -364,7 +364,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
constraints copied by LIKE> are not merged with similarly
named columns and constraints.
If the same name is specified explicitly or in another
- LIKE clause, an error is signalled.
+ LIKE clause, an error is signaled.
The LIKE clause can also be used to copy columns from
diff --git a/doc/src/sgml/ref/create_type.sgml b/doc/src/sgml/ref/create_type.sgml
index d822037e4c..606efeee86 100644
--- a/doc/src/sgml/ref/create_type.sgml
+++ b/doc/src/sgml/ref/create_type.sgml
@@ -136,7 +136,7 @@ CREATE TYPE name
be any type with an associated b-tree operator class (to determine the
ordering of values for the range type). Normally the subtype's default
b-tree operator class is used to determine ordering; to use a non-default
- opclass, specify its name with subtype_opclass. If the subtype is
collatable, and you want to use a non-default collation in the range's
ordering, specify the desired collation with the statement
The ANALYZE option causes the statement to be actually
- executed, not only planned. Then actual runtime statistics are added to
+ executed, not only planned. Then actual run time statistics are added to
the display, including the total elapsed time expended within each plan
node (in milliseconds) and the total number of rows it actually returned.
This is useful for seeing whether the planner's estimates
diff --git a/doc/src/sgml/ref/lock.sgml b/doc/src/sgml/ref/lock.sgml
index 05acbc4f60..95d6767376 100644
--- a/doc/src/sgml/ref/lock.sgml
+++ b/doc/src/sgml/ref/lock.sgml
@@ -183,7 +183,7 @@ LOCK [ TABLE ] [ ONLY ] name [ * ]
the mode names involving ROW> are all misnomers. These
mode names should generally be read as indicating the intention of
the user to acquire row-level locks within the locked table. Also,
- ROW EXCLUSIVE> mode is a sharable table lock. Keep in
+ ROW EXCLUSIVE> mode is a shareable table lock. Keep in
mind that all the lock modes have identical semantics so far as
LOCK TABLE> is concerned, differing only in the rules
about which modes conflict with which. For information on how to
diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml
index 9fe440a66d..eb0c1d6f36 100644
--- a/doc/src/sgml/ref/pg_basebackup.sgml
+++ b/doc/src/sgml/ref/pg_basebackup.sgml
@@ -194,7 +194,7 @@ PostgreSQL documentation
- Write a minimal recovery.conf in the output directory (or into
+ Write a minimal recovery.conf in the output directory (or into
the base archive file when using tar format) to ease setting
up a standby server.
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 0ab69c3f66..40ca18c75f 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -323,10 +323,10 @@ PostgreSQL documentation
For a consistent backup, the database server needs to support synchronized snapshots,
a feature that was introduced in PostgreSQL 9.2. With this
- feature, database clients can ensure they see the same dataset even though they use
+ feature, database clients can ensure they see the same data set even though they use
different connections. pg_dump -j uses multiple database
connections; it connects to the database once with the master process and
- once again for each worker job. Without the sychronized snapshot feature, the
+ once again for each worker job. Without the synchronized snapshot feature, the
different worker jobs wouldn't be guaranteed to see the same data in each connection,
which could lead to an inconsistent backup.
diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml
index 327c3d04f7..2b955870b9 100644
--- a/doc/src/sgml/regress.sgml
+++ b/doc/src/sgml/regress.sgml
@@ -156,7 +156,7 @@ gmake installcheck
The source distribution also contains regression tests of the static
- behaviour of Hot Standby. These tests require a running primary server
+ behavior of Hot Standby. These tests require a running primary server
and a running standby server that is accepting new WAL changes from the
primary using either file-based log shipping or streaming replication.
Those servers are not automatically created for you, nor is the setup
@@ -185,9 +185,9 @@ gmake standbycheck
- Some extreme behaviours can also be generated on the primary using the
+ Some extreme behaviors can also be generated on the primary using the
script: src/test/regress/sql/hs_primary_extremes.sql
- to allow the behaviour of the standby to be tested.
+ to allow the behavior of the standby to be tested.
diff --git a/doc/src/sgml/release-9.3.sgml b/doc/src/sgml/release-9.3.sgml
index 873cc78f23..2a2f4f9cfe 100644
--- a/doc/src/sgml/release-9.3.sgml
+++ b/doc/src/sgml/release-9.3.sgml
@@ -700,7 +700,7 @@
- Allow a multi-row VALUES> clause in a rule
to reference OLD>/NEW> (Tom Lane)
@@ -911,7 +911,7 @@
Allow text timezone
designations, e.g. America/Chicago> when using
- the ISO> T> timestamptz format (Bruce Momjian)
+ the ISO> T> timestamptz format (Bruce Momjian)
@@ -1128,7 +1128,7 @@
- This allows plpy.debug(rv) to output something reasonable.
+ This allows plpy.debug(rv) to output something reasonable.
@@ -1538,7 +1538,7 @@
- Add emacs macro to match PostgreSQL> perltidy
+ Add Emacs macro to match PostgreSQL> perltidy
formatting (Peter Eisentraut)
@@ -1783,7 +1783,7 @@
- Have pg_upgrade> create unix-domain sockets in
+ Have pg_upgrade> create Unix-domain sockets in
the current directory (Bruce Momjian, Tom Lane)
diff --git a/doc/src/sgml/sepgsql.sgml b/doc/src/sgml/sepgsql.sgml
index 2f7b132336..9a9b5906ba 100644
--- a/doc/src/sgml/sepgsql.sgml
+++ b/doc/src/sgml/sepgsql.sgml
@@ -315,7 +315,7 @@ $ sudo semodule -r sepgsql-regtest
control rules as relationships between a subject entity (typically,
a client of the database) and an object entity (such as a database
object), each of which is
- identified by a security label. If access to an unlabelled object is
+ identified by a security label. If access to an unlabeled object is
attempted, the object is treated as if it were assigned the label
unlabeled_t>.
@@ -397,7 +397,7 @@ UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100;
user tries to execute a function as a part of query, or using fast-path
invocation. If this function is a trusted procedure, it also checks
db_procedure:{entrypoint}> permission to check whether it
- can perform as entrypoint of trusted procedure.
+ can perform as the entry point of a trusted procedure.
diff --git a/doc/src/sgml/wal.sgml b/doc/src/sgml/wal.sgml
index 97961c0592..059697e2b3 100644
--- a/doc/src/sgml/wal.sgml
+++ b/doc/src/sgml/wal.sgml
@@ -148,7 +148,7 @@
there is little it can do to make sure the data has arrived at a truly
non-volatile storage area. Rather, it is the
administrator's responsibility to make certain that all storage components
- ensure integrity for both data and filesystem metadata.
+ ensure integrity for both data and file-system metadata.
Avoid disk controllers that have non-battery-backed write caches.
At the drive level, disable write-back caching if the
drive cannot guarantee the data will be written before shutdown.
@@ -200,8 +200,8 @@
- Internal data structures such as pg_clog, pg_subtrans, pg_multixact,
- pg_serial, pg_notify, pg_stat, pg_snapshots are not directly
+ Internal data structures such as pg_clog, pg_subtrans, pg_multixact,
+ pg_serial, pg_notify, pg_stat, pg_snapshots are not directly
checksummed, nor are pages protected by full page writes. However, where
such data structures are persistent, WAL records are written that allow
recent changes to be accurately rebuilt at crash recovery and those
@@ -210,7 +210,7 @@
- Individual state files in pg_twophase are protected by CRC-32.
+ Individual state files in pg_twophase are protected by CRC-32.