From 8b5a3998a104ef5918b50e207be0aa86e085d49d Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Thu, 30 May 2013 21:05:07 -0400
Subject: [PATCH] Remove whitespace from end of lines

---
 doc/src/sgml/maintenance.sgml        |  2 +-
 doc/src/sgml/ref/create_table.sgml   |  2 +-
 doc/src/sgml/ref/notify.sgml         |  2 +-
 doc/src/sgml/ref/pg_dump.sgml        |  2 +-
 src/Makefile.global.in               |  2 +-
 src/backend/catalog/system_views.sql |  4 ++--
 src/test/regress/expected/json.out   | 12 ++++++------
 src/test/regress/sql/json.sql        | 12 ++++++------
 src/tools/msvc/Mkvcbuild.pm          |  4 ++--
 src/tools/pgindent/perltidyrc        | 14 +++++++-------
 src/tools/pgindent/pgindent          |  2 +-
 src/tools/pgindent/pgindent.man      |  8 ++++----
 12 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml
index d2bd68f501..c05b5262cb 100644
--- a/doc/src/sgml/maintenance.sgml
+++ b/doc/src/sgml/maintenance.sgml
@@ -632,7 +632,7 @@ HINT:  Stop the postmaster and use a standalone backend to VACUUM in "mydb".
    autovacuum_max_workers databases to be
    processed, the next database will be processed as soon as the first
    worker finishes.  Each worker process will check each table within
    its database and
-   execute VACUUM and/or ANALYZE as needed. 
+   execute VACUUM and/or ANALYZE as needed.
    log_autovacuum_min_duration can be
    used to monitor autovacuum activity.
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 2f0fa53a41..26eca6731c 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -336,7 +336,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
    Not-null constraints are always copied to the new table.
    CHECK constraints will be copied only if
-   INCLUDING CONSTRAINTS is specified. 
+   INCLUDING CONSTRAINTS is specified.
    Indexes, PRIMARY KEY, and UNIQUE constraints
    on the original table will be created on the new table only if the
    INCLUDING INDEXES clause is specified.
diff --git a/doc/src/sgml/ref/notify.sgml b/doc/src/sgml/ref/notify.sgml
index a9405fdd31..307d2f6282 100644
--- a/doc/src/sgml/ref/notify.sgml
+++ b/doc/src/sgml/ref/notify.sgml
@@ -33,7 +33,7 @@ NOTIFY channel [ , payload
   string to each client application that has previously executed
   LISTEN channel
-  for the specified channel name in the current database. 
+  for the specified channel name in the current database.
   Notifications are visible to all users.
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 40ca18c75f..2b5e95bfe9 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -324,7 +324,7 @@ PostgreSQL documentation
   For a consistent backup, the database server needs to support
   synchronized snapshots, a feature that was introduced in PostgreSQL 9.2.
   With this feature, database clients can ensure they see the same data set
   even though they use
-  different connections. pg_dump -j uses multiple database 
+  different connections. pg_dump -j uses multiple database
   connections; it connects to the database once with the master process and
   once again for each worker job. Without the synchronized snapshot feature,
   the different worker jobs wouldn't be guaranteed to see the same data in
   each connection,
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index 89e39d2fa0..8bfb77d7df 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -434,7 +434,7 @@ PL_TESTDB = pl_regression
 CONTRIB_TESTDB = contrib_regression
 ifneq ($(MODULE_big),)
   CONTRIB_TESTDB_MODULE = contrib_regression_$(MODULE_big)
-else 
+else
 ifneq ($(MODULES),)
   CONTRIB_TESTDB_MODULE = contrib_regression_$(MODULES)
 else
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index a03bfa684b..81d7c4fec8 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -788,10 +788,10 @@ CREATE OR REPLACE FUNCTION
   pg_start_backup(label text, fast boolean DEFAULT false)
   RETURNS text STRICT VOLATILE LANGUAGE internal AS 'pg_start_backup';
 
-CREATE OR REPLACE FUNCTION 
+CREATE OR REPLACE FUNCTION
   json_populate_record(base anyelement, from_json json, use_json_as_text boolean DEFAULT false)
   RETURNS anyelement LANGUAGE internal STABLE AS 'json_populate_record';
 
-CREATE OR REPLACE FUNCTION 
+CREATE OR REPLACE FUNCTION
  json_populate_recordset(base anyelement, from_json json, use_json_as_text boolean DEFAULT false)
  RETURNS SETOF anyelement LANGUAGE internal STABLE ROWS 100 AS 'json_populate_recordset';
diff --git a/src/test/regress/expected/json.out b/src/test/regress/expected/json.out
index 01f0679c87..1d7cf5ff2f 100644
--- a/src/test/regress/expected/json.out
+++ b/src/test/regress/expected/json.out
@@ -466,15 +466,15 @@ INSERT INTO test_json VALUES
 ('scalar','"a scalar"'),
 ('array','["zero", "one","two",null,"four","five"]'),
 ('object','{"field1":"val1","field2":"val2","field3":null}');
-SELECT test_json -> 'x' 
+SELECT test_json -> 'x'
 FROM test_json
 WHERE json_type = 'scalar';
 ERROR:  cannot extract element from a scalar
-SELECT test_json -> 'x' 
+SELECT test_json -> 'x'
 FROM test_json
 WHERE json_type = 'array';
 ERROR:  cannot extract field from a non-object
-SELECT test_json -> 'x' 
+SELECT test_json -> 'x'
 FROM test_json
 WHERE json_type = 'object';
  ?column? 
@@ -490,7 +490,7 @@ WHERE json_type = 'object';
  "val2"
 (1 row)
 
-SELECT test_json->>'field2' 
+SELECT test_json->>'field2'
 FROM test_json
 WHERE json_type = 'object';
  ?column? 
@@ -498,11 +498,11 @@ WHERE json_type = 'object';
  val2
 (1 row)
 
-SELECT test_json -> 2 
+SELECT test_json -> 2
 FROM test_json
 WHERE json_type = 'scalar';
 ERROR:  cannot extract element from a scalar
-SELECT test_json -> 2 
+SELECT test_json -> 2
 FROM test_json
 WHERE json_type = 'array';
  ?column? 
diff --git a/src/test/regress/sql/json.sql b/src/test/regress/sql/json.sql
index 04b22fe297..8a136d7a27 100644
--- a/src/test/regress/sql/json.sql
+++ b/src/test/regress/sql/json.sql
@@ -139,15 +139,15 @@ INSERT INTO test_json VALUES
 ('array','["zero", "one","two",null,"four","five"]'),
 ('object','{"field1":"val1","field2":"val2","field3":null}');
 
-SELECT test_json -> 'x' 
+SELECT test_json -> 'x'
 FROM test_json
 WHERE json_type = 'scalar';
 
-SELECT test_json -> 'x' 
+SELECT test_json -> 'x'
 FROM test_json
 WHERE json_type = 'array';
 
-SELECT test_json -> 'x' 
+SELECT test_json -> 'x'
 FROM test_json
 WHERE json_type = 'object';
 
@@ -155,15 +155,15 @@ SELECT test_json->'field2'
 FROM test_json
 WHERE json_type = 'object';
 
-SELECT test_json->>'field2' 
+SELECT test_json->>'field2'
 FROM test_json
 WHERE json_type = 'object';
 
-SELECT test_json -> 2 
+SELECT test_json -> 2
 FROM test_json
 WHERE json_type = 'scalar';
 
-SELECT test_json -> 2 
+SELECT test_json -> 2
 FROM test_json
 WHERE json_type = 'array';
 
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index e1650a9812..7964c01886 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -523,8 +523,8 @@ sub mkvcbuild
         my $mf = Project::read_file(
             'src\backend\utils\mb\conversion_procs\\' . $sub . '\Makefile');
         my $p = $solution->AddProject($sub, 'dll', 'conversion procs');
-        $p->AddFile('src\backend\utils\mb\conversion_procs\\' 
-            . $sub . '\\' 
+        $p->AddFile('src\backend\utils\mb\conversion_procs\\'
+            . $sub . '\\'
             . $sub . '.c');
 
         if ($mf =~ m{^SRCS\s*\+=\s*(.*)$}m)
diff --git a/src/tools/pgindent/perltidyrc b/src/tools/pgindent/perltidyrc
index 60489febb5..e8ae7c5d8b 100644
--- a/src/tools/pgindent/perltidyrc
+++ b/src/tools/pgindent/perltidyrc
@@ -1,12 +1,12 @@
---add-whitespace 
---backup-and-modify-in-place 
+--add-whitespace
+--backup-and-modify-in-place
 --delete-old-whitespace
---entab-leading-whitespace=4 
---keep-old-blank-lines=2 
+--entab-leading-whitespace=4
+--keep-old-blank-lines=2
 --maximum-line-length=78
---nospace-for-semicolon 
+--nospace-for-semicolon
 --opening-brace-on-new-line
 --output-line-ending=unix
---paren-tightness=2 
---vertical-tightness=2 
+--paren-tightness=2
+--vertical-tightness=2
 --vertical-tightness-closing=2
diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent
index 584218f384..bc83c1a95c 100755
--- a/src/tools/pgindent/pgindent
+++ b/src/tools/pgindent/pgindent
@@ -310,7 +310,7 @@ sub post_indent
     $source =~ s!
         (\n$ident[^(\n]*)\n        # e.g. static void
         (
-        $ident\(\n?                # func_name( 
+        $ident\(\n?                # func_name(
         (.*,([ \t]*$comment)?\n)*  # args b4 final ln
         .*\);([ \t]*$comment)?$    # final line
         )
diff --git a/src/tools/pgindent/pgindent.man b/src/tools/pgindent/pgindent.man
index cff092ca7a..f3a68acc2a 100644
--- a/src/tools/pgindent/pgindent.man
+++ b/src/tools/pgindent/pgindent.man
@@ -6,9 +6,9 @@ or the environment.
 In its simplest form, if all the required objects are installed, simply run it
 without any parameters at the top of the source tree you want to process.
 
-   pgindent 
+   pgindent
 
-If you don't have all the requirements installed, pgindent will fetch and build 
+If you don't have all the requirements installed, pgindent will fetch and build
 them for you, if you're in a PostgreSQL source tree:
 
@@ -23,7 +23,7 @@ command line option --indent:
 Similarly, the entab program can be specified using the PGENTAB environment
 variable, or using the --entab command line option.
 
-pgindent also needs a file containing a list of typedefs. This can be 
+pgindent also needs a file containing a list of typedefs. This can be
 specified using the PGTYPEDEFS environment variable, or via the command line
 --typedefs option. If neither is used, it will look for it within the current
 source tree, or in /usr/local/etc/typedefs.list.
@@ -40,6 +40,6 @@ src/tools/pgindent/exclude_file_patterns.
 
 Any non-option arguments are taken as the names of files to be indented. In this
 case only these files will be changed, and nothing else will be touched. If the
 first non-option argument is not a .c or .h file, it is treated as the name
-of a typedefs file for legacy reasons, but this use is deprecated - use the 
+of a typedefs file for legacy reasons, but this use is deprecated - use the
 --typedefs option instead.