From fc946c39aeacdff7df60c83fca6582985e8546c8 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Tue, 23 Nov 2010 22:27:50 +0200
Subject: [PATCH] Remove useless whitespace at end of lines

---
 README | 2 +-
 config/ac_func_accept_argtypes.m4 | 4 +-
 config/general.m4 | 2 +-
 configure.in | 28 +-
 contrib/README | 6 +-
 contrib/btree_gin/Makefile | 2 +-
 contrib/btree_gin/expected/cidr.out | 2 +-
 contrib/btree_gin/expected/date.out | 2 +-
 contrib/btree_gin/expected/inet.out | 2 +-
 contrib/btree_gin/expected/interval.out | 2 +-
 contrib/btree_gin/expected/macaddr.out | 2 +-
 contrib/btree_gin/expected/time.out | 2 +-
 contrib/btree_gin/expected/timestamp.out | 2 +-
 contrib/btree_gin/expected/timestamptz.out | 2 +-
 contrib/btree_gin/expected/timetz.out | 2 +-
 contrib/btree_gin/sql/cidr.sql | 2 +-
 contrib/btree_gin/sql/date.sql | 2 +-
 contrib/btree_gin/sql/inet.sql | 2 +-
 contrib/btree_gin/sql/interval.sql | 2 +-
 contrib/btree_gin/sql/macaddr.sql | 2 +-
 contrib/btree_gin/sql/time.sql | 2 +-
 contrib/btree_gin/sql/timestamp.sql | 2 +-
 contrib/btree_gin/sql/timestamptz.sql | 2 +-
 contrib/btree_gin/sql/timetz.sql | 2 +-
 contrib/btree_gist/btree_gist.sql.in | 84 +--
 contrib/btree_gist/uninstall_btree_gist.sql | 18 +-
 contrib/citext/citext.sql.in | 2 +-
 contrib/citext/expected/citext.out | 4 +-
 contrib/citext/expected/citext_1.out | 4 +-
 contrib/citext/sql/citext.sql | 4 +-
 contrib/cube/CHANGES | 4 +-
 contrib/cube/cube.sql.in | 10 +-
 contrib/cube/cubeparse.y | 32 +-
 contrib/cube/cubescan.l | 6 +-
 contrib/cube/expected/cube.out | 12 +-
 contrib/cube/expected/cube_1.out | 12 +-
 contrib/cube/expected/cube_2.out | 12 +-
 contrib/cube/sql/cube.sql | 12 +-
 contrib/dblink/Makefile | 4 +-
 contrib/dblink/dblink.sql.in | 4 +-
 contrib/dblink/expected/dblink.out | 8 +-
 contrib/dblink/sql/dblink.sql | 8 +-
 contrib/earthdistance/earthdistance.sql.in | 4 +-
 contrib/fuzzystrmatch/fuzzystrmatch.sql.in | 4 +-
 contrib/hstore/expected/hstore.out | 2 +-
 contrib/hstore/sql/hstore.sql | 2 +-
 contrib/intarray/Makefile | 2 +-
 contrib/intarray/bench/bench.pl | 22 +-
 contrib/intarray/bench/create_test.pl | 4 +-
 contrib/isn/ISBN.h | 2 +-
 contrib/ltree/ltree.sql.in | 6 +-
 contrib/ltree/uninstall_ltree.sql | 2 +-
 contrib/pg_buffercache/Makefile | 6 +-
 contrib/pg_buffercache/pg_buffercache.sql.in | 4 +-
 contrib/pg_freespacemap/Makefile | 6 +-
 contrib/pg_trgm/pg_trgm.sql.in | 2 +-
 contrib/pg_trgm/uninstall_pg_trgm.sql | 2 +-
 contrib/pg_upgrade/IMPLEMENTATION | 2 +-
 contrib/pg_upgrade/TESTING | 10 +-
 contrib/pg_upgrade/relfilenode.c | 2 +-
 contrib/pgcrypto/expected/blowfish.out | 8 +-
 contrib/pgcrypto/expected/crypt-blowfish.out | 2 +-
 contrib/pgcrypto/expected/rijndael.out | 8 +-
 contrib/pgcrypto/rijndael.tbl | 2 +-
 contrib/pgcrypto/sql/blowfish.sql | 8 +-
 contrib/pgcrypto/sql/crypt-blowfish.sql | 2 +-
 contrib/pgcrypto/sql/rijndael.sql | 8 +-
 contrib/seg/expected/seg.out | 4 +-
 contrib/seg/expected/seg_1.out | 4 +-
 contrib/seg/seg.sql.in | 12 +-
 contrib/seg/segparse.y | 6 +-
 contrib/seg/segscan.l | 6 +-
 contrib/seg/sort-segments.pl | 2 +-
 contrib/seg/sql/seg.sql | 4 +-
 contrib/spi/autoinc.example | 10 +-
 contrib/spi/autoinc.sql.in | 4 +-
 contrib/spi/insert_username.example | 2 +-
 contrib/spi/insert_username.sql.in | 4 +-
 contrib/spi/moddatetime.example | 2 +-
 contrib/spi/refint.example | 10 +-
 contrib/spi/timetravel.example | 18 +-
 contrib/spi/timetravel.sql.in | 12 +-
 contrib/start-scripts/osx/PostgreSQL | 4 +-
 contrib/test_parser/expected/test_parser.out | 2 +-
 contrib/test_parser/sql/test_parser.sql | 2 +-
 contrib/tsearch2/expected/tsearch2.out | 8 +-
 contrib/tsearch2/expected/tsearch2_1.out | 8 +-
 contrib/tsearch2/sql/tsearch2.sql | 10 +-
 contrib/tsearch2/tsearch2.sql.in | 26 +-
 contrib/unaccent/Makefile | 2 +-
 contrib/xml2/expected/xml2.out | 2 +-
 contrib/xml2/expected/xml2_1.out | 2 +-
 contrib/xml2/sql/xml2.sql | 2 +-
 doc/bug.template | 2 +-
 doc/src/sgml/Makefile | 2 +-
 doc/src/sgml/auto-explain.sgml | 4 +-
 doc/src/sgml/biblio.sgml | 4 +-
 doc/src/sgml/charset.sgml | 2 +-
 doc/src/sgml/config.sgml | 76 +-
 doc/src/sgml/contacts.sgml | 2 +-
 doc/src/sgml/contrib.sgml | 2 +-
 doc/src/sgml/datatype.sgml | 52 +-
 doc/src/sgml/datetime.sgml | 16 +-
 doc/src/sgml/dfunc.sgml | 6 +-
 doc/src/sgml/docguide.sgml | 28 +-
 doc/src/sgml/ecpg.sgml | 71 +-
 doc/src/sgml/extend.sgml | 14 +-
 doc/src/sgml/external-projects.sgml | 4 +-
 doc/src/sgml/filelist.sgml | 2 +-
 doc/src/sgml/func.sgml | 144 ++--
 doc/src/sgml/history.sgml | 4 +-
 doc/src/sgml/info.sgml | 2 +-
 doc/src/sgml/install-windows.sgml | 14 +-
 doc/src/sgml/keywords.sgml | 2 +-
 doc/src/sgml/legal.sgml | 2 +-
 doc/src/sgml/libpq.sgml | 2 +-
 doc/src/sgml/lobj.sgml | 16 +-
 doc/src/sgml/mvcc.sgml | 8 +-
 doc/src/sgml/pgarchivecleanup.sgml | 2 +-
 doc/src/sgml/pgupgrade.sgml | 124 +--
 doc/src/sgml/plperl.sgml | 10 +-
 doc/src/sgml/pltcl.sgml | 4 +-
 doc/src/sgml/problems.sgml | 6 +-
 doc/src/sgml/ref/abort.sgml | 2 +-
 doc/src/sgml/ref/alter_aggregate.sgml | 8 +-
 doc/src/sgml/ref/alter_conversion.sgml | 8 +-
 doc/src/sgml/ref/alter_database.sgml | 8 +-
 doc/src/sgml/ref/alter_domain.sgml | 6 +-
 doc/src/sgml/ref/alter_function.sgml | 10 +-
 doc/src/sgml/ref/alter_group.sgml | 4 +-
 doc/src/sgml/ref/alter_index.sgml | 8 +-
 doc/src/sgml/ref/alter_language.sgml | 2 +-
 doc/src/sgml/ref/alter_large_object.sgml | 2 +-
 doc/src/sgml/ref/alter_opclass.sgml | 8 +-
 doc/src/sgml/ref/alter_operator.sgml | 10 +-
 doc/src/sgml/ref/alter_opfamily.sgml | 16 +-
 doc/src/sgml/ref/alter_schema.sgml | 2 +-
 doc/src/sgml/ref/alter_sequence.sgml | 2 +-
 doc/src/sgml/ref/alter_tablespace.sgml | 10 +-
 doc/src/sgml/ref/alter_tsconfig.sgml | 10 +-
 doc/src/sgml/ref/alter_tsdictionary.sgml | 14 +-
 doc/src/sgml/ref/alter_tsparser.sgml | 8 +-
 doc/src/sgml/ref/alter_tstemplate.sgml | 8 +-
 doc/src/sgml/ref/alter_user.sgml | 6 +-
 doc/src/sgml/ref/alter_view.sgml | 8 +-
 doc/src/sgml/ref/begin.sgml | 14 +-
 doc/src/sgml/ref/close.sgml | 6 +-
 doc/src/sgml/ref/clusterdb.sgml | 10 +-
 doc/src/sgml/ref/comment.sgml | 10 +-
 doc/src/sgml/ref/commit.sgml | 6 +-
 doc/src/sgml/ref/commit_prepared.sgml | 4 +-
 doc/src/sgml/ref/create_aggregate.sgml | 12 +-
 doc/src/sgml/ref/create_conversion.sgml | 4 +-
 doc/src/sgml/ref/create_function.sgml | 2 +-
 doc/src/sgml/ref/create_group.sgml | 12 +-
 doc/src/sgml/ref/create_opclass.sgml | 12 +-
 doc/src/sgml/ref/create_operator.sgml | 10 +-
 doc/src/sgml/ref/create_opfamily.sgml | 4 +-
 doc/src/sgml/ref/create_role.sgml | 14 +-
 doc/src/sgml/ref/create_sequence.sgml | 2 +-
 doc/src/sgml/ref/create_table_as.sgml | 4 +-
 doc/src/sgml/ref/create_tsconfig.sgml | 6 +-
 doc/src/sgml/ref/create_tsdictionary.sgml | 8 +-
 doc/src/sgml/ref/create_tsparser.sgml | 4 +-
 doc/src/sgml/ref/create_tstemplate.sgml | 4 +-
 doc/src/sgml/ref/create_user.sgml | 10 +-
 doc/src/sgml/ref/createdb.sgml | 10 +-
 doc/src/sgml/ref/createlang.sgml | 20 +-
 doc/src/sgml/ref/createuser.sgml | 14 +-
 doc/src/sgml/ref/drop_cast.sgml | 6 +-
 doc/src/sgml/ref/drop_conversion.sgml | 4 +-
 doc/src/sgml/ref/drop_database.sgml | 6 +-
 doc/src/sgml/ref/drop_domain.sgml | 6 +-
 doc/src/sgml/ref/drop_function.sgml | 6 +-
 doc/src/sgml/ref/drop_index.sgml | 2 +-
 doc/src/sgml/ref/drop_language.sgml | 8 +-
 doc/src/sgml/ref/drop_opclass.sgml | 10 +-
 doc/src/sgml/ref/drop_operator.sgml | 8 +-
 doc/src/sgml/ref/drop_opfamily.sgml | 6 +-
 doc/src/sgml/ref/drop_role.sgml | 6 +-
 doc/src/sgml/ref/drop_rule.sgml | 4 +-
 doc/src/sgml/ref/drop_schema.sgml | 12 +-
 doc/src/sgml/ref/drop_sequence.sgml | 12 +-
 doc/src/sgml/ref/drop_table.sgml | 14 +-
 doc/src/sgml/ref/drop_tablespace.sgml | 2 +-
 doc/src/sgml/ref/drop_trigger.sgml | 10 +-
 doc/src/sgml/ref/drop_tsconfig.sgml | 6 +-
 doc/src/sgml/ref/drop_tsdictionary.sgml | 6 +-
 doc/src/sgml/ref/drop_tsparser.sgml | 6 +-
 doc/src/sgml/ref/drop_tstemplate.sgml | 6 +-
 doc/src/sgml/ref/drop_type.sgml | 6 +-
 doc/src/sgml/ref/drop_view.sgml | 12 +-
 doc/src/sgml/ref/dropdb.sgml | 8 +-
 doc/src/sgml/ref/droplang.sgml | 20 +-
 doc/src/sgml/ref/dropuser.sgml | 12 +-
 doc/src/sgml/ref/ecpg-ref.sgml | 2 +-
 doc/src/sgml/ref/end.sgml | 6 +-
 doc/src/sgml/ref/fetch.sgml | 2 +-
 doc/src/sgml/ref/initdb.sgml | 10 +-
 doc/src/sgml/ref/lock.sgml | 8 +-
 doc/src/sgml/ref/move.sgml | 2 +-
 doc/src/sgml/ref/pg_config-ref.sgml | 2 +-
 doc/src/sgml/ref/pg_controldata.sgml | 4 +-
 doc/src/sgml/ref/pg_dumpall.sgml | 2 +-
 doc/src/sgml/ref/pg_resetxlog.sgml | 4 +-
 doc/src/sgml/ref/postgres-ref.sgml | 8 +-
 doc/src/sgml/ref/prepare.sgml | 2 +-
 doc/src/sgml/ref/reindexdb.sgml | 10 +-
 doc/src/sgml/ref/release_savepoint.sgml | 4 +-
 doc/src/sgml/ref/rollback_prepared.sgml | 4 +-
 doc/src/sgml/ref/savepoint.sgml | 6 +-
 doc/src/sgml/ref/security_label.sgml | 2 +-
 doc/src/sgml/ref/set.sgml | 4 +-
 doc/src/sgml/ref/values.sgml | 2 +-
 doc/src/sgml/regress.sgml | 28 +-
 doc/src/sgml/rowtypes.sgml | 4 +-
 doc/src/sgml/runtime.sgml | 16 +-
 doc/src/sgml/sql.sgml | 88 +--
 doc/src/sgml/start.sgml | 2 +-
 doc/src/sgml/stylesheet.css | 2 +-
 doc/src/sgml/stylesheet.dsl | 32 +-
 doc/src/sgml/vacuumlo.sgml | 2 +-
 doc/src/sgml/wal.sgml | 2 +-
 doc/src/sgml/xindex.sgml | 8 +-
 doc/src/sgml/xoper.sgml | 2 +-
 doc/src/sgml/xtypes.sgml | 2 +-
 src/Makefile.global.in | 12 +-
 src/Makefile.shlib | 2 +-
 src/backend/Makefile | 4 +-
 src/backend/access/gin/README | 32 +-
 src/backend/access/gist/README | 52 +-
 src/backend/access/nbtree/README | 2 +-
 src/backend/access/transam/xlog.c | 2 +-
 src/backend/bootstrap/Makefile | 2 +-
 src/backend/catalog/information_schema.sql | 6 +-
 src/backend/catalog/objectaddress.c | 4 +-
 src/backend/catalog/system_views.sql | 354 ++++-----
 src/backend/commands/comment.c | 2 +-
 src/backend/commands/copy.c | 2 +-
 src/backend/commands/explain.c | 4 +-
 src/backend/commands/tablespace.c | 2 +-
 src/backend/libpq/README.SSL | 2 +-
 src/backend/nodes/README | 4 +-
 src/backend/optimizer/plan/README | 34 +-
 src/backend/parser/scan.l | 6 +-
 src/backend/port/Makefile | 6 +-
 src/backend/port/aix/mkldexport.sh | 4 +-
 src/backend/port/darwin/README | 2 +-
 src/backend/port/tas/sunstudio_sparc.s | 4 +-
 src/backend/snowball/Makefile | 2 +-
 src/backend/storage/buffer/README | 2 +-
 src/backend/storage/freespace/README | 2 +-
 src/backend/storage/ipc/README | 2 +-
 src/backend/storage/lmgr/Makefile | 2 +-
 src/backend/storage/lmgr/README | 4 +-
 src/backend/tsearch/wparser_def.c | 76 +-
 src/backend/utils/Gen_fmgrtab.pl | 2 +-
 src/backend/utils/adt/numeric.c | 4 +-
 src/backend/utils/adt/varlena.c | 20 +-
 src/backend/utils/adt/xml.c | 6 +-
 src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl | 2 +-
 .../utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl | 10 +-
 src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl | 2 +-
 src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl | 2 +-
 src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl | 2 +-
 .../utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl | 8 +-
 src/backend/utils/mb/Unicode/UCS_to_SJIS.pl | 2 +-
 src/backend/utils/mb/Unicode/ucs2utf.pl | 4 +-
 src/backend/utils/misc/Makefile | 2 +-
 src/backend/utils/misc/check_guc | 22 +-
 src/backend/utils/misc/guc-file.l | 8 +-
 src/backend/utils/misc/postgresql.conf.sample | 12 +-
 src/backend/utils/mmgr/README | 2 +-
 src/bcc32.mak | 8 +-
 src/bin/pg_dump/README | 6 +-
 src/bin/pg_dump/pg_dump.c | 2 +-
 src/bin/psql/psqlscan.l | 6 +-
 src/include/catalog/objectaddress.h | 2 +-
 src/include/pg_config.h.win32 | 4 +-
 src/include/storage/s_lock.h | 2 +-
 src/interfaces/ecpg/README.dynSQL | 2 +-
 src/interfaces/ecpg/ecpglib/prepare.c | 2 +-
 src/interfaces/ecpg/preproc/Makefile | 2 +-
 src/interfaces/ecpg/preproc/check_rules.pl | 4 +-
 src/interfaces/ecpg/preproc/ecpg.addons | 4 +-
 src/interfaces/ecpg/preproc/ecpg.header | 2 +-
 src/interfaces/ecpg/preproc/ecpg.tokens | 4 +-
 src/interfaces/ecpg/preproc/ecpg.trailer | 14 +-
 src/interfaces/ecpg/preproc/ecpg.type | 2 +-
 src/interfaces/ecpg/preproc/parse.pl | 16 +-
 src/interfaces/ecpg/preproc/pgc.l | 44 +-
 src/interfaces/ecpg/test/Makefile.regress | 4 +-
 .../ecpg/test/compat_informix/describe.pgc | 2 +-
 .../ecpg/test/compat_informix/sqlda.pgc | 2 +-
 .../test/compat_informix/test_informix.pgc | 2 +-
 .../test/compat_informix/test_informix2.pgc | 6 +-
 .../test/expected/compat_informix-describe.c | 2 +-
 .../test/expected/compat_informix-sqlda.c | 2 +-
 .../expected/compat_informix-test_informix.c | 2 +-
 .../expected/compat_informix-test_informix2.c | 4 +-
 .../ecpg/test/expected/pgtypeslib-dt_test.c | 12 +-
 .../test/expected/preproc-array_of_struct.c | 2 +-
 .../ecpg/test/expected/preproc-cursor.c | 2 +-
 .../ecpg/test/expected/preproc-init.c | 4 +-
 .../ecpg/test/expected/preproc-outofscope.c | 2 +-
 .../ecpg/test/expected/preproc-variable.c | 2 +-
 .../ecpg/test/expected/preproc-whenever.c | 2 +-
 src/interfaces/ecpg/test/expected/sql-array.c | 4 +-
 .../ecpg/test/expected/sql-code100.c | 10 +-
 .../ecpg/test/expected/sql-describe.c | 2 +-
 .../ecpg/test/expected/sql-dynalloc.c | 16 +-
 .../ecpg/test/expected/sql-dynalloc2.c | 4 +-
 src/interfaces/ecpg/test/expected/sql-fetch.c | 2 +-
 src/interfaces/ecpg/test/expected/sql-sqlda.c | 2 +-
 .../ecpg/test/pgtypeslib/dt_test.pgc | 12 +-
 .../ecpg/test/preproc/array_of_struct.pgc | 2 +-
 src/interfaces/ecpg/test/preproc/cursor.pgc | 2 +-
 src/interfaces/ecpg/test/preproc/init.pgc | 4 +-
 .../ecpg/test/preproc/outofscope.pgc | 2 +-
 src/interfaces/ecpg/test/preproc/variable.pgc | 2 +-
 src/interfaces/ecpg/test/preproc/whenever.pgc | 2 +-
 src/interfaces/ecpg/test/sql/Makefile | 2 +-
 src/interfaces/ecpg/test/sql/array.pgc | 4 +-
 src/interfaces/ecpg/test/sql/code100.pgc | 12 +-
 src/interfaces/ecpg/test/sql/describe.pgc | 2 +-
 src/interfaces/ecpg/test/sql/dynalloc.pgc | 16 +-
 src/interfaces/ecpg/test/sql/dynalloc2.pgc | 4 +-
 src/interfaces/ecpg/test/sql/fetch.pgc | 2 +-
 src/interfaces/ecpg/test/sql/sqlda.pgc | 2 +-
 src/interfaces/libpq/bcc32.mak | 8 +-
 src/interfaces/libpq/pg_service.conf.sample | 4 +-
 src/interfaces/libpq/win32.mak | 10 +-
 src/makefiles/Makefile.darwin | 2 +-
 src/makefiles/Makefile.irix | 2 +-
 src/makefiles/pgxs.mk | 2 +-
 src/pl/plperl/GNUmakefile | 4 +-
 src/pl/plperl/SPI.xs | 18 +-
 src/pl/plperl/Util.xs | 4 +-
 src/pl/plperl/expected/plperl.out | 6 +-
 src/pl/plperl/expected/plperl_plperlu.out | 2 -
 src/pl/plperl/expected/plperl_trigger.out | 18 +-
 src/pl/plperl/plc_trusted.pl | 2 +-
 src/pl/plperl/plperl.c | 6 +-
 src/pl/plperl/sql/plperl.sql | 6 +-
 src/pl/plperl/sql/plperl_plperlu.sql | 4 +-
 src/pl/plperl/sql/plperl_trigger.sql | 20 +-
 src/pl/plperl/text2macro.pl | 2 +-
 src/pl/plpgsql/src/gram.y | 2 +-
 src/pl/plpython/expected/plpython_newline.out | 2 +-
 src/pl/plpython/expected/plpython_schema.out | 2 +-
 src/pl/plpython/expected/plpython_trigger.out | 2 +-
 src/pl/plpython/sql/plpython_newline.sql | 2 +-
 src/pl/plpython/sql/plpython_schema.sql | 2 +-
 src/pl/plpython/sql/plpython_trigger.sql | 2 +-
 src/pl/tcl/expected/pltcl_setup.out | 10 +-
 src/pl/tcl/sql/pltcl_setup.sql | 10 +-
 src/test/examples/Makefile | 2 +-
 src/test/locale/Makefile | 2 +-
 src/test/locale/README | 2 +-
 src/test/locale/de_DE.ISO8859-1/Makefile | 6 +-
 src/test/locale/gr_GR.ISO8859-7/Makefile | 6 +-
 src/test/locale/koi8-r/Makefile | 6 +-
 src/test/locale/koi8-to-win1251/Makefile | 6 +-
 src/test/mb/mbregress.sh | 2 +-
 src/test/performance/runtests.pl | 16 +-
 src/test/regress/GNUmakefile | 2 +-
 src/test/regress/expected/abstime.out | 6 +-
 src/test/regress/expected/aggregates.out | 4 +-
 src/test/regress/expected/alter_table.out | 14 +-
 src/test/regress/expected/arrays.out | 18 +-
 src/test/regress/expected/bit.out | 14 +-
 src/test/regress/expected/bitmapops.out | 2 +-
 src/test/regress/expected/boolean.out | 12 +-
 src/test/regress/expected/box.out | 46 +-
 src/test/regress/expected/char.out | 6 +-
 src/test/regress/expected/char_1.out | 6 +-
 src/test/regress/expected/char_2.out | 6 +-
 src/test/regress/expected/cluster.out | 2 +-
 src/test/regress/expected/copyselect.out | 2 +-
 .../regress/expected/create_aggregate.out | 4 +-
 src/test/regress/expected/create_index.out | 5 +-
 src/test/regress/expected/create_misc.out | 22 +-
 src/test/regress/expected/create_operator.out | 16 +-
 src/test/regress/expected/create_table.out | 16 +-
 src/test/regress/expected/create_type.out | 10 +-
 src/test/regress/expected/create_view.out | 12 +-
 src/test/regress/expected/drop_if_exists.out | 4 +-
 src/test/regress/expected/errors.out | 188 +++--
 .../expected/float4-exp-three-digits.out | 4 +-
 src/test/regress/expected/float4.out | 4 +-
 .../float8-exp-three-digits-win32.out | 18 +-
 .../regress/expected/float8-small-is-zero.out | 18 +-
 .../expected/float8-small-is-zero_1.out | 18 +-
 src/test/regress/expected/float8.out | 18 +-
 src/test/regress/expected/foreign_key.out | 20 +-
 src/test/regress/expected/hash_index.out | 16 +-
 src/test/regress/expected/inet.out | 2 +-
 src/test/regress/expected/inherit.out | 2 +-
 src/test/regress/expected/int2.out | 4 +-
 .../expected/int8-exp-three-digits.out | 22 +-
 src/test/regress/expected/int8.out | 22 +-
 src/test/regress/expected/interval.out | 14 +-
 src/test/regress/expected/limit.out | 28 +-
 src/test/regress/expected/numeric.out | 14 +-
 src/test/regress/expected/oid.out | 2 +-
 src/test/regress/expected/oidjoins.out | 714 +++++++++---------
 src/test/regress/expected/plpgsql.out | 4 +-
 src/test/regress/expected/point.out | 16 +-
 src/test/regress/expected/polygon.out | 54 +-
 src/test/regress/expected/portals.out | 16 +-
 src/test/regress/expected/portals_p2.out | 26 +-
 src/test/regress/expected/rules.out | 20 +-
 src/test/regress/expected/select.out | 13 +-
 src/test/regress/expected/select_implicit.out | 20 +-
 .../regress/expected/select_implicit_1.out | 20 +-
 .../regress/expected/select_implicit_2.out | 20 +-
 src/test/regress/expected/sequence.out | 4 -
 src/test/regress/expected/sequence_1.out | 4 -
 src/test/regress/expected/subselect.out | 4 +-
 src/test/regress/expected/timestamp.out | 17 +-
 src/test/regress/expected/timestamptz.out | 29 +-
 src/test/regress/expected/tinterval.out | 4 +-
 src/test/regress/expected/transactions.out | 7 +-
 src/test/regress/expected/triggers.out | 82 +-
 src/test/regress/expected/truncate.out | 4 +-
 src/test/regress/expected/tsdicts.out | 12 +-
 src/test/regress/expected/tsearch.out | 5 +-
 src/test/regress/expected/type_sanity.out | 2 +-
 src/test/regress/expected/varchar.out | 6 +-
 src/test/regress/expected/varchar_1.out | 6 +-
 src/test/regress/expected/varchar_2.out | 6 +-
 src/test/regress/expected/window.out | 14 +-
 src/test/regress/input/copy.source | 4 +-
 .../regress/input/create_function_2.source | 4 +-
 src/test/regress/input/misc.source | 14 +-
 src/test/regress/output/copy.source | 4 +-
 .../regress/output/create_function_2.source | 4 +-
 src/test/regress/output/misc.source | 14 +-
 src/test/regress/sql/abstime.sql | 6 +-
 src/test/regress/sql/aggregates.sql | 4 +-
 src/test/regress/sql/alter_table.sql | 14 +-
 src/test/regress/sql/arrays.sql | 18 +-
 src/test/regress/sql/bit.sql | 14 +-
 src/test/regress/sql/bitmapops.sql | 2 +-
 src/test/regress/sql/boolean.sql | 12 +-
 src/test/regress/sql/box.sql | 46 +-
 src/test/regress/sql/char.sql | 6 +-
 src/test/regress/sql/cluster.sql | 2 +-
 src/test/regress/sql/copyselect.sql | 2 +-
 src/test/regress/sql/create_aggregate.sql | 4 +-
 src/test/regress/sql/create_index.sql | 6 +-
 src/test/regress/sql/create_misc.sql | 22 +-
 src/test/regress/sql/create_operator.sql | 16 +-
 src/test/regress/sql/create_table.sql | 16 +-
 src/test/regress/sql/create_type.sql | 10 +-
 src/test/regress/sql/create_view.sql | 12 +-
 src/test/regress/sql/drop.sql | 12 +-
 src/test/regress/sql/drop_if_exists.sql | 4 +-
 src/test/regress/sql/errors.sql | 192 ++--
 src/test/regress/sql/float4.sql | 4 +-
 src/test/regress/sql/float8.sql | 18 +-
 src/test/regress/sql/foreign_key.sql | 20 +-
 src/test/regress/sql/hash_index.sql | 16 +-
 src/test/regress/sql/hs_primary_extremes.sql | 16 +-
 src/test/regress/sql/inet.sql | 2 +-
 src/test/regress/sql/inherit.sql | 2 +-
 src/test/regress/sql/int2.sql | 4 +-
 src/test/regress/sql/int8.sql | 24 +-
 src/test/regress/sql/interval.sql | 14 +-
 src/test/regress/sql/limit.sql | 28 +-
 src/test/regress/sql/numeric.sql | 14 +-
 src/test/regress/sql/oid.sql | 2 +-
 src/test/regress/sql/oidjoins.sql | 714 +++++++++---------
 src/test/regress/sql/plpgsql.sql | 4 +-
 src/test/regress/sql/point.sql | 16 +-
 src/test/regress/sql/polygon.sql | 54 +-
 src/test/regress/sql/portals.sql | 16 +-
 src/test/regress/sql/portals_p2.sql | 26 +-
 src/test/regress/sql/rules.sql | 22 +-
 src/test/regress/sql/select.sql | 16 +-
 src/test/regress/sql/select_implicit.sql | 20 +-
 src/test/regress/sql/sequence.sql | 8 +-
 src/test/regress/sql/subselect.sql | 4 +-
 src/test/regress/sql/timestamp.sql | 18 +-
 src/test/regress/sql/timestamptz.sql | 36 +-
 src/test/regress/sql/tinterval.sql | 4 +-
 src/test/regress/sql/transactions.sql | 8 +-
 src/test/regress/sql/triggers.sql | 84 +--
 src/test/regress/sql/truncate.sql | 4 +-
 src/test/regress/sql/tsdicts.sql | 12 +-
 src/test/regress/sql/tsearch.sql | 6 +-
 src/test/regress/sql/type_sanity.sql | 2 +-
 src/test/regress/sql/varchar.sql | 6 +-
 src/test/regress/sql/window.sql | 14 +-
 src/test/thread/README | 8 +-
 src/tools/RELEASE_CHANGES | 2 +-
 src/tools/backend/README | 2 +-
 src/tools/backend/backend_dirs.html | 2 +-
 src/tools/check_keywords.pl | 4 +-
 src/tools/editors/emacs.samples | 4 +-
 src/tools/entab/Makefile | 8 +-
 src/tools/entab/entab.man | 2 +-
 src/tools/find_static | 8 +-
 src/tools/find_typedef | 6 +-
src/tools/make_diff/README | 4 +- src/tools/msvc/Mkvcbuild.pm | 4 +- src/tools/msvc/README | 8 +- src/tools/pginclude/pgrminclude | 6 +- src/tools/pgindent/README | 6 +- src/tools/pgindent/pgindent | 10 +- src/tools/pgtest | 10 +- src/tutorial/advanced.source | 2 +- src/tutorial/basics.source | 16 +- src/tutorial/complex.source | 12 +- src/tutorial/funcs.source | 14 +- src/tutorial/syscat.source | 24 +- src/win32.mak | 6 +- 517 files changed, 3463 insertions(+), 3508 deletions(-) diff --git a/README b/README index 0790fd21ab..49d55af5f6 100644 --- a/README +++ b/README @@ -1,6 +1,6 @@ PostgreSQL Database Management System ===================================== - + This directory contains the source code distribution of the PostgreSQL database management system. diff --git a/config/ac_func_accept_argtypes.m4 b/config/ac_func_accept_argtypes.m4 index 7a86ccaae5..7cb5cb3776 100644 --- a/config/ac_func_accept_argtypes.m4 +++ b/config/ac_func_accept_argtypes.m4 @@ -6,7 +6,7 @@ dnl @synopsis AC_FUNC_ACCEPT_ARGTYPES dnl dnl Checks the data types of the three arguments to accept(). Results are -dnl placed into the symbols ACCEPT_TYPE_RETURN and ACCEPT_TYPE_ARG[123], +dnl placed into the symbols ACCEPT_TYPE_RETURN and ACCEPT_TYPE_ARG[123], dnl consistent with the following example: dnl dnl #define ACCEPT_TYPE_RETURN int @@ -37,7 +37,7 @@ dnl # which is *not* 'socklen_t *'). If we detect that, then we assume # 'int' as the result, because that ought to work best. # -# On Win32, accept() returns 'unsigned int PASCAL' +# On Win32, accept() returns 'unsigned int PASCAL' AC_DEFUN([AC_FUNC_ACCEPT_ARGTYPES], [AC_MSG_CHECKING([types of arguments for accept()]) diff --git a/config/general.m4 b/config/general.m4 index eb83815931..95d65ceb09 100644 --- a/config/general.m4 +++ b/config/general.m4 @@ -90,7 +90,7 @@ dnl values. But we only want it to appear once in the help. We achieve dnl that by making the help string look the same, which is why we need to dnl save the default that was passed in previously. m4_define([_pgac_helpdefault], m4_ifdef([pgac_defined_$1_$2_bool], [m4_defn([pgac_defined_$1_$2_bool])], [$3]))dnl -PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6], +PGAC_ARG([$1], [$2], [m4_if(_pgac_helpdefault, yes, -)], [$4], [$5], [$6], [AC_MSG_ERROR([no argument expected for --$1-$2 option])], [m4_case([$3], yes, [pgac_arg_to_variable([$1], [$2])=yes diff --git a/configure.in b/configure.in index 4bfa459903..3a0d43f808 100644 --- a/configure.in +++ b/configure.in @@ -230,7 +230,7 @@ AC_SUBST(enable_coverage) # PGAC_ARG_BOOL(enable, dtrace, no, [build with DTrace support], -[AC_DEFINE([ENABLE_DTRACE], 1, +[AC_DEFINE([ENABLE_DTRACE], 1, [Define to 1 to enable DTrace support. (--enable-dtrace)]) AC_CHECK_PROGS(DTRACE, dtrace) if test -z "$DTRACE"; then @@ -262,14 +262,14 @@ AC_DEFINE_UNQUOTED([BLCKSZ], ${BLCKSZ}, [ can set it bigger if you need bigger tuples (although TOAST should reduce the need to have large tuples, since fields can be spread across multiple tuples). - + BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ is currently 2^15 (32768). This is determined by the 15-bit widths of the lp_off and lp_len fields in ItemIdData (see include/storage/itemid.h). - + Changing BLCKSZ requires an initdb. -]) +]) # # Relation segment size @@ -288,7 +288,7 @@ AC_DEFINE_UNQUOTED([RELSEG_SIZE], ${RELSEG_SIZE}, [ RELSEG_SIZE is the maximum number of blocks allowed in one disk file. 
Thus, the maximum size of a single file is RELSEG_SIZE * BLCKSZ; relations bigger than that are divided into multiple files. - + RELSEG_SIZE * BLCKSZ must be less than your OS' limit on file size. This is often 2 GB or 4GB in a 32-bit operating system, unless you have large file support enabled. By default, we make the limit 1 GB @@ -329,7 +329,7 @@ AC_DEFINE_UNQUOTED([XLOG_BLCKSZ], ${XLOG_BLCKSZ}, [ buffers, else direct I/O may fail. Changing XLOG_BLCKSZ requires an initdb. -]) +]) # # WAL segment size @@ -461,7 +461,7 @@ fi # enable profiling if --enable-profiling if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then if test "$GCC" = yes; then - AC_DEFINE([PROFILE_PID_DIR], 1, + AC_DEFINE([PROFILE_PID_DIR], 1, [Define to 1 to allow profiling output to be saved separately for each process.]) CFLAGS="$CFLAGS -pg $PLATFORM_PROFILE_FLAGS" else @@ -1141,7 +1141,7 @@ if test "$with_krb5" = yes; then AC_MSG_CHECKING(for krb5_free_unparsed_name) AC_TRY_LINK([#include ], [krb5_free_unparsed_name(NULL,NULL);], - [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name]) + [AC_DEFINE(HAVE_KRB5_FREE_UNPARSED_NAME, 1, [Define to 1 if you have krb5_free_unparsed_name]) AC_MSG_RESULT(yes)], [AC_MSG_RESULT(no)]) fi @@ -1156,8 +1156,8 @@ AC_SYS_LARGEFILE AC_CHECK_SIZEOF([off_t]) # If we don't have largefile support, can't handle segsize >= 2GB. -if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then - AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.]) +if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then + AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.]) fi @@ -1228,8 +1228,8 @@ if test "$PORTNAME" = "win32"; then # # To properly translate all NLS languages strings, we must support the # *printf() %$ format, which allows *printf() arguments to be selected - # by position in the translated string. - # + # by position in the translated string. + # # libintl versions < 0.13 use the native *printf() functions, and Win32 # *printf() doesn't understand %$, so we must use our /port versions, # which do understand %$. libintl versions >= 0.13 include their own @@ -1590,7 +1590,7 @@ AC_CHECK_SIZEOF([size_t]) AC_CHECK_SIZEOF([long]) # Decide whether float4 is passed by value: user-selectable, enabled by default -AC_MSG_CHECKING([whether to build with float4 passed by value]) +AC_MSG_CHECKING([whether to build with float4 passed by value]) PGAC_ARG_BOOL(enable, float4-byval, yes, [disable float4 passed by value], [AC_DEFINE([USE_FLOAT4_BYVAL], 1, [Define to 1 if you want float4 values to be passed by value. (--enable-float4-byval)]) @@ -1858,7 +1858,7 @@ AC_CONFIG_LINKS([ if test "$PORTNAME" = "win32"; then AC_CONFIG_COMMANDS([check_win32_symlinks],[ -# Links sometimes fail undetected on Mingw - +# Links sometimes fail undetected on Mingw - # so here we detect it and warn the user for FILE in $CONFIG_LINKS do diff --git a/contrib/README b/contrib/README index a04c04346e..6d29cfe2b3 100644 --- a/contrib/README +++ b/contrib/README @@ -90,13 +90,13 @@ isn - lo - Large Object maintenance - by Peter Mount + by Peter Mount ltree - Tree-like data structures by Teodor Sigaev and Oleg Bartunov -oid2name - +oid2name - Maps numeric files to table names by B Palmer @@ -161,7 +161,7 @@ sslinfo - Functions to get information about SSL certificates by Victor Wagner -start-scripts - +start-scripts - Scripts for starting the server at boot time on various platforms. 
tablefunc - diff --git a/contrib/btree_gin/Makefile b/contrib/btree_gin/Makefile index cba68af595..8bc53f72da 100644 --- a/contrib/btree_gin/Makefile +++ b/contrib/btree_gin/Makefile @@ -1,7 +1,7 @@ # contrib/btree_gin/Makefile MODULE_big = btree_gin -OBJS = btree_gin.o +OBJS = btree_gin.o DATA_built = btree_gin.sql DATA = uninstall_btree_gin.sql diff --git a/contrib/btree_gin/expected/cidr.out b/contrib/btree_gin/expected/cidr.out index 28ff9195b1..3d1198a4d7 100644 --- a/contrib/btree_gin/expected/cidr.out +++ b/contrib/btree_gin/expected/cidr.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_cidr ( i cidr ); -INSERT INTO test_cidr VALUES +INSERT INTO test_cidr VALUES ( '1.2.3.4' ), ( '1.2.4.4' ), ( '1.2.5.4' ), diff --git a/contrib/btree_gin/expected/date.out b/contrib/btree_gin/expected/date.out index 8da6ee4843..40dfa308cf 100644 --- a/contrib/btree_gin/expected/date.out +++ b/contrib/btree_gin/expected/date.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_date ( i date ); -INSERT INTO test_date VALUES +INSERT INTO test_date VALUES ( '2004-10-23' ), ( '2004-10-24' ), ( '2004-10-25' ), diff --git a/contrib/btree_gin/expected/inet.out b/contrib/btree_gin/expected/inet.out index bb2eaafc7f..aa6147fb7d 100644 --- a/contrib/btree_gin/expected/inet.out +++ b/contrib/btree_gin/expected/inet.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_inet ( i inet ); -INSERT INTO test_inet VALUES +INSERT INTO test_inet VALUES ( '1.2.3.4/16' ), ( '1.2.4.4/16' ), ( '1.2.5.4/16' ), diff --git a/contrib/btree_gin/expected/interval.out b/contrib/btree_gin/expected/interval.out index a3b99c1f28..1f6ef54070 100644 --- a/contrib/btree_gin/expected/interval.out +++ b/contrib/btree_gin/expected/interval.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_interval ( i interval ); -INSERT INTO test_interval VALUES +INSERT INTO test_interval VALUES ( '03:55:08' ), ( '04:55:08' ), ( '05:55:08' ), diff --git a/contrib/btree_gin/expected/macaddr.out b/contrib/btree_gin/expected/macaddr.out index d26d1f9ad3..ebceb01862 100644 --- a/contrib/btree_gin/expected/macaddr.out +++ b/contrib/btree_gin/expected/macaddr.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_macaddr ( i macaddr ); -INSERT INTO test_macaddr VALUES +INSERT INTO test_macaddr VALUES ( '22:00:5c:03:55:08' ), ( '22:00:5c:04:55:08' ), ( '22:00:5c:05:55:08' ), diff --git a/contrib/btree_gin/expected/time.out b/contrib/btree_gin/expected/time.out index bf65946835..be6b084038 100644 --- a/contrib/btree_gin/expected/time.out +++ b/contrib/btree_gin/expected/time.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_time ( i time ); -INSERT INTO test_time VALUES +INSERT INTO test_time VALUES ( '03:55:08' ), ( '04:55:08' ), ( '05:55:08' ), diff --git a/contrib/btree_gin/expected/timestamp.out b/contrib/btree_gin/expected/timestamp.out index 00b0b66106..a236cdc94a 100644 --- a/contrib/btree_gin/expected/timestamp.out +++ b/contrib/btree_gin/expected/timestamp.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_timestamp ( i timestamp ); -INSERT INTO test_timestamp VALUES +INSERT INTO test_timestamp VALUES ( '2004-10-26 03:55:08' ), ( '2004-10-26 04:55:08' ), ( '2004-10-26 05:55:08' ), diff --git a/contrib/btree_gin/expected/timestamptz.out b/contrib/btree_gin/expected/timestamptz.out index 8550d6b4d9..d53963d2a0 100644 --- a/contrib/btree_gin/expected/timestamptz.out +++ b/contrib/btree_gin/expected/timestamptz.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_timestamptz ( i 
timestamptz ); -INSERT INTO test_timestamptz VALUES +INSERT INTO test_timestamptz VALUES ( '2004-10-26 03:55:08' ), ( '2004-10-26 04:55:08' ), ( '2004-10-26 05:55:08' ), diff --git a/contrib/btree_gin/expected/timetz.out b/contrib/btree_gin/expected/timetz.out index 184bc310f6..45aee71371 100644 --- a/contrib/btree_gin/expected/timetz.out +++ b/contrib/btree_gin/expected/timetz.out @@ -2,7 +2,7 @@ set enable_seqscan=off; CREATE TABLE test_timetz ( i timetz ); -INSERT INTO test_timetz VALUES +INSERT INTO test_timetz VALUES ( '03:55:08 GMT+2' ), ( '04:55:08 GMT+2' ), ( '05:55:08 GMT+2' ), diff --git a/contrib/btree_gin/sql/cidr.sql b/contrib/btree_gin/sql/cidr.sql index a608a3ec78..4a76e5f10b 100644 --- a/contrib/btree_gin/sql/cidr.sql +++ b/contrib/btree_gin/sql/cidr.sql @@ -4,7 +4,7 @@ CREATE TABLE test_cidr ( i cidr ); -INSERT INTO test_cidr VALUES +INSERT INTO test_cidr VALUES ( '1.2.3.4' ), ( '1.2.4.4' ), ( '1.2.5.4' ), diff --git a/contrib/btree_gin/sql/date.sql b/contrib/btree_gin/sql/date.sql index c486f272a4..35086f6b81 100644 --- a/contrib/btree_gin/sql/date.sql +++ b/contrib/btree_gin/sql/date.sql @@ -4,7 +4,7 @@ CREATE TABLE test_date ( i date ); -INSERT INTO test_date VALUES +INSERT INTO test_date VALUES ( '2004-10-23' ), ( '2004-10-24' ), ( '2004-10-25' ), diff --git a/contrib/btree_gin/sql/inet.sql b/contrib/btree_gin/sql/inet.sql index fadc1c47ec..e5ec087856 100644 --- a/contrib/btree_gin/sql/inet.sql +++ b/contrib/btree_gin/sql/inet.sql @@ -4,7 +4,7 @@ CREATE TABLE test_inet ( i inet ); -INSERT INTO test_inet VALUES +INSERT INTO test_inet VALUES ( '1.2.3.4/16' ), ( '1.2.4.4/16' ), ( '1.2.5.4/16' ), diff --git a/contrib/btree_gin/sql/interval.sql b/contrib/btree_gin/sql/interval.sql index f245e4d4b3..e385158783 100644 --- a/contrib/btree_gin/sql/interval.sql +++ b/contrib/btree_gin/sql/interval.sql @@ -4,7 +4,7 @@ CREATE TABLE test_interval ( i interval ); -INSERT INTO test_interval VALUES +INSERT INTO test_interval VALUES ( '03:55:08' ), ( '04:55:08' ), ( '05:55:08' ), diff --git a/contrib/btree_gin/sql/macaddr.sql b/contrib/btree_gin/sql/macaddr.sql index e0402869a8..66566aa604 100644 --- a/contrib/btree_gin/sql/macaddr.sql +++ b/contrib/btree_gin/sql/macaddr.sql @@ -4,7 +4,7 @@ CREATE TABLE test_macaddr ( i macaddr ); -INSERT INTO test_macaddr VALUES +INSERT INTO test_macaddr VALUES ( '22:00:5c:03:55:08' ), ( '22:00:5c:04:55:08' ), ( '22:00:5c:05:55:08' ), diff --git a/contrib/btree_gin/sql/time.sql b/contrib/btree_gin/sql/time.sql index afb1e16ebf..62d709a846 100644 --- a/contrib/btree_gin/sql/time.sql +++ b/contrib/btree_gin/sql/time.sql @@ -4,7 +4,7 @@ CREATE TABLE test_time ( i time ); -INSERT INTO test_time VALUES +INSERT INTO test_time VALUES ( '03:55:08' ), ( '04:55:08' ), ( '05:55:08' ), diff --git a/contrib/btree_gin/sql/timestamp.sql b/contrib/btree_gin/sql/timestamp.sql index 6e00cd7e40..56727e81c4 100644 --- a/contrib/btree_gin/sql/timestamp.sql +++ b/contrib/btree_gin/sql/timestamp.sql @@ -4,7 +4,7 @@ CREATE TABLE test_timestamp ( i timestamp ); -INSERT INTO test_timestamp VALUES +INSERT INTO test_timestamp VALUES ( '2004-10-26 03:55:08' ), ( '2004-10-26 04:55:08' ), ( '2004-10-26 05:55:08' ), diff --git a/contrib/btree_gin/sql/timestamptz.sql b/contrib/btree_gin/sql/timestamptz.sql index 26c01ef804..e6cfdb1b07 100644 --- a/contrib/btree_gin/sql/timestamptz.sql +++ b/contrib/btree_gin/sql/timestamptz.sql @@ -4,7 +4,7 @@ CREATE TABLE test_timestamptz ( i timestamptz ); -INSERT INTO test_timestamptz VALUES +INSERT INTO test_timestamptz VALUES ( '2004-10-26 
03:55:08' ), ( '2004-10-26 04:55:08' ), ( '2004-10-26 05:55:08' ), diff --git a/contrib/btree_gin/sql/timetz.sql b/contrib/btree_gin/sql/timetz.sql index a72b105fc1..ca947b753e 100644 --- a/contrib/btree_gin/sql/timetz.sql +++ b/contrib/btree_gin/sql/timetz.sql @@ -4,7 +4,7 @@ CREATE TABLE test_timetz ( i timetz ); -INSERT INTO test_timetz VALUES +INSERT INTO test_timetz VALUES ( '03:55:08 GMT+2' ), ( '04:55:08 GMT+2' ), ( '05:55:08 GMT+2' ), diff --git a/contrib/btree_gist/btree_gist.sql.in b/contrib/btree_gist/btree_gist.sql.in index 339087018a..01cd30f2de 100644 --- a/contrib/btree_gist/btree_gist.sql.in +++ b/contrib/btree_gist/btree_gist.sql.in @@ -136,7 +136,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_oid_ops -DEFAULT FOR TYPE oid USING gist +DEFAULT FOR TYPE oid USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -194,7 +194,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_int2_ops -DEFAULT FOR TYPE int2 USING gist +DEFAULT FOR TYPE int2 USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -251,7 +251,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_int4_ops -DEFAULT FOR TYPE int4 USING gist +DEFAULT FOR TYPE int4 USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -308,7 +308,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_int8_ops -DEFAULT FOR TYPE int8 USING gist +DEFAULT FOR TYPE int8 USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -366,7 +366,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_float4_ops -DEFAULT FOR TYPE float4 USING gist +DEFAULT FOR TYPE float4 USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -426,7 +426,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_float8_ops -DEFAULT FOR TYPE float8 USING gist +DEFAULT FOR TYPE float8 USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -448,7 +448,7 @@ AS -- -- -- timestamp ops --- +-- -- -- @@ -461,7 +461,7 @@ CREATE OR REPLACE FUNCTION gbt_tstz_consistent(internal,timestamptz,int2,oid,int RETURNS bool AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_ts_compress(internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -476,12 +476,12 @@ CREATE OR REPLACE FUNCTION gbt_ts_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_ts_picksplit(internal, internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_ts_union(bytea, internal) RETURNS gbtreekey16 AS 'MODULE_PATHNAME' @@ -494,7 +494,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_timestamp_ops -DEFAULT FOR TYPE timestamp USING gist +DEFAULT FOR TYPE timestamp USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -514,7 +514,7 @@ AS -- Create the operator class CREATE OPERATOR CLASS gist_timestamptz_ops -DEFAULT FOR TYPE timestamptz USING gist +DEFAULT FOR TYPE timestamptz USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -536,7 +536,7 @@ AS -- -- -- time ops --- +-- -- -- @@ -564,12 +564,12 @@ CREATE OR REPLACE FUNCTION gbt_time_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_time_picksplit(internal, internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_time_union(bytea, internal) RETURNS 
gbtreekey16 AS 'MODULE_PATHNAME' @@ -582,7 +582,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_time_ops -DEFAULT FOR TYPE time USING gist +DEFAULT FOR TYPE time USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -600,7 +600,7 @@ AS STORAGE gbtreekey16; CREATE OPERATOR CLASS gist_timetz_ops -DEFAULT FOR TYPE timetz USING gist +DEFAULT FOR TYPE timetz USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -622,7 +622,7 @@ AS -- -- -- date ops --- +-- -- -- @@ -640,12 +640,12 @@ CREATE OR REPLACE FUNCTION gbt_date_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_date_picksplit(internal, internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_date_union(bytea, internal) RETURNS gbtreekey8 AS 'MODULE_PATHNAME' @@ -658,7 +658,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_date_ops -DEFAULT FOR TYPE date USING gist +DEFAULT FOR TYPE date USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -680,7 +680,7 @@ AS -- -- -- interval ops --- +-- -- -- @@ -703,12 +703,12 @@ CREATE OR REPLACE FUNCTION gbt_intv_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_intv_picksplit(internal, internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gbt_intv_union(bytea, internal) RETURNS gbtreekey32 AS 'MODULE_PATHNAME' @@ -721,7 +721,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_interval_ops -DEFAULT FOR TYPE interval USING gist +DEFAULT FOR TYPE interval USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -778,7 +778,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_cash_ops -DEFAULT FOR TYPE money USING gist +DEFAULT FOR TYPE money USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -835,7 +835,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_macaddr_ops -DEFAULT FOR TYPE macaddr USING gist +DEFAULT FOR TYPE macaddr USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -904,7 +904,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_text_ops -DEFAULT FOR TYPE text USING gist +DEFAULT FOR TYPE text USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -919,12 +919,12 @@ AS FUNCTION 5 gbt_text_penalty (internal, internal, internal), FUNCTION 6 gbt_text_picksplit (internal, internal), FUNCTION 7 gbt_text_same (internal, internal, internal), - STORAGE gbtreekey_var; + STORAGE gbtreekey_var; ---- Create the operator class CREATE OPERATOR CLASS gist_bpchar_ops -DEFAULT FOR TYPE bpchar USING gist +DEFAULT FOR TYPE bpchar USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -939,7 +939,7 @@ AS FUNCTION 5 gbt_text_penalty (internal, internal, internal), FUNCTION 6 gbt_text_picksplit (internal, internal), FUNCTION 7 gbt_text_same (internal, internal, internal), - STORAGE gbtreekey_var; + STORAGE gbtreekey_var; @@ -982,7 +982,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_bytea_ops -DEFAULT FOR TYPE bytea USING gist +DEFAULT FOR TYPE bytea USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -997,7 +997,7 @@ AS FUNCTION 5 gbt_bytea_penalty (internal, internal, internal), FUNCTION 6 gbt_bytea_picksplit (internal, internal), FUNCTION 7 gbt_bytea_same (internal, internal, internal), - STORAGE 
gbtreekey_var; + STORAGE gbtreekey_var; -- @@ -1040,7 +1040,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_numeric_ops -DEFAULT FOR TYPE numeric USING gist +DEFAULT FOR TYPE numeric USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -1055,7 +1055,7 @@ AS FUNCTION 5 gbt_numeric_penalty (internal, internal, internal), FUNCTION 6 gbt_numeric_picksplit (internal, internal), FUNCTION 7 gbt_numeric_same (internal, internal, internal), - STORAGE gbtreekey_var; + STORAGE gbtreekey_var; -- -- @@ -1096,7 +1096,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_bit_ops -DEFAULT FOR TYPE bit USING gist +DEFAULT FOR TYPE bit USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -1111,12 +1111,12 @@ AS FUNCTION 5 gbt_bit_penalty (internal, internal, internal), FUNCTION 6 gbt_bit_picksplit (internal, internal), FUNCTION 7 gbt_bit_same (internal, internal, internal), - STORAGE gbtreekey_var; + STORAGE gbtreekey_var; -- Create the operator class CREATE OPERATOR CLASS gist_vbit_ops -DEFAULT FOR TYPE varbit USING gist +DEFAULT FOR TYPE varbit USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -1131,7 +1131,7 @@ AS FUNCTION 5 gbt_bit_penalty (internal, internal, internal), FUNCTION 6 gbt_bit_picksplit (internal, internal), FUNCTION 7 gbt_bit_same (internal, internal, internal), - STORAGE gbtreekey_var; + STORAGE gbtreekey_var; @@ -1175,7 +1175,7 @@ LANGUAGE C IMMUTABLE STRICT; -- Create the operator class CREATE OPERATOR CLASS gist_inet_ops -DEFAULT FOR TYPE inet USING gist +DEFAULT FOR TYPE inet USING gist AS OPERATOR 1 < , OPERATOR 2 <= , @@ -1194,14 +1194,14 @@ AS -- Create the operator class CREATE OPERATOR CLASS gist_cidr_ops -DEFAULT FOR TYPE cidr USING gist +DEFAULT FOR TYPE cidr USING gist AS OPERATOR 1 < (inet, inet) , OPERATOR 2 <= (inet, inet) , OPERATOR 3 = (inet, inet) , OPERATOR 4 >= (inet, inet) , OPERATOR 5 > (inet, inet) , - OPERATOR 6 <> (inet, inet) , + OPERATOR 6 <> (inet, inet) , FUNCTION 1 gbt_inet_consistent (internal, inet, int2, oid, internal), FUNCTION 2 gbt_inet_union (bytea, internal), FUNCTION 3 gbt_inet_compress (internal), diff --git a/contrib/btree_gist/uninstall_btree_gist.sql b/contrib/btree_gist/uninstall_btree_gist.sql index 4163730e85..30b9da4c73 100644 --- a/contrib/btree_gist/uninstall_btree_gist.sql +++ b/contrib/btree_gist/uninstall_btree_gist.sql @@ -116,9 +116,9 @@ DROP OPERATOR CLASS gist_interval_ops USING gist; DROP FUNCTION gbt_intv_same(internal, internal, internal); DROP FUNCTION gbt_intv_union(bytea, internal); - + DROP FUNCTION gbt_intv_picksplit(internal, internal); - + DROP FUNCTION gbt_intv_penalty(internal,internal,internal); DROP FUNCTION gbt_intv_decompress(internal); @@ -132,9 +132,9 @@ DROP OPERATOR CLASS gist_date_ops USING gist; DROP FUNCTION gbt_date_same(internal, internal, internal); DROP FUNCTION gbt_date_union(bytea, internal); - + DROP FUNCTION gbt_date_picksplit(internal, internal); - + DROP FUNCTION gbt_date_penalty(internal,internal,internal); DROP FUNCTION gbt_date_compress(internal); @@ -148,9 +148,9 @@ DROP OPERATOR CLASS gist_time_ops USING gist; DROP FUNCTION gbt_time_same(internal, internal, internal); DROP FUNCTION gbt_time_union(bytea, internal); - + DROP FUNCTION gbt_time_picksplit(internal, internal); - + DROP FUNCTION gbt_time_penalty(internal,internal,internal); DROP FUNCTION gbt_timetz_compress(internal); @@ -168,15 +168,15 @@ DROP OPERATOR CLASS gist_timestamp_ops USING gist; DROP FUNCTION gbt_ts_same(internal, internal, internal); DROP FUNCTION 
gbt_ts_union(bytea, internal); - + DROP FUNCTION gbt_ts_picksplit(internal, internal); - + DROP FUNCTION gbt_ts_penalty(internal,internal,internal); DROP FUNCTION gbt_tstz_compress(internal); DROP FUNCTION gbt_ts_compress(internal); - + DROP FUNCTION gbt_tstz_consistent(internal,timestamptz,int2,oid,internal); DROP FUNCTION gbt_ts_consistent(internal,timestamp,int2,oid,internal); diff --git a/contrib/citext/citext.sql.in b/contrib/citext/citext.sql.in index 0aef0ad947..1e75b55397 100644 --- a/contrib/citext/citext.sql.in +++ b/contrib/citext/citext.sql.in @@ -343,7 +343,7 @@ CREATE OPERATOR !~~* ( ); -- --- Matching citext to text. +-- Matching citext to text. -- CREATE OR REPLACE FUNCTION texticlike(citext, text) diff --git a/contrib/citext/expected/citext.out b/contrib/citext/expected/citext.out index 21e73be2d7..66ea5ee6ff 100644 --- a/contrib/citext/expected/citext.out +++ b/contrib/citext/expected/citext.out @@ -1046,7 +1046,7 @@ CREATE TABLE caster ( bpchar bpchar, char char, chr "char", - name name, + name name, bytea bytea, boolean boolean, float4 float4, @@ -1055,7 +1055,7 @@ CREATE TABLE caster ( int8 int8, int4 int4, int2 int2, - cidr cidr, + cidr cidr, inet inet, macaddr macaddr, money money, diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out index 5fa537bc19..c5ca1f6c54 100644 --- a/contrib/citext/expected/citext_1.out +++ b/contrib/citext/expected/citext_1.out @@ -1046,7 +1046,7 @@ CREATE TABLE caster ( bpchar bpchar, char char, chr "char", - name name, + name name, bytea bytea, boolean boolean, float4 float4, @@ -1055,7 +1055,7 @@ CREATE TABLE caster ( int8 int8, int4 int4, int2 int2, - cidr cidr, + cidr cidr, inet inet, macaddr macaddr, money money, diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql index 9014e5d931..2f9b46665c 100644 --- a/contrib/citext/sql/citext.sql +++ b/contrib/citext/sql/citext.sql @@ -302,7 +302,7 @@ CREATE TABLE caster ( bpchar bpchar, char char, chr "char", - name name, + name name, bytea bytea, boolean boolean, float4 float4, @@ -311,7 +311,7 @@ CREATE TABLE caster ( int8 int8, int4 int4, int2 int2, - cidr cidr, + cidr cidr, inet inet, macaddr macaddr, money money, diff --git a/contrib/cube/CHANGES b/contrib/cube/CHANGES index d3eca90f6d..7c5590c16f 100644 --- a/contrib/cube/CHANGES +++ b/contrib/cube/CHANGES @@ -6,10 +6,10 @@ Code Cleanup: Update the calling convention for all external facing functions. By external facing, I mean all functions that are directly referenced in cube.sql. Prior -to my update, all functions used the older V0 calling convention. They now +to my update, all functions used the older V0 calling convention. They now use V1. 
-New Functions: +New Functions: cube(float[]), which makes a zero volume cube from a float array diff --git a/contrib/cube/cube.sql.in b/contrib/cube/cube.sql.in index 3cd199530a..a7e6b1d2b9 100644 --- a/contrib/cube/cube.sql.in +++ b/contrib/cube/cube.sql.in @@ -4,7 +4,7 @@ SET search_path = public; -- Create the user-defined type for N-dimensional boxes --- +-- CREATE OR REPLACE FUNCTION cube_in(cstring) RETURNS cube @@ -268,12 +268,12 @@ AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION g_cube_compress(internal) -RETURNS internal +RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION g_cube_decompress(internal) -RETURNS internal +RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; @@ -288,12 +288,12 @@ AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION g_cube_union(internal, internal) -RETURNS cube +RETURNS cube AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION g_cube_same(cube, cube, internal) -RETURNS internal +RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; diff --git a/contrib/cube/cubeparse.y b/contrib/cube/cubeparse.y index d02941dd8c..9e7c87e903 100644 --- a/contrib/cube/cubeparse.y +++ b/contrib/cube/cubeparse.y @@ -51,7 +51,7 @@ box: O_BRACKET paren_list COMMA paren_list C_BRACKET { int dim; - + dim = delim_count($2, ',') + 1; if ( (delim_count($4, ',') + 1) != dim ) { ereport(ERROR, @@ -69,16 +69,16 @@ box: CUBE_MAX_DIM))); YYABORT; } - + *((void **)result) = write_box( dim, $2, $4 ); - + } | paren_list COMMA paren_list { int dim; dim = delim_count($1, ',') + 1; - + if ( (delim_count($3, ',') + 1) != dim ) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -95,7 +95,7 @@ box: CUBE_MAX_DIM))); YYABORT; } - + *((void **)result) = write_box( dim, $1, $3 ); } | @@ -146,7 +146,7 @@ list: $$ = palloc(scanbuflen + 1); strcpy($$, $1); } - | + | list COMMA CUBEFLOAT { $$ = $1; strcat($$, ","); @@ -169,31 +169,31 @@ delim_count(char *s, char delim) return (ndelim); } -static NDBOX * +static NDBOX * write_box(unsigned int dim, char *str1, char *str2) { NDBOX * bp; char * s; - int i; + int i; int size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2; - + bp = palloc0(size); SET_VARSIZE(bp, size); bp->dim = dim; - + s = str1; bp->x[i=0] = strtod(s, NULL); while ((s = strchr(s, ',')) != NULL) { s++; i++; bp->x[i] = strtod(s, NULL); - } - + } + s = str2; bp->x[i=dim] = strtod(s, NULL); while ((s = strchr(s, ',')) != NULL) { s++; i++; bp->x[i] = strtod(s, NULL); - } + } return(bp); } @@ -206,13 +206,13 @@ write_point_as_box(char *str, int dim) int i, size; double x; char * s = str; - + size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2; bp = palloc0(size); SET_VARSIZE(bp, size); bp->dim = dim; - + i = 0; x = strtod(s, NULL); bp->x[0] = x; @@ -222,7 +222,7 @@ write_point_as_box(char *str, int dim) x = strtod(s, NULL); bp->x[i] = x; bp->x[i+dim] = x; - } + } return(bp); } diff --git a/contrib/cube/cubescan.l b/contrib/cube/cubescan.l index b0e477bf1e..eb71b11adf 100644 --- a/contrib/cube/cubescan.l +++ b/contrib/cube/cubescan.l @@ -1,8 +1,8 @@ %{ -/* -** A scanner for EMP-style numeric ranges +/* + * A scanner for EMP-style numeric ranges * contrib/cube/cubescan.l -*/ + */ #include "postgres.h" diff --git a/contrib/cube/expected/cube.out b/contrib/cube/expected/cube.out index 101a63b723..ae7b5b22c2 100644 --- a/contrib/cube/expected/cube.out +++ b/contrib/cube/expected/cube.out @@ -473,13 +473,13 @@ SELECT cube('{0,1,2}'::float[]); (0, 
1, 2) (1 row) -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); cube_subset --------------------------- (5, 3, 1, 1),(8, 7, 6, 6) (1 row) -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); ERROR: Index out of bounds -- -- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. @@ -1107,11 +1107,11 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2); (1 row) -- Load some example data and build the index --- +-- CREATE TABLE test_cube (c cube); \copy test_cube from 'data/test_cube.data' CREATE INDEX test_cube_ix ON test_cube USING gist (c); -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; c -------------------------- (337, 455),(240, 359) @@ -1121,8 +1121,8 @@ SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; (2424, 160),(2424, 81) (5 rows) --- Test sorting -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; +-- Test sorting +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; c -------------------------- (337, 455),(240, 359) diff --git a/contrib/cube/expected/cube_1.out b/contrib/cube/expected/cube_1.out index 55f6861daf..f27e832d63 100644 --- a/contrib/cube/expected/cube_1.out +++ b/contrib/cube/expected/cube_1.out @@ -473,13 +473,13 @@ SELECT cube('{0,1,2}'::float[]); (0, 1, 2) (1 row) -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); cube_subset --------------------------- (5, 3, 1, 1),(8, 7, 6, 6) (1 row) -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); ERROR: Index out of bounds -- -- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. @@ -1107,11 +1107,11 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2); (1 row) -- Load some example data and build the index --- +-- CREATE TABLE test_cube (c cube); \copy test_cube from 'data/test_cube.data' CREATE INDEX test_cube_ix ON test_cube USING gist (c); -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; c -------------------------- (337, 455),(240, 359) @@ -1121,8 +1121,8 @@ SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; (2424, 160),(2424, 81) (5 rows) --- Test sorting -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; +-- Test sorting +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; c -------------------------- (337, 455),(240, 359) diff --git a/contrib/cube/expected/cube_2.out b/contrib/cube/expected/cube_2.out index c449395818..f534ccf0b5 100644 --- a/contrib/cube/expected/cube_2.out +++ b/contrib/cube/expected/cube_2.out @@ -473,13 +473,13 @@ SELECT cube('{0,1,2}'::float[]); (0, 1, 2) (1 row) -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); cube_subset --------------------------- (5, 3, 1, 1),(8, 7, 6, 6) (1 row) -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); ERROR: Index out of bounds -- -- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. 
@@ -1107,11 +1107,11 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2); (1 row) -- Load some example data and build the index --- +-- CREATE TABLE test_cube (c cube); \copy test_cube from 'data/test_cube.data' CREATE INDEX test_cube_ix ON test_cube USING gist (c); -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; c -------------------------- (337, 455),(240, 359) @@ -1121,8 +1121,8 @@ SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; (2424, 160),(2424, 81) (5 rows) --- Test sorting -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; +-- Test sorting +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; c -------------------------- (337, 455),(240, 359) diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql index 1931dfbc80..5c12183dfd 100644 --- a/contrib/cube/sql/cube.sql +++ b/contrib/cube/sql/cube.sql @@ -119,8 +119,8 @@ SELECT cube('{0,1,2}'::float[], '{3,4,5}'::float[]); SELECT cube('{0,1,2}'::float[], '{3}'::float[]); SELECT cube(NULL::float[], '{3}'::float[]); SELECT cube('{0,1,2}'::float[]); -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); +SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); -- -- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. @@ -275,13 +275,13 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -1, 2); SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2); -- Load some example data and build the index --- +-- CREATE TABLE test_cube (c cube); \copy test_cube from 'data/test_cube.data' CREATE INDEX test_cube_ix ON test_cube USING gist (c); -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; --- Test sorting -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; +-- Test sorting +SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; diff --git a/contrib/dblink/Makefile b/contrib/dblink/Makefile index cc59128cb2..fdfd03a4cf 100644 --- a/contrib/dblink/Makefile +++ b/contrib/dblink/Makefile @@ -6,8 +6,8 @@ OBJS = dblink.o SHLIB_LINK = $(libpq) SHLIB_PREREQS = submake-libpq -DATA_built = dblink.sql -DATA = uninstall_dblink.sql +DATA_built = dblink.sql +DATA = uninstall_dblink.sql REGRESS = dblink diff --git a/contrib/dblink/dblink.sql.in b/contrib/dblink/dblink.sql.in index acad2c94d0..3c9d66e7df 100644 --- a/contrib/dblink/dblink.sql.in +++ b/contrib/dblink/dblink.sql.in @@ -207,7 +207,7 @@ CREATE OR REPLACE FUNCTION dblink_get_notify( OUT notify_name TEXT, OUT be_pid INT4, OUT extra TEXT -) +) RETURNS setof record AS 'MODULE_PATHNAME', 'dblink_get_notify' LANGUAGE C STRICT; @@ -217,7 +217,7 @@ CREATE OR REPLACE FUNCTION dblink_get_notify( OUT notify_name TEXT, OUT be_pid INT4, OUT extra TEXT -) +) RETURNS setof record AS 'MODULE_PATHNAME', 'dblink_get_notify' LANGUAGE C STRICT; diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out index c59a67c737..15848dd922 100644 --- a/contrib/dblink/expected/dblink.out +++ b/contrib/dblink/expected/dblink.out @@ -668,7 +668,7 @@ SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); OK (1 row) -SELECT * from +SELECT * from dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1; t1 ---- @@ -681,7 +681,7 @@ SELECT dblink_connect('dtest2', 
'dbname=contrib_regression'); OK (1 row) -SELECT * from +SELECT * from dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1; t1 ---- @@ -694,7 +694,7 @@ SELECT dblink_connect('dtest3', 'dbname=contrib_regression'); OK (1 row) -SELECT * from +SELECT * from dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1; t1 ---- @@ -768,7 +768,7 @@ SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); OK (1 row) -SELECT * from +SELECT * from dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1; t1 ---- diff --git a/contrib/dblink/sql/dblink.sql b/contrib/dblink/sql/dblink.sql index a6d7811bfc..062bc9ee0e 100644 --- a/contrib/dblink/sql/dblink.sql +++ b/contrib/dblink/sql/dblink.sql @@ -327,15 +327,15 @@ SELECT dblink_disconnect('myconn'); -- test asynchronous queries SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); -SELECT * from +SELECT * from dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1; SELECT dblink_connect('dtest2', 'dbname=contrib_regression'); -SELECT * from +SELECT * from dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1; SELECT dblink_connect('dtest3', 'dbname=contrib_regression'); -SELECT * from +SELECT * from dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1; CREATE TEMPORARY TABLE result AS @@ -364,7 +364,7 @@ SELECT dblink_disconnect('dtest3'); SELECT * from result; SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); -SELECT * from +SELECT * from dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1; SELECT dblink_cancel_query('dtest1'); diff --git a/contrib/earthdistance/earthdistance.sql.in b/contrib/earthdistance/earthdistance.sql.in index a4799914bd..a4ce812584 100644 --- a/contrib/earthdistance/earthdistance.sql.in +++ b/contrib/earthdistance/earthdistance.sql.in @@ -35,7 +35,7 @@ CREATE DOMAIN earth AS cube CONSTRAINT on_surface check(abs(cube_distance(value, '(0)'::cube) / earth() - 1) < '10e-7'::float8); -CREATE OR REPLACE FUNCTION sec_to_gc(float8) +CREATE OR REPLACE FUNCTION sec_to_gc(float8) RETURNS float8 LANGUAGE SQL IMMUTABLE STRICT @@ -76,7 +76,7 @@ RETURNS cube LANGUAGE SQL IMMUTABLE STRICT AS 'SELECT cube_enlarge($1, gc_to_sec($2), 3)'; - + --------------- geo_distance CREATE OR REPLACE FUNCTION geo_distance (point, point) diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.sql.in b/contrib/fuzzystrmatch/fuzzystrmatch.sql.in index 0e75491cbe..0f2ea85e48 100644 --- a/contrib/fuzzystrmatch/fuzzystrmatch.sql.in +++ b/contrib/fuzzystrmatch/fuzzystrmatch.sql.in @@ -35,10 +35,10 @@ CREATE OR REPLACE FUNCTION difference(text,text) RETURNS int AS 'MODULE_PATHNAME', 'difference' LANGUAGE C IMMUTABLE STRICT; -CREATE OR REPLACE FUNCTION dmetaphone (text) RETURNS text +CREATE OR REPLACE FUNCTION dmetaphone (text) RETURNS text AS 'MODULE_PATHNAME', 'dmetaphone' LANGUAGE C IMMUTABLE STRICT; -CREATE OR REPLACE FUNCTION dmetaphone_alt (text) RETURNS text +CREATE OR REPLACE FUNCTION dmetaphone_alt (text) RETURNS text AS 'MODULE_PATHNAME', 'dmetaphone_alt' LANGUAGE C IMMUTABLE STRICT; diff --git a/contrib/hstore/expected/hstore.out b/contrib/hstore/expected/hstore.out index 0ed109203c..19dd299af7 100644 --- a/contrib/hstore/expected/hstore.out +++ b/contrib/hstore/expected/hstore.out @@ -438,7 +438,7 @@ select hstore 'a=>NULL, b=>qq' ?& '{}'::text[]; f (1 row) --- delete +-- delete select delete('a=>1 , b=>2, c=>3'::hstore, 'a'); delete -------------------- diff --git a/contrib/hstore/sql/hstore.sql 
b/contrib/hstore/sql/hstore.sql index 76f742299e..58a7967526 100644 --- a/contrib/hstore/sql/hstore.sql +++ b/contrib/hstore/sql/hstore.sql @@ -97,7 +97,7 @@ select hstore 'a=>NULL, b=>qq' ?& ARRAY['c','a']; select hstore 'a=>NULL, b=>qq' ?& ARRAY['c','d']; select hstore 'a=>NULL, b=>qq' ?& '{}'::text[]; --- delete +-- delete select delete('a=>1 , b=>2, c=>3'::hstore, 'a'); select delete('a=>null , b=>2, c=>3'::hstore, 'a'); diff --git a/contrib/intarray/Makefile b/contrib/intarray/Makefile index 18340f9d71..a10d7c6b1f 100644 --- a/contrib/intarray/Makefile +++ b/contrib/intarray/Makefile @@ -1,7 +1,7 @@ # contrib/intarray/Makefile MODULE_big = _int -OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o +OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o DATA_built = _int.sql DATA = uninstall__int.sql REGRESS = _int diff --git a/contrib/intarray/bench/bench.pl b/contrib/intarray/bench/bench.pl index 1887211989..4e18624b9c 100755 --- a/contrib/intarray/bench/bench.pl +++ b/contrib/intarray/bench/bench.pl @@ -1,4 +1,4 @@ -#!/usr/bin/perl +#!/usr/bin/perl use strict; # make sure we are in a sane environment. @@ -14,16 +14,16 @@ if ( !( scalar %opt && defined $opt{s} ) ) { print <{mid}\t$_->{sections}\n"; } -} +} print sprintf("total: %.02f sec; number: %d; for one: %.03f sec; found %d docs\n", $elapsed, $b, $elapsed/$b, $count+1 ); $dbi -> disconnect; sub exec_sql { my ($dbi, $sql, @keys) = @_; my $sth=$dbi->prepare($sql) || die; - $sth->execute( @keys ) || die; - my $r; + $sth->execute( @keys ) || die; + my $r; my @row; while ( defined ( $r=$sth->fetchrow_hashref ) ) { push @row, $r; - } - $sth->finish; + } + $sth->finish; return @row; } diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl index 3a5e96301b..67394f87b7 100755 --- a/contrib/intarray/bench/create_test.pl +++ b/contrib/intarray/bench/create_test.pl @@ -9,7 +9,7 @@ create table message ( sections int[] ); create table message_section_map ( - mid int not null, + mid int not null, sid int not null ); @@ -66,7 +66,7 @@ unlink 'message.tmp', 'message_section_map.tmp'; sub copytable { my $t = shift; - + print "COPY $t from stdin;\n"; open( FFF, "$t.tmp") || die; while() { print; } diff --git a/contrib/isn/ISBN.h b/contrib/isn/ISBN.h index 6e6d95b09f..c0301ced1e 100644 --- a/contrib/isn/ISBN.h +++ b/contrib/isn/ISBN.h @@ -32,7 +32,7 @@ * For ISBN with prefix 978 * Range Table as of 2010-Jul-29 */ - + /* where the digit set begins, and how many of them are in the table */ const unsigned ISBN_index[10][2] = { {0, 6}, diff --git a/contrib/ltree/ltree.sql.in b/contrib/ltree/ltree.sql.in index 4ea6277c57..1b985a7a99 100644 --- a/contrib/ltree/ltree.sql.in +++ b/contrib/ltree/ltree.sql.in @@ -482,18 +482,18 @@ CREATE OR REPLACE FUNCTION ltree_gist_in(cstring) RETURNS ltree_gist AS 'MODULE_PATHNAME' LANGUAGE C STRICT; - + CREATE OR REPLACE FUNCTION ltree_gist_out(ltree_gist) RETURNS cstring AS 'MODULE_PATHNAME' LANGUAGE C STRICT; - + CREATE TYPE ltree_gist ( internallength = -1, input = ltree_gist_in, output = ltree_gist_out, storage = plain -); +); CREATE OR REPLACE FUNCTION ltree_consistent(internal,internal,int2,oid,internal) diff --git a/contrib/ltree/uninstall_ltree.sql b/contrib/ltree/uninstall_ltree.sql index 07ce1189b5..2e10b10e97 100644 --- a/contrib/ltree/uninstall_ltree.sql +++ b/contrib/ltree/uninstall_ltree.sql @@ -110,7 +110,7 @@ DROP FUNCTION ltree_compress(internal); DROP FUNCTION ltree_consistent(internal,internal,int2,oid,internal); 
DROP TYPE ltree_gist CASCADE; - + DROP OPERATOR ^@ (ltxtquery, ltree); DROP OPERATOR ^@ (ltree, ltxtquery); diff --git a/contrib/pg_buffercache/Makefile b/contrib/pg_buffercache/Makefile index 6a47a2241e..ffcf0c3b92 100644 --- a/contrib/pg_buffercache/Makefile +++ b/contrib/pg_buffercache/Makefile @@ -1,10 +1,10 @@ # contrib/pg_buffercache/Makefile MODULE_big = pg_buffercache -OBJS = pg_buffercache_pages.o +OBJS = pg_buffercache_pages.o -DATA_built = pg_buffercache.sql -DATA = uninstall_pg_buffercache.sql +DATA_built = pg_buffercache.sql +DATA = uninstall_pg_buffercache.sql ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_buffercache/pg_buffercache.sql.in b/contrib/pg_buffercache/pg_buffercache.sql.in index b23e94ed12..88b5e643ac 100644 --- a/contrib/pg_buffercache/pg_buffercache.sql.in +++ b/contrib/pg_buffercache/pg_buffercache.sql.in @@ -12,9 +12,9 @@ LANGUAGE C; -- Create a view for convenient access. CREATE VIEW pg_buffercache AS SELECT P.* FROM pg_buffercache_pages() AS P - (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid, + (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid, relforknumber int2, relblocknumber int8, isdirty bool, usagecount int2); - + -- Don't want these to be available at public. REVOKE ALL ON FUNCTION pg_buffercache_pages() FROM PUBLIC; REVOKE ALL ON pg_buffercache FROM PUBLIC; diff --git a/contrib/pg_freespacemap/Makefile b/contrib/pg_freespacemap/Makefile index da335a86ca..65539d5d71 100644 --- a/contrib/pg_freespacemap/Makefile +++ b/contrib/pg_freespacemap/Makefile @@ -1,10 +1,10 @@ # contrib/pg_freespacemap/Makefile MODULE_big = pg_freespacemap -OBJS = pg_freespacemap.o +OBJS = pg_freespacemap.o -DATA_built = pg_freespacemap.sql -DATA = uninstall_pg_freespacemap.sql +DATA_built = pg_freespacemap.sql +DATA = uninstall_pg_freespacemap.sql ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_trgm/pg_trgm.sql.in b/contrib/pg_trgm/pg_trgm.sql.in index b1f094ab40..cce6cd9872 100644 --- a/contrib/pg_trgm/pg_trgm.sql.in +++ b/contrib/pg_trgm/pg_trgm.sql.in @@ -59,7 +59,7 @@ CREATE OR REPLACE FUNCTION gtrgm_consistent(internal,text,int,oid,internal) RETURNS bool AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; - + CREATE OR REPLACE FUNCTION gtrgm_compress(internal) RETURNS internal AS 'MODULE_PATHNAME' diff --git a/contrib/pg_trgm/uninstall_pg_trgm.sql b/contrib/pg_trgm/uninstall_pg_trgm.sql index 239cd85b5b..6706dd133e 100644 --- a/contrib/pg_trgm/uninstall_pg_trgm.sql +++ b/contrib/pg_trgm/uninstall_pg_trgm.sql @@ -16,7 +16,7 @@ DROP FUNCTION gtrgm_penalty(internal,internal,internal); DROP FUNCTION gtrgm_decompress(internal); DROP FUNCTION gtrgm_compress(internal); - + DROP FUNCTION gtrgm_consistent(internal,text,int,oid,internal); DROP TYPE gtrgm CASCADE; diff --git a/contrib/pg_upgrade/IMPLEMENTATION b/contrib/pg_upgrade/IMPLEMENTATION index bbd36ac9e9..a0cfcf15da 100644 --- a/contrib/pg_upgrade/IMPLEMENTATION +++ b/contrib/pg_upgrade/IMPLEMENTATION @@ -13,7 +13,7 @@ old data. If you have a lot of data, that can take a considerable amount of time. If you have too much data, you may have to buy more storage since you need enough room to hold the original data plus the exported data. pg_upgrade can reduce the amount of time and disk space required -for many upgrades. +for many upgrades. 
The URL http://momjian.us/main/writings/pgsql/pg_upgrade.pdf contains a presentation about pg_upgrade internals that mirrors the text diff --git a/contrib/pg_upgrade/TESTING b/contrib/pg_upgrade/TESTING index 88adfea276..85de8da7f7 100644 --- a/contrib/pg_upgrade/TESTING +++ b/contrib/pg_upgrade/TESTING @@ -35,10 +35,10 @@ Here are the steps needed to create a regression database dump file: b) For pre-9.0, remove 'regex_flavor' f) For pre-9.0, adjust extra_float_digits - Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0 - databases, and extra_float_digits=-3 for >= 9.0 databases. - It is necessary to modify 9.0 pg_dump to always use -3, and - modify the pre-9.0 old server to accept extra_float_digits=-3. + Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0 + databases, and extra_float_digits=-3 for >= 9.0 databases. + It is necessary to modify 9.0 pg_dump to always use -3, and + modify the pre-9.0 old server to accept extra_float_digits=-3. Once the dump is created, it can be repeatedly loaded into the old database, upgraded, and dumped out of the new database, and then @@ -52,7 +52,7 @@ steps: 3) Create the regression database in the old server. -4) Load the dump file created above into the regression database; +4) Load the dump file created above into the regression database; check for errors while loading. 5) Upgrade the old database to the new major version, as outlined in diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c index 7b73b5e91f..4ded5d9d99 100644 --- a/contrib/pg_upgrade/relfilenode.c +++ b/contrib/pg_upgrade/relfilenode.c @@ -171,7 +171,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter, namelist[fileno]->d_name); snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir, maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '_')); - + unlink(new_file); transfer_relfile(pageConverter, old_file, new_file, maps[mapnum].old_nspname, maps[mapnum].old_relname, diff --git a/contrib/pgcrypto/expected/blowfish.out b/contrib/pgcrypto/expected/blowfish.out index 86c3244cec..72557ea161 100644 --- a/contrib/pgcrypto/expected/blowfish.out +++ b/contrib/pgcrypto/expected/blowfish.out @@ -108,7 +108,7 @@ decode('37363534333231204e6f77206973207468652074696d6520666f722000', 'hex'), 3ea6357a0ee7fad6d0c4b63464f2aafa40c2e91b4b7e1bba8114932fd92b5c8f111e7e50e7b2e541 (1 row) --- blowfish-448 +-- blowfish-448 SELECT encode(encrypt( decode('fedcba9876543210', 'hex'), decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f584023641aba61761f1f1f1f0e0e0e0effffffffffffffff', 'hex'), @@ -120,21 +120,21 @@ decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f58402364 -- result: c04504012e4e1f53 -- empty data -select encode( encrypt('', 'foo', 'bf'), 'hex'); +select encode(encrypt('', 'foo', 'bf'), 'hex'); encode ------------------ 1871949bb2311c8e (1 row) -- 10 bytes key -select encode( encrypt('foo', '0123456789', 'bf'), 'hex'); +select encode(encrypt('foo', '0123456789', 'bf'), 'hex'); encode ------------------ 42f58af3b2c03f46 (1 row) -- 22 bytes key -select encode( encrypt('foo', '0123456789012345678901', 'bf'), 'hex'); +select encode(encrypt('foo', '0123456789012345678901', 'bf'), 'hex'); encode ------------------ 86ab6f0bc72b5f22 diff --git a/contrib/pgcrypto/expected/crypt-blowfish.out b/contrib/pgcrypto/expected/crypt-blowfish.out index 8a8b007181..329d78f625 100644 --- a/contrib/pgcrypto/expected/crypt-blowfish.out +++ b/contrib/pgcrypto/expected/crypt-blowfish.out @@ -17,7 +17,7 @@ 
CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); UPDATE ctest SET salt = gen_salt('bf', 8); UPDATE ctest SET res = crypt(data, salt); -SELECT res = crypt(data, res) AS "worked" +SELECT res = crypt(data, res) AS "worked" FROM ctest; worked -------- diff --git a/contrib/pgcrypto/expected/rijndael.out b/contrib/pgcrypto/expected/rijndael.out index 106181ef22..14b2650c32 100644 --- a/contrib/pgcrypto/expected/rijndael.out +++ b/contrib/pgcrypto/expected/rijndael.out @@ -70,21 +70,21 @@ decode('000102030405060708090a0b0c0d0e0f101112131415161718191a1b', 'hex'), (1 row) -- empty data -select encode( encrypt('', 'foo', 'aes'), 'hex'); +select encode(encrypt('', 'foo', 'aes'), 'hex'); encode ---------------------------------- b48cc3338a2eb293b6007ef72c360d48 (1 row) -- 10 bytes key -select encode( encrypt('foo', '0123456789', 'aes'), 'hex'); +select encode(encrypt('foo', '0123456789', 'aes'), 'hex'); encode ---------------------------------- f397f03d2819b7172b68d0706fda4693 (1 row) -- 22 bytes key -select encode( encrypt('foo', '0123456789012345678901', 'aes'), 'hex'); +select encode(encrypt('foo', '0123456789012345678901', 'aes'), 'hex'); encode ---------------------------------- 5c9db77af02b4678117bcd8a71ae7f53 @@ -105,7 +105,7 @@ select encode(encrypt_iv('foo', '0123456', 'abcd', 'aes'), 'hex'); (1 row) select decrypt_iv(decode('2c24cb7da91d6d5699801268b0f5adad', 'hex'), - '0123456', 'abcd', 'aes'); + '0123456', 'abcd', 'aes'); decrypt_iv ------------ foo diff --git a/contrib/pgcrypto/rijndael.tbl b/contrib/pgcrypto/rijndael.tbl index 8ea62eae1b..c7610c0134 100644 --- a/contrib/pgcrypto/rijndael.tbl +++ b/contrib/pgcrypto/rijndael.tbl @@ -1133,6 +1133,6 @@ static const u4byte il_tab[4][256] = { }; static const u4byte rco_tab[10] = { - 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010, + 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010, 0x00000020, 0x00000040, 0x00000080, 0x0000001b, 0x00000036 }; diff --git a/contrib/pgcrypto/sql/blowfish.sql b/contrib/pgcrypto/sql/blowfish.sql index 951cbc0519..ba8df41c68 100644 --- a/contrib/pgcrypto/sql/blowfish.sql +++ b/contrib/pgcrypto/sql/blowfish.sql @@ -66,7 +66,7 @@ decode('6b77b4d63006dee605b156e27403979358deb9e7154616d959f1652bd5ff92cc', 'hex' decode('37363534333231204e6f77206973207468652074696d6520666f722000', 'hex'), 'bf-cbc'), 'hex'); --- blowfish-448 +-- blowfish-448 SELECT encode(encrypt( decode('fedcba9876543210', 'hex'), decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f584023641aba61761f1f1f1f0e0e0e0effffffffffffffff', 'hex'), @@ -74,11 +74,11 @@ decode('f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455667704689104c2fd3b2f58402364 -- result: c04504012e4e1f53 -- empty data -select encode( encrypt('', 'foo', 'bf'), 'hex'); +select encode(encrypt('', 'foo', 'bf'), 'hex'); -- 10 bytes key -select encode( encrypt('foo', '0123456789', 'bf'), 'hex'); +select encode(encrypt('foo', '0123456789', 'bf'), 'hex'); -- 22 bytes key -select encode( encrypt('foo', '0123456789012345678901', 'bf'), 'hex'); +select encode(encrypt('foo', '0123456789012345678901', 'bf'), 'hex'); -- decrypt select decrypt(encrypt('foo', '0123456', 'bf'), '0123456', 'bf'); diff --git a/contrib/pgcrypto/sql/crypt-blowfish.sql b/contrib/pgcrypto/sql/crypt-blowfish.sql index b89dfd22b7..60c1140055 100644 --- a/contrib/pgcrypto/sql/crypt-blowfish.sql +++ b/contrib/pgcrypto/sql/crypt-blowfish.sql @@ -11,7 +11,7 @@ INSERT INTO ctest VALUES ('password', '', ''); UPDATE ctest SET salt = gen_salt('bf', 8); 
UPDATE ctest SET res = crypt(data, salt); -SELECT res = crypt(data, res) AS "worked" +SELECT res = crypt(data, res) AS "worked" FROM ctest; DROP TABLE ctest; diff --git a/contrib/pgcrypto/sql/rijndael.sql b/contrib/pgcrypto/sql/rijndael.sql index 41595074bc..bfbf95d39b 100644 --- a/contrib/pgcrypto/sql/rijndael.sql +++ b/contrib/pgcrypto/sql/rijndael.sql @@ -44,11 +44,11 @@ decode('000102030405060708090a0b0c0d0e0f101112131415161718191a1b', 'hex'), 'aes-cbc'), 'hex'); -- empty data -select encode( encrypt('', 'foo', 'aes'), 'hex'); +select encode(encrypt('', 'foo', 'aes'), 'hex'); -- 10 bytes key -select encode( encrypt('foo', '0123456789', 'aes'), 'hex'); +select encode(encrypt('foo', '0123456789', 'aes'), 'hex'); -- 22 bytes key -select encode( encrypt('foo', '0123456789012345678901', 'aes'), 'hex'); +select encode(encrypt('foo', '0123456789012345678901', 'aes'), 'hex'); -- decrypt select decrypt(encrypt('foo', '0123456', 'aes'), '0123456', 'aes'); @@ -56,7 +56,7 @@ select decrypt(encrypt('foo', '0123456', 'aes'), '0123456', 'aes'); -- iv select encode(encrypt_iv('foo', '0123456', 'abcd', 'aes'), 'hex'); select decrypt_iv(decode('2c24cb7da91d6d5699801268b0f5adad', 'hex'), - '0123456', 'abcd', 'aes'); + '0123456', 'abcd', 'aes'); -- long message select encode(encrypt('Lets try a longer message.', '0123456789', 'aes'), 'hex'); diff --git a/contrib/seg/expected/seg.out b/contrib/seg/expected/seg.out index bd099a222c..17c803e50e 100644 --- a/contrib/seg/expected/seg.out +++ b/contrib/seg/expected/seg.out @@ -924,7 +924,7 @@ SELECT '1'::seg <@ '-1 .. 1'::seg AS bool; (1 row) -- Load some example data and build the index --- +-- CREATE TABLE test_seg (s seg); \copy test_seg from 'data/test_seg.data' CREATE INDEX test_seg_ix ON test_seg USING gist (s); @@ -934,7 +934,7 @@ SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; 143 (1 row) --- Test sorting +-- Test sorting SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s; s ----------------- diff --git a/contrib/seg/expected/seg_1.out b/contrib/seg/expected/seg_1.out index c92cd83510..a4cca8b391 100644 --- a/contrib/seg/expected/seg_1.out +++ b/contrib/seg/expected/seg_1.out @@ -924,7 +924,7 @@ SELECT '1'::seg <@ '-1 .. 
1'::seg AS bool; (1 row) -- Load some example data and build the index --- +-- CREATE TABLE test_seg (s seg); \copy test_seg from 'data/test_seg.data' CREATE INDEX test_seg_ix ON test_seg USING gist (s); @@ -934,7 +934,7 @@ SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; 143 (1 row) --- Test sorting +-- Test sorting SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s; s ----------------- diff --git a/contrib/seg/seg.sql.in b/contrib/seg/seg.sql.in index 2713c4a8dc..9bd747656c 100644 --- a/contrib/seg/seg.sql.in +++ b/contrib/seg/seg.sql.in @@ -4,7 +4,7 @@ SET search_path = public; -- Create the user-defined type for 1-D floating point intervals (seg) --- +-- CREATE OR REPLACE FUNCTION seg_in(cstring) RETURNS seg @@ -333,12 +333,12 @@ AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION gseg_compress(internal) -RETURNS internal +RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION gseg_decompress(internal) -RETURNS internal +RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; @@ -353,12 +353,12 @@ AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION gseg_union(internal, internal) -RETURNS seg +RETURNS seg AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; CREATE OR REPLACE FUNCTION gseg_same(seg, seg, internal) -RETURNS internal +RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; @@ -375,7 +375,7 @@ CREATE OPERATOR CLASS seg_ops FUNCTION 1 seg_cmp(seg, seg); CREATE OPERATOR CLASS gist_seg_ops -DEFAULT FOR TYPE seg USING gist +DEFAULT FOR TYPE seg USING gist AS OPERATOR 1 << , OPERATOR 2 &< , diff --git a/contrib/seg/segparse.y b/contrib/seg/segparse.y index ca351c661b..1f5f0affe8 100644 --- a/contrib/seg/segparse.y +++ b/contrib/seg/segparse.y @@ -1,6 +1,6 @@ %{ #define YYPARSE_PARAM result /* need this to pass a pointer (void *) to yyparse */ - + #include "postgres.h" #include @@ -23,7 +23,7 @@ extern int seg_yylex(void); extern int significant_digits(char *str); /* defined in seg.c */ - + void seg_yyerror(const char *message); int seg_yyparse(void *result); @@ -126,7 +126,7 @@ boundary: $$.sigd = significant_digits($1); $$.val = val; } - | + | EXTENSION SEGFLOAT { /* temp variable avoids a gcc 3.3.x bug on Sparc64 */ float val = seg_atof($2); diff --git a/contrib/seg/segscan.l b/contrib/seg/segscan.l index 36da5fa395..c2b5ca8789 100644 --- a/contrib/seg/segscan.l +++ b/contrib/seg/segscan.l @@ -1,7 +1,7 @@ %{ -/* -** A scanner for EMP-style numeric ranges -*/ +/* + * A scanner for EMP-style numeric ranges + */ #include "postgres.h" diff --git a/contrib/seg/sort-segments.pl b/contrib/seg/sort-segments.pl index 1205d3b972..62cdfb1ffd 100755 --- a/contrib/seg/sort-segments.pl +++ b/contrib/seg/sort-segments.pl @@ -7,7 +7,7 @@ while (<>) { push @rows, $_; } -foreach ( sort { +foreach ( sort { @ar = split("\t", $a); $valA = pop @ar; $valA =~ s/[~<> ]+//g; diff --git a/contrib/seg/sql/seg.sql b/contrib/seg/sql/seg.sql index 61ad519613..b8a29d659a 100644 --- a/contrib/seg/sql/seg.sql +++ b/contrib/seg/sql/seg.sql @@ -213,7 +213,7 @@ SELECT '-1'::seg <@ '-1 .. 1'::seg AS bool; SELECT '1'::seg <@ '-1 .. 
1'::seg AS bool; -- Load some example data and build the index --- +-- CREATE TABLE test_seg (s seg); \copy test_seg from 'data/test_seg.data' @@ -221,7 +221,7 @@ CREATE TABLE test_seg (s seg); CREATE INDEX test_seg_ix ON test_seg USING gist (s); SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; --- Test sorting +-- Test sorting SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s; -- Test functions diff --git a/contrib/spi/autoinc.example b/contrib/spi/autoinc.example index a2f470dc2d..08880ce5fa 100644 --- a/contrib/spi/autoinc.example +++ b/contrib/spi/autoinc.example @@ -8,9 +8,9 @@ CREATE TABLE ids ( idesc text ); -CREATE TRIGGER ids_nextid +CREATE TRIGGER ids_nextid BEFORE INSERT OR UPDATE ON ids - FOR EACH ROW + FOR EACH ROW EXECUTE PROCEDURE autoinc (id, next_id); INSERT INTO ids VALUES (0, 'first (-2 ?)'); @@ -19,11 +19,11 @@ INSERT INTO ids(idesc) VALUES ('third (1 ?!)'); SELECT * FROM ids; -UPDATE ids SET id = null, idesc = 'first: -2 --> 2' +UPDATE ids SET id = null, idesc = 'first: -2 --> 2' WHERE idesc = 'first (-2 ?)'; -UPDATE ids SET id = 0, idesc = 'second: -1 --> 3' +UPDATE ids SET id = 0, idesc = 'second: -1 --> 3' WHERE id = -1; -UPDATE ids SET id = 4, idesc = 'third: 1 --> 4' +UPDATE ids SET id = 4, idesc = 'third: 1 --> 4' WHERE id = 1; SELECT * FROM ids; diff --git a/contrib/spi/autoinc.sql.in b/contrib/spi/autoinc.sql.in index d38c9df2d4..1fa322f9c7 100644 --- a/contrib/spi/autoinc.sql.in +++ b/contrib/spi/autoinc.sql.in @@ -3,7 +3,7 @@ -- Adjust this setting to control where the objects get created. SET search_path = public; -CREATE OR REPLACE FUNCTION autoinc() -RETURNS trigger +CREATE OR REPLACE FUNCTION autoinc() +RETURNS trigger AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/contrib/spi/insert_username.example b/contrib/spi/insert_username.example index a9d23fb2ad..2c1eeb0e0d 100644 --- a/contrib/spi/insert_username.example +++ b/contrib/spi/insert_username.example @@ -7,7 +7,7 @@ CREATE TABLE username_test ( CREATE TRIGGER insert_usernames BEFORE INSERT OR UPDATE ON username_test - FOR EACH ROW + FOR EACH ROW EXECUTE PROCEDURE insert_username (username); INSERT INTO username_test VALUES ('nothing'); diff --git a/contrib/spi/insert_username.sql.in b/contrib/spi/insert_username.sql.in index f06cc0cb5a..bdc2deb340 100644 --- a/contrib/spi/insert_username.sql.in +++ b/contrib/spi/insert_username.sql.in @@ -3,7 +3,7 @@ -- Adjust this setting to control where the objects get created. SET search_path = public; -CREATE OR REPLACE FUNCTION insert_username() -RETURNS trigger +CREATE OR REPLACE FUNCTION insert_username() +RETURNS trigger AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/contrib/spi/moddatetime.example b/contrib/spi/moddatetime.example index e4a713c12a..65af388214 100644 --- a/contrib/spi/moddatetime.example +++ b/contrib/spi/moddatetime.example @@ -8,7 +8,7 @@ CREATE TABLE mdt ( CREATE TRIGGER mdt_moddatetime BEFORE UPDATE ON mdt - FOR EACH ROW + FOR EACH ROW EXECUTE PROCEDURE moddatetime (moddate); INSERT INTO mdt VALUES (1, 'first'); diff --git a/contrib/spi/refint.example b/contrib/spi/refint.example index 1300e81654..d0ff744164 100644 --- a/contrib/spi/refint.example +++ b/contrib/spi/refint.example @@ -20,11 +20,11 @@ CREATE INDEX CI ON C (REFC); --Trigger for table A: CREATE TRIGGER AT BEFORE DELETE OR UPDATE ON A FOR EACH ROW -EXECUTE PROCEDURE +EXECUTE PROCEDURE check_foreign_key (2, 'cascade', 'ID', 'B', 'REFB', 'C', 'REFC'); /* 2 - means that check must be performed for foreign keys of 2 tables. 
-cascade - defines that corresponding keys must be deleted. +cascade - defines that corresponding keys must be deleted. ID - name of primary key column in triggered table (A). You may use as many columns as you need. B - name of (first) table with foreign keys. @@ -38,11 +38,11 @@ REFC - name of foreign key column in this table. --Trigger for table B: CREATE TRIGGER BT BEFORE INSERT OR UPDATE ON B FOR EACH ROW -EXECUTE PROCEDURE +EXECUTE PROCEDURE check_primary_key ('REFB', 'A', 'ID'); /* -REFB - name of foreign key column in triggered (B) table. You may use as +REFB - name of foreign key column in triggered (B) table. You may use as many columns as you need, but number of key columns in referenced table must be the same. A - referenced table name. @@ -52,7 +52,7 @@ ID - name of primary key column in referenced table. --Trigger for table C: CREATE TRIGGER CT BEFORE INSERT OR UPDATE ON C FOR EACH ROW -EXECUTE PROCEDURE +EXECUTE PROCEDURE check_primary_key ('REFC', 'A', 'ID'); -- Now try diff --git a/contrib/spi/timetravel.example b/contrib/spi/timetravel.example index 1769e48154..35a7f65408 100644 --- a/contrib/spi/timetravel.example +++ b/contrib/spi/timetravel.example @@ -1,8 +1,8 @@ drop table tttest; create table tttest ( - price_id int4, - price_val int4, + price_id int4, + price_val int4, price_on abstime, price_off abstime ); @@ -12,17 +12,17 @@ alter table tttest add column q1 text; alter table tttest add column q2 int; alter table tttest drop column q1; -create trigger timetravel +create trigger timetravel before insert or delete or update on tttest - for each row - execute procedure + for each row + execute procedure timetravel (price_on, price_off); insert into tttest values (1, 1, null, null); insert into tttest(price_id, price_val) values (2, 2); insert into tttest(price_id, price_val,price_off) values (3, 3, 'infinity'); -insert into tttest(price_id, price_val,price_off) values (4, 4, +insert into tttest(price_id, price_val,price_off) values (4, 4, abstime('now'::timestamp - '100 days'::interval)); insert into tttest(price_id, price_val,price_on) values (3, 3, 'infinity'); -- duplicate key @@ -62,7 +62,7 @@ select set_timetravel('tttest', 1); -- turn TT ON! select get_timetravel('tttest'); -- check status -- we want to correct some date -update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and +update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and price_off <> 'infinity'; -- but this doesn't work @@ -71,11 +71,11 @@ select set_timetravel('tttest', 0); -- turn TT OFF! select get_timetravel('tttest'); -- check status -update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and +update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and price_off <> 'infinity'; select * from tttest; -- isn't it what we need ? -- get price for price_id == 5 as it was '10-Jan-1990' -select * from tttest where price_id = 5 and +select * from tttest where price_id = 5 and price_on <= '10-Jan-1990' and price_off > '10-Jan-1990'; diff --git a/contrib/spi/timetravel.sql.in b/contrib/spi/timetravel.sql.in index 4c64f211d9..83dc958a88 100644 --- a/contrib/spi/timetravel.sql.in +++ b/contrib/spi/timetravel.sql.in @@ -3,17 +3,17 @@ -- Adjust this setting to control where the objects get created. 
SET search_path = public; -CREATE OR REPLACE FUNCTION timetravel() -RETURNS trigger +CREATE OR REPLACE FUNCTION timetravel() +RETURNS trigger AS 'MODULE_PATHNAME' LANGUAGE C; -CREATE OR REPLACE FUNCTION set_timetravel(name, int4) -RETURNS int4 +CREATE OR REPLACE FUNCTION set_timetravel(name, int4) +RETURNS int4 AS 'MODULE_PATHNAME' LANGUAGE C RETURNS NULL ON NULL INPUT; -CREATE OR REPLACE FUNCTION get_timetravel(name) -RETURNS int4 +CREATE OR REPLACE FUNCTION get_timetravel(name) +RETURNS int4 AS 'MODULE_PATHNAME' LANGUAGE C RETURNS NULL ON NULL INPUT; diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL index 65150d0fd5..58e69bc784 100755 --- a/contrib/start-scripts/osx/PostgreSQL +++ b/contrib/start-scripts/osx/PostgreSQL @@ -30,9 +30,9 @@ # # Created by David Wheeler, 2002. -# modified by Ray Aspeitia 12-03-2003 : +# modified by Ray Aspeitia 12-03-2003 : # added log rotation script to db startup -# modified StartupParameters.plist "Provides" parameter to make it easier to +# modified StartupParameters.plist "Provides" parameter to make it easier to # start and stop with the SystemStarter utitlity # use the below command in order to correctly start/stop/restart PG with log rotation script: diff --git a/contrib/test_parser/expected/test_parser.out b/contrib/test_parser/expected/test_parser.out index 600086c4ae..3d0fd4210f 100644 --- a/contrib/test_parser/expected/test_parser.out +++ b/contrib/test_parser/expected/test_parser.out @@ -41,7 +41,7 @@ SELECT to_tsquery('testcfg', 'star'); 'star' (1 row) -SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies', +SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies', to_tsquery('testcfg', 'stars')); ts_headline ----------------------------------------------------------------- diff --git a/contrib/test_parser/sql/test_parser.sql b/contrib/test_parser/sql/test_parser.sql index f43d4c7e09..97c2cb5a5d 100644 --- a/contrib/test_parser/sql/test_parser.sql +++ b/contrib/test_parser/sql/test_parser.sql @@ -22,5 +22,5 @@ SELECT to_tsvector('testcfg','That''s my first own parser'); SELECT to_tsquery('testcfg', 'star'); -SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies', +SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies', to_tsquery('testcfg', 'stars')); diff --git a/contrib/tsearch2/expected/tsearch2.out b/contrib/tsearch2/expected/tsearch2.out index 8674337e52..18b591e0aa 100644 --- a/contrib/tsearch2/expected/tsearch2.out +++ b/contrib/tsearch2/expected/tsearch2.out @@ -815,13 +815,13 @@ SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://ae 53 (1 row) -select to_tsquery('english', 'qwe & sKies '); +select to_tsquery('english', 'qwe & sKies '); to_tsquery --------------- 'qwe' & 'sky' (1 row) -select to_tsquery('simple', 'qwe & sKies '); +select to_tsquery('simple', 'qwe & sKies '); to_tsquery ----------------- 'qwe' & 'skies' @@ -2337,7 +2337,6 @@ Upon a woman s face. E. J. Pratt (1882 1964) The granite features of this cliff (1 row) - select headline('Erosion It took the sea a thousand years, A thousand years to trace The granite features of this cliff @@ -2354,7 +2353,6 @@ Upon a woman s face. E. J. 
Pratt (1882 1964) The granite features of this cliff (1 row) - select headline('Erosion It took the sea a thousand years, A thousand years to trace The granite features of this cliff @@ -2382,7 +2380,7 @@ ff-bg document.write(15); -', +', to_tsquery('sea&foo'), 'HighlightAll=true'); headline ----------------------------------------------------------------------------- diff --git a/contrib/tsearch2/expected/tsearch2_1.out b/contrib/tsearch2/expected/tsearch2_1.out index a26c5162d1..f7cb0963b8 100644 --- a/contrib/tsearch2/expected/tsearch2_1.out +++ b/contrib/tsearch2/expected/tsearch2_1.out @@ -815,13 +815,13 @@ SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://ae 53 (1 row) -select to_tsquery('english', 'qwe & sKies '); +select to_tsquery('english', 'qwe & sKies '); to_tsquery --------------- 'qwe' & 'sky' (1 row) -select to_tsquery('simple', 'qwe & sKies '); +select to_tsquery('simple', 'qwe & sKies '); to_tsquery ----------------- 'qwe' & 'skies' @@ -2337,7 +2337,6 @@ Upon a woman s face. E. J. Pratt (1882 1964) The granite features of this cliff (1 row) - select headline('Erosion It took the sea a thousand years, A thousand years to trace The granite features of this cliff @@ -2354,7 +2353,6 @@ Upon a woman s face. E. J. Pratt (1882 1964) The granite features of this cliff (1 row) - select headline('Erosion It took the sea a thousand years, A thousand years to trace The granite features of this cliff @@ -2382,7 +2380,7 @@ ff-bg document.write(15); -', +', to_tsquery('sea&foo'), 'HighlightAll=true'); headline ----------------------------------------------------------------------------- diff --git a/contrib/tsearch2/sql/tsearch2.sql b/contrib/tsearch2/sql/tsearch2.sql index bbae7b45db..99d808a1b3 100644 --- a/contrib/tsearch2/sql/tsearch2.sql +++ b/contrib/tsearch2/sql/tsearch2.sql @@ -168,8 +168,8 @@ SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://ae wow < jqw <> qwerty')); -select to_tsquery('english', 'qwe & sKies '); -select to_tsquery('simple', 'qwe & sKies '); +select to_tsquery('english', 'qwe & sKies '); +select to_tsquery('simple', 'qwe & sKies '); select to_tsquery('english', '''the wether'':dc & '' sKies '':BC '); select to_tsquery('english', 'asd&(and|fghj)'); select to_tsquery('english', '(asd&and)|fghj'); @@ -288,7 +288,7 @@ An hour of storm to place The sculpture of these granite seams, Upon a woman s face. E. J. Pratt (1882 1964) ', to_tsquery('sea&thousand&years')); - + select headline('Erosion It took the sea a thousand years, A thousand years to trace The granite features of this cliff @@ -298,7 +298,7 @@ An hour of storm to place The sculpture of these granite seams, Upon a woman s face. E. J. 
Pratt (1882 1964) ', to_tsquery('granite&sea')); - + select headline('Erosion It took the sea a thousand years, A thousand years to trace The granite features of this cliff @@ -321,7 +321,7 @@ ff-bg document.write(15); -', +', to_tsquery('sea&foo'), 'HighlightAll=true'); --check debug select * from public.ts_debug('Tsearch module for PostgreSQL 7.3.3'); diff --git a/contrib/tsearch2/tsearch2.sql.in b/contrib/tsearch2/tsearch2.sql.in index 739d57eaa9..1df2908285 100644 --- a/contrib/tsearch2/tsearch2.sql.in +++ b/contrib/tsearch2/tsearch2.sql.in @@ -11,7 +11,7 @@ CREATE DOMAIN gtsvector AS pg_catalog.gtsvector; CREATE DOMAIN gtsq AS pg_catalog.text; --dict interface -CREATE FUNCTION lexize(oid, text) +CREATE FUNCTION lexize(oid, text) RETURNS _text as 'ts_lexize' LANGUAGE INTERNAL @@ -44,7 +44,7 @@ CREATE FUNCTION set_curdict(text) --built-in dictionaries CREATE FUNCTION dex_init(internal) RETURNS internal - as 'MODULE_PATHNAME', 'tsa_dex_init' + as 'MODULE_PATHNAME', 'tsa_dex_init' LANGUAGE C; CREATE FUNCTION dex_lexize(internal,internal,int4) @@ -66,7 +66,7 @@ CREATE FUNCTION snb_lexize(internal,internal,int4) CREATE FUNCTION snb_ru_init_koi8(internal) RETURNS internal - as 'MODULE_PATHNAME', 'tsa_snb_ru_init_koi8' + as 'MODULE_PATHNAME', 'tsa_snb_ru_init_koi8' LANGUAGE C; CREATE FUNCTION snb_ru_init_utf8(internal) @@ -81,7 +81,7 @@ CREATE FUNCTION snb_ru_init(internal) CREATE FUNCTION spell_init(internal) RETURNS internal - as 'MODULE_PATHNAME', 'tsa_spell_init' + as 'MODULE_PATHNAME', 'tsa_spell_init' LANGUAGE C; CREATE FUNCTION spell_lexize(internal,internal,int4) @@ -92,7 +92,7 @@ CREATE FUNCTION spell_lexize(internal,internal,int4) CREATE FUNCTION syn_init(internal) RETURNS internal - as 'MODULE_PATHNAME', 'tsa_syn_init' + as 'MODULE_PATHNAME', 'tsa_syn_init' LANGUAGE C; CREATE FUNCTION syn_lexize(internal,internal,int4) @@ -113,8 +113,8 @@ CREATE FUNCTION thesaurus_lexize(internal,internal,int4,internal) RETURNS NULL ON NULL INPUT; --sql-level interface -CREATE TYPE tokentype - as (tokid int4, alias text, descr text); +CREATE TYPE tokentype + as (tokid int4, alias text, descr text); CREATE FUNCTION token_type(int4) RETURNS setof tokentype @@ -149,7 +149,7 @@ CREATE FUNCTION set_curprs(text) LANGUAGE C RETURNS NULL ON NULL INPUT; -CREATE TYPE tokenout +CREATE TYPE tokenout as (tokid int4, token text); CREATE FUNCTION parse(oid,text) @@ -157,19 +157,19 @@ CREATE FUNCTION parse(oid,text) as 'ts_parse_byid' LANGUAGE INTERNAL RETURNS NULL ON NULL INPUT; - + CREATE FUNCTION parse(text,text) RETURNS setof tokenout as 'ts_parse_byname' LANGUAGE INTERNAL RETURNS NULL ON NULL INPUT; - + CREATE FUNCTION parse(text) RETURNS setof tokenout as 'MODULE_PATHNAME', 'tsa_parse_current' LANGUAGE C RETURNS NULL ON NULL INPUT; - + --default parser CREATE FUNCTION prsd_start(internal,int4) RETURNS internal @@ -399,7 +399,7 @@ AS STORAGE gtsvector; --stat info -CREATE TYPE statinfo +CREATE TYPE statinfo as (word text, ndoc int4, nentry int4); CREATE FUNCTION stat(text) @@ -560,7 +560,7 @@ AS CREATE OPERATOR CLASS tsvector_ops FOR TYPE tsvector USING btree AS OPERATOR 1 < , - OPERATOR 2 <= , + OPERATOR 2 <= , OPERATOR 3 = , OPERATOR 4 >= , OPERATOR 5 > , diff --git a/contrib/unaccent/Makefile b/contrib/unaccent/Makefile index 36415fef77..254155dcca 100644 --- a/contrib/unaccent/Makefile +++ b/contrib/unaccent/Makefile @@ -9,7 +9,7 @@ DATA_TSEARCH = unaccent.rules REGRESS = unaccent # Adjust REGRESS_OPTS because we need a UTF8 database -REGRESS_OPTS = --dbname=$(CONTRIB_TESTDB) --multibyte=UTF8 --no-locale 
+REGRESS_OPTS = --dbname=$(CONTRIB_TESTDB) --multibyte=UTF8 --no-locale ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/xml2/expected/xml2.out b/contrib/xml2/expected/xml2.out index 53b8064cc3..8ce04d0b84 100644 --- a/contrib/xml2/expected/xml2.out +++ b/contrib/xml2/expected/xml2.out @@ -18,7 +18,7 @@ select query_to_xml('select 1 as x',true,false,''); (1 row) -select xslt_process( query_to_xml('select x from generate_series(1,5) as +select xslt_process( query_to_xml('select x from generate_series(1,5) as x',true,false,'')::text, $$ diff --git a/contrib/xml2/expected/xml2_1.out b/contrib/xml2/expected/xml2_1.out index b465ea27b6..d2d243ada7 100644 --- a/contrib/xml2/expected/xml2_1.out +++ b/contrib/xml2/expected/xml2_1.out @@ -18,7 +18,7 @@ select query_to_xml('select 1 as x',true,false,''); (1 row) -select xslt_process( query_to_xml('select x from generate_series(1,5) as +select xslt_process( query_to_xml('select x from generate_series(1,5) as x',true,false,'')::text, $$ diff --git a/contrib/xml2/sql/xml2.sql b/contrib/xml2/sql/xml2.sql index 202a72baed..5b3cc997f5 100644 --- a/contrib/xml2/sql/xml2.sql +++ b/contrib/xml2/sql/xml2.sql @@ -10,7 +10,7 @@ RESET client_min_messages; select query_to_xml('select 1 as x',true,false,''); -select xslt_process( query_to_xml('select x from generate_series(1,5) as +select xslt_process( query_to_xml('select x from generate_series(1,5) as x',true,false,'')::text, $$ diff --git a/doc/bug.template b/doc/bug.template index 7204935426..f1c5dc9d04 100644 --- a/doc/bug.template +++ b/doc/bug.template @@ -40,7 +40,7 @@ Please enter a FULL description of your problem: Please describe a way to repeat the problem. Please try to provide a -concise reproducible example, if at all possible: +concise reproducible example, if at all possible: ---------------------------------------------------------------------- diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile index a7f0c8d634..a797499c79 100644 --- a/doc/src/sgml/Makefile +++ b/doc/src/sgml/Makefile @@ -64,7 +64,7 @@ CATALOG = -c $(DOCBOOKSTYLE)/catalog endif # Enable some extra warnings -# -wfully-tagged needed to throw a warning on missing tags +# -wfully-tagged needed to throw a warning on missing tags # for older tool chains, 2007-08-31 # Note: try "make SPFLAGS=-wxml" to catch a lot of other dubious constructs, # in particular < and & that haven't been made into entities. It's far too diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml index 82b209f9c6..027138b92e 100644 --- a/doc/src/sgml/auto-explain.sgml +++ b/doc/src/sgml/auto-explain.sgml @@ -112,8 +112,8 @@ LOAD 'auto_explain'; auto_explain.log_buffers causes EXPLAIN - (ANALYZE, BUFFERS) output, rather than just EXPLAIN - output, to be printed when an execution plan is logged. This parameter is + (ANALYZE, BUFFERS) output, rather than just EXPLAIN + output, to be printed when an execution plan is logged. This parameter is off by default. Only superusers can change this setting. This parameter has no effect unless auto_explain.log_analyze parameter is set. diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml index e02f443566..edc59bdbb6 100644 --- a/doc/src/sgml/biblio.sgml +++ b/doc/src/sgml/biblio.sgml @@ -257,7 +257,7 @@ ssimkovi@ag.or.at Proceedings and Articles This section is for articles and newsletters. 
- + Partial indexing in POSTGRES: research project Olson, 1993 @@ -328,7 +328,7 @@ ssimkovi@ag.or.at Generalized Partial Indexes <ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">(cached version) -<!-- +<!-- Original URL: http://citeseer.ist.psu.edu/seshadri95generalized.html --> </ulink> diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml index e94922124c..9e047e7dbd 100644 --- a/doc/src/sgml/charset.sgml +++ b/doc/src/sgml/charset.sgml @@ -71,7 +71,7 @@ initdb --locale=sv_SE locale then the specifications can take the form <replaceable>language_territory.codeset</>. For example, <literal>fr_BE.UTF-8</> represents the French language (fr) as - spoken in Belgium (BE), with a <acronym>UTF-8</> character set + spoken in Belgium (BE), with a <acronym>UTF-8</> character set encoding. </para> diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 898cdacbb1..96f1ef49b2 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -718,7 +718,7 @@ SET ENABLE_SEQSCAN TO OFF; <listitem> <para> Sets the location of the Kerberos server key file. See - <xref linkend="kerberos-auth"> or <xref linkend="gssapi-auth"> + <xref linkend="kerberos-auth"> or <xref linkend="gssapi-auth"> for details. This parameter can only be set in the <filename>postgresql.conf</> file or on the server command line. </para> @@ -748,7 +748,7 @@ SET ENABLE_SEQSCAN TO OFF; <para> Sets whether Kerberos and GSSAPI user names should be treated case-insensitively. - The default is <literal>off</> (case sensitive). This parameter can only be + The default is <literal>off</> (case sensitive). This parameter can only be set in the <filename>postgresql.conf</> file or on the server command line. </para> </listitem> @@ -1044,7 +1044,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + <varlistentry id="guc-shared-preload-libraries" xreflabel="shared_preload_libraries"> <term><varname>shared_preload_libraries</varname> (<type>string</type>)</term> <indexterm> @@ -1076,7 +1076,7 @@ SET ENABLE_SEQSCAN TO OFF; when the library is first used. However, the time to start each new server process might increase slightly, even if that process never uses the library. So this parameter is recommended only for - libraries that will be used in most sessions. + libraries that will be used in most sessions. </para> <note> @@ -1084,7 +1084,7 @@ SET ENABLE_SEQSCAN TO OFF; On Windows hosts, preloading a library at server start will not reduce the time required to start each new server process; each server process will re-load all preload libraries. However, <varname>shared_preload_libraries - </varname> is still useful on Windows hosts because some shared libraries may + </varname> is still useful on Windows hosts because some shared libraries may need to perform certain operations that only take place at postmaster start (for example, a shared library may need to reserve lightweight locks or shared memory and you can't do that after the postmaster has started). @@ -1097,8 +1097,8 @@ SET ENABLE_SEQSCAN TO OFF; <para> Every PostgreSQL-supported library has a <quote>magic - block</> that is checked to guarantee compatibility. - For this reason, non-PostgreSQL libraries cannot be + block</> that is checked to guarantee compatibility. + For this reason, non-PostgreSQL libraries cannot be loaded in this way. 
</para> </listitem> @@ -1487,7 +1487,7 @@ SET ENABLE_SEQSCAN TO OFF; <para> <varname>fsync</varname> can only be set in the <filename>postgresql.conf</> file or on the server command line. - If you turn this parameter off, also consider turning off + If you turn this parameter off, also consider turning off <xref linkend="guc-full-page-writes">. </para> </listitem> @@ -1528,7 +1528,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + <varlistentry id="guc-wal-sync-method" xreflabel="wal_sync_method"> <term><varname>wal_sync_method</varname> (<type>enum</type>)</term> <indexterm> @@ -1584,7 +1584,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + <varlistentry id="guc-full-page-writes" xreflabel="full_page_writes"> <indexterm> <primary><varname>full_page_writes</> configuration parameter</primary> @@ -1848,7 +1848,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + <varlistentry id="guc-archive-timeout" xreflabel="archive_timeout"> <term><varname>archive_timeout</varname> (<type>integer</type>)</term> <indexterm> @@ -2257,7 +2257,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + </variablelist> </sect2> <sect2 id="runtime-config-query-constants"> @@ -2368,7 +2368,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + <varlistentry id="guc-cpu-operator-cost" xreflabel="cpu_operator_cost"> <term><varname>cpu_operator_cost</varname> (<type>floating point</type>)</term> <indexterm> @@ -2382,7 +2382,7 @@ SET ENABLE_SEQSCAN TO OFF; </para> </listitem> </varlistentry> - + <varlistentry id="guc-effective-cache-size" xreflabel="effective_cache_size"> <term><varname>effective_cache_size</varname> (<type>integer</type>)</term> <indexterm> @@ -2745,10 +2745,10 @@ SELECT * FROM parent WHERE key = 2400; <productname>PostgreSQL</productname> supports several methods for logging server messages, including <systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem> and - <systemitem>syslog</systemitem>. On Windows, + <systemitem>syslog</systemitem>. On Windows, <systemitem>eventlog</systemitem> is also supported. Set this parameter to a list of desired log destinations separated by - commas. The default is to log to <systemitem>stderr</systemitem> + commas. The default is to log to <systemitem>stderr</systemitem> only. This parameter can only be set in the <filename>postgresql.conf</> file or on the server command line. @@ -2759,7 +2759,7 @@ SELECT * FROM parent WHERE key = 2400; value</> (<acronym>CSV</>) format, which is convenient for loading logs into programs. See <xref linkend="runtime-config-logging-csvlog"> for details. - <varname>logging_collector</varname> must be enabled to generate + <varname>logging_collector</varname> must be enabled to generate CSV-format log output. </para> @@ -2822,7 +2822,7 @@ local0.* /var/log/postgresql </indexterm> <listitem> <para> - When <varname>logging_collector</> is enabled, + When <varname>logging_collector</> is enabled, this parameter determines the directory in which log files will be created. It can be specified as an absolute path, or relative to the cluster data directory. @@ -2861,7 +2861,7 @@ local0.* /var/log/postgresql </para> <para> If CSV-format output is enabled in <varname>log_destination</>, - <literal>.csv</> will be appended to the timestamped + <literal>.csv</> will be appended to the timestamped log file name to create the file name for CSV-format output. (If <varname>log_filename</> ends in <literal>.log</>, the suffix is replaced instead.) 
@@ -2966,18 +2966,18 @@ local0.* /var/log/postgresql </para> <para> Example: To keep 7 days of logs, one log file per day named - <literal>server_log.Mon</literal>, <literal>server_log.Tue</literal>, + <literal>server_log.Mon</literal>, <literal>server_log.Tue</literal>, etc, and automatically overwrite last week's log with this week's log, - set <varname>log_filename</varname> to <literal>server_log.%a</literal>, - <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, and + set <varname>log_filename</varname> to <literal>server_log.%a</literal>, + <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, and <varname>log_rotation_age</varname> to <literal>1440</literal>. </para> <para> - Example: To keep 24 hours of logs, one log file per hour, but - also rotate sooner if the log file size exceeds 1GB, set - <varname>log_filename</varname> to <literal>server_log.%H%M</literal>, - <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, - <varname>log_rotation_age</varname> to <literal>60</literal>, and + Example: To keep 24 hours of logs, one log file per hour, but + also rotate sooner if the log file size exceeds 1GB, set + <varname>log_filename</varname> to <literal>server_log.%H%M</literal>, + <varname>log_truncate_on_rotation</varname> to <literal>on</literal>, + <varname>log_rotation_age</varname> to <literal>60</literal>, and <varname>log_rotation_size</varname> to <literal>1000000</literal>. Including <literal>%M</> in <varname>log_filename</varname> allows any size-driven rotations that might occur to select a file name @@ -3007,7 +3007,7 @@ local0.* /var/log/postgresql </para> </listitem> </varlistentry> - + <varlistentry id="guc-syslog-ident" xreflabel="syslog_ident"> <term><varname>syslog_ident</varname> (<type>string</type>)</term> <indexterm> @@ -3132,7 +3132,7 @@ local0.* /var/log/postgresql </para> </listitem> </varlistentry> - + <varlistentry id="guc-log-min-duration-statement" xreflabel="log_min_duration_statement"> <term><varname>log_min_duration_statement</varname> (<type>integer</type>)</term> <indexterm> @@ -3163,7 +3163,7 @@ local0.* /var/log/postgresql the text of statements that are logged because of <varname>log_statement</> will not be repeated in the duration log message. - If you are not using <application>syslog</>, it is recommended + If you are not using <application>syslog</>, it is recommended that you log the PID or session ID using <xref linkend="guc-log-line-prefix"> so that you can link the statement message to the later @@ -3365,8 +3365,8 @@ local0.* /var/log/postgresql <note> <para> - Some client programs, like <application>psql</>, attempt - to connect twice while determining if a password is required, so + Some client programs, like <application>psql</>, attempt + to connect twice while determining if a password is required, so duplicate <quote>connection received</> messages do not necessarily indicate a problem. </para> @@ -3462,7 +3462,7 @@ local0.* /var/log/postgresql </para> </listitem> </varlistentry> - + <varlistentry id="guc-log-line-prefix" xreflabel="log_line_prefix"> <term><varname>log_line_prefix</varname> (<type>string</type>)</term> <indexterm> @@ -3607,7 +3607,7 @@ FROM pg_stat_activity; <tip> <para> - <application>Syslog</> produces its own + <application>Syslog</> produces its own time stamp and process ID information, so you probably do not want to include those escapes if you are logging to <application>syslog</>. 
</para> @@ -3808,9 +3808,9 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; <listitem> <para> - Set <varname>log_rotation_size</varname> to 0 to disable - size-based log rotation, as it makes the log file name difficult - to predict. + Set <varname>log_rotation_size</varname> to 0 to disable + size-based log rotation, as it makes the log file name difficult + to predict. </para> </listitem> @@ -5000,7 +5000,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' <para> Every PostgreSQL-supported library has a <quote>magic - block</> that is checked to guarantee compatibility. + block</> that is checked to guarantee compatibility. For this reason, non-PostgreSQL libraries cannot be loaded in this way. </para> diff --git a/doc/src/sgml/contacts.sgml b/doc/src/sgml/contacts.sgml index 996c0771bb..a981625027 100644 --- a/doc/src/sgml/contacts.sgml +++ b/doc/src/sgml/contacts.sgml @@ -15,7 +15,7 @@ and the mailing lists themselves. <para> Refer to the introduction in this manual or to the -<productname>PostgreSQL</productname> +<productname>PostgreSQL</productname> <ulink url="http://www.postgresql.org">web page</ulink> for subscription information to the no-cost mailing lists. </para> diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml index 9057996014..a7c2a1d43e 100644 --- a/doc/src/sgml/contrib.sgml +++ b/doc/src/sgml/contrib.sgml @@ -16,7 +16,7 @@ <para> When building from the source distribution, these modules are not built - automatically, unless you build the "world" target + automatically, unless you build the "world" target (see <xref linkend="build">). You can build and install all of them by running: <screen> diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 02eaedf943..66aef15608 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -21,7 +21,7 @@ <para> <xref linkend="datatype-table"> shows all the built-in general-purpose data - types. Most of the alternative names listed in the + types. Most of the alternative names listed in the <quote>Aliases</quote> column are the names used internally by <productname>PostgreSQL</productname> for historical reasons. In addition, some internally used or deprecated types are available, @@ -555,7 +555,7 @@ NUMERIC <para> In addition to ordinary numeric values, the <type>numeric</type> - type allows the special value <literal>NaN</>, meaning + type allows the special value <literal>NaN</>, meaning <quote>not-a-number</quote>. Any operation on <literal>NaN</> yields another <literal>NaN</>. When writing this value as a constant in an SQL command, you must put quotes around it, @@ -703,9 +703,9 @@ NUMERIC <type>float(<replaceable>p</replaceable>)</type> for specifying inexact numeric types. Here, <replaceable>p</replaceable> specifies the minimum acceptable precision in <emphasis>binary</> digits. - <productname>PostgreSQL</productname> accepts + <productname>PostgreSQL</productname> accepts <type>float(1)</type> to <type>float(24)</type> as selecting the - <type>real</type> type, while + <type>real</type> type, while <type>float(25)</type> to <type>float(53)</type> select <type>double precision</type>. Values of <replaceable>p</replaceable> outside the allowed range draw an error. @@ -1628,7 +1628,7 @@ MINUTE TO SECOND <para> Date and time input is accepted in almost any reasonable format, including - ISO 8601, <acronym>SQL</acronym>-compatible, + ISO 8601, <acronym>SQL</acronym>-compatible, traditional <productname>POSTGRES</productname>, and others. 
For some formats, ordering of day, month, and year in date input is ambiguous and there is support for specifying the expected @@ -1645,12 +1645,12 @@ MINUTE TO SECOND See <xref linkend="datetime-appendix"> for the exact parsing rules of date/time input and for the recognized text fields including months, days of the week, and - time zones. + time zones. </para> <para> Remember that any date or time literal input needs to be enclosed - in single quotes, like text strings. Refer to + in single quotes, like text strings. Refer to <xref linkend="sql-syntax-constants-generic"> for more information. <acronym>SQL</acronym> requires the following syntax @@ -1672,7 +1672,7 @@ MINUTE TO SECOND <indexterm> <primary>date</primary> </indexterm> - + <para> <xref linkend="datatype-datetime-date-table"> shows some possible inputs for the <type>date</type> type. @@ -1787,7 +1787,7 @@ MINUTE TO SECOND <para> Valid input for these types consists of a time of day followed by an optional time zone. (See <xref - linkend="datatype-datetime-time-table"> + linkend="datatype-datetime-time-table"> and <xref linkend="datatype-timezone-table">.) If a time zone is specified in the input for <type>time without time zone</type>, it is silently ignored. You can also specify a date but it will @@ -1954,8 +1954,8 @@ January 8 04:05:06 1999 PST <para> The <acronym>SQL</acronym> standard differentiates - <type>timestamp without time zone</type> - and <type>timestamp with time zone</type> literals by the presence of a + <type>timestamp without time zone</type> + and <type>timestamp with time zone</type> literals by the presence of a <quote>+</quote> or <quote>-</quote> symbol and time zone offset after the time. Hence, according to the standard, @@ -2097,10 +2097,10 @@ January 8 04:05:06 1999 PST The following <acronym>SQL</acronym>-compatible functions can also be used to obtain the current time value for the corresponding data type: - <literal>CURRENT_DATE</literal>, <literal>CURRENT_TIME</literal>, - <literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>, - <literal>LOCALTIMESTAMP</literal>. The latter four accept an - optional subsecond precision specification. (See <xref + <literal>CURRENT_DATE</literal>, <literal>CURRENT_TIME</literal>, + <literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>, + <literal>LOCALTIMESTAMP</literal>. The latter four accept an + optional subsecond precision specification. (See <xref linkend="functions-datetime-current">.) Note that these are SQL functions and are <emphasis>not</> recognized in data input strings. </para> @@ -2255,10 +2255,10 @@ January 8 04:05:06 1999 PST <itemizedlist> <listitem> <para> - Although the <type>date</type> type + Although the <type>date</type> type cannot have an associated time zone, the <type>time</type> type can. - Time zones in the real world have little meaning unless + Time zones in the real world have little meaning unless associated with a date as well as a time, since the offset can vary through the year with daylight-saving time boundaries. @@ -2267,7 +2267,7 @@ January 8 04:05:06 1999 PST <listitem> <para> - The default time zone is specified as a constant numeric offset + The default time zone is specified as a constant numeric offset from <acronym>UTC</>. It is therefore impossible to adapt to daylight-saving time when doing date/time arithmetic across <acronym>DST</acronym> boundaries. @@ -2901,7 +2901,7 @@ SELECT * FROM person WHERE current_mood = 'happy'; order in which the values were listed when the type was created. 
All standard comparison operators and related aggregate functions are supported for enums. For example: - + <programlisting> INSERT INTO person VALUES ('Larry', 'sad'); INSERT INTO person VALUES ('Curly', 'ok'); @@ -2919,7 +2919,7 @@ SELECT * FROM person WHERE current_mood > 'sad' ORDER BY current_mood; Moe | happy (2 rows) -SELECT name +SELECT name FROM person WHERE current_mood = (SELECT MIN(current_mood) FROM person); name @@ -2972,7 +2972,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays <sect2> <title>Implementation Details - + An enum value occupies four bytes on disk. The length of an enum value's textual label is limited by the NAMEDATALEN @@ -3409,8 +3409,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays <type>cidr</> Type Input Examples - - + + cidr Input cidr Output abbrev(cidr) @@ -3772,7 +3772,7 @@ select 'The Fat Rats'::tsvector; for searching: -SELECT to_tsvector('english', 'The Fat Rats'); +SELECT to_tsvector('english', 'The Fat Rats'); to_tsvector ----------------- 'fat':2 'rat':3 @@ -3913,7 +3913,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 functions for UUIDs, but the core database does not include any function for generating UUIDs, because no single algorithm is well suited for every application. The contrib module - contrib/uuid-ossp provides functions that implement + contrib/uuid-ossp provides functions that implement several standard algorithms. Alternatively, UUIDs could be generated by client applications or other libraries invoked through a server-side function. @@ -3933,7 +3933,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 checks the input values for well-formedness, and there are support functions to perform type-safe operations on it; see . Use of this data type requires the - installation to have been built with configure + installation to have been built with configure --with-libxml. diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml index fb75a1e8b0..0b55446245 100644 --- a/doc/src/sgml/datetime.sgml +++ b/doc/src/sgml/datetime.sgml @@ -75,7 +75,7 @@ If the token is a text string, match up with possible strings: - + @@ -83,7 +83,7 @@ abbreviation. - + If not found, do a similar binary-search table lookup to match @@ -101,7 +101,7 @@ - + When the token is a number or number field: @@ -111,7 +111,7 @@ If there are eight or six digits, - and if no other date fields have been previously read, then interpret + and if no other date fields have been previously read, then interpret as a concatenated date (e.g., 19990118 or 990118). The interpretation is YYYYMMDD or YYMMDD. @@ -124,7 +124,7 @@ and a year has already been read, then interpret as day of year. - + If four or six digits and a year has already been read, then @@ -465,7 +465,7 @@ about 1 day in 128 years. - + The accumulating calendar error prompted Pope Gregory XIII to reform the calendar in accordance with instructions from the Council of Trent. @@ -544,7 +544,7 @@ $ cal 9 1752 the beginnings of the Chinese calendar can be traced back to the 14th century BC. Legend has it that the Emperor Huangdi invented that calendar in 2637 BC. - + The People's Republic of China uses the Gregorian calendar for civil purposes. The Chinese calendar is used for determining festivals. @@ -552,7 +552,7 @@ $ cal 9 1752 The Julian Date is unrelated to the Julian - calendar. + calendar. 
The Julian Date system was invented by the French scholar Joseph Justus Scaliger (1540-1609) and probably takes its name from Scaliger's father, diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index 689b14ffa5..155207bd3e 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -160,7 +160,7 @@ cc -shared -o foo.so foo.o Here is an example. It assumes the developer tools are installed. -cc -c foo.c +cc -c foo.c cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o @@ -226,7 +226,7 @@ gcc -G -o foo.so foo.o - Tru64 UNIX + Tru64 UNIX Tru64 UNIXshared library Digital UNIXTru64 UNIX @@ -272,7 +272,7 @@ gcc -shared -o foo.so foo.o - If this is too complicated for you, you should consider using + If this is too complicated for you, you should consider using GNU Libtool, which hides the platform differences behind a uniform interface. diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml index 5da2d61b20..008ebcdcf6 100644 --- a/doc/src/sgml/docguide.sgml +++ b/doc/src/sgml/docguide.sgml @@ -240,7 +240,7 @@ It's possible that the ports do not update the main catalog file - in /usr/local/share/sgml/catalog.ports or order + in /usr/local/share/sgml/catalog.ports or order isn't proper . Be sure to have the following lines in beginning of file: CATALOG "openjade/catalog" @@ -613,7 +613,7 @@ gmake man - + To make a PDF: @@ -1059,7 +1059,7 @@ save_size.pdfjadetex = 15000 Norm Walsh offers a major mode - specifically for DocBook which also has font-lock and a number of features to + specifically for DocBook which also has font-lock and a number of features to reduce typing. @@ -1114,7 +1114,7 @@ save_size.pdfjadetex = 15000 - + Description @@ -1123,7 +1123,7 @@ save_size.pdfjadetex = 15000 - + Options @@ -1133,7 +1133,7 @@ save_size.pdfjadetex = 15000 - + Exit Status @@ -1144,7 +1144,7 @@ save_size.pdfjadetex = 15000 - + Usage @@ -1156,7 +1156,7 @@ save_size.pdfjadetex = 15000 - + Environment @@ -1167,7 +1167,7 @@ save_size.pdfjadetex = 15000 - + Files @@ -1178,7 +1178,7 @@ save_size.pdfjadetex = 15000 - + Diagnostics @@ -1191,7 +1191,7 @@ save_size.pdfjadetex = 15000 - + Notes @@ -1202,7 +1202,7 @@ save_size.pdfjadetex = 15000 - + Examples @@ -1211,7 +1211,7 @@ save_size.pdfjadetex = 15000 - + History @@ -1222,7 +1222,7 @@ save_size.pdfjadetex = 15000 - + See Also diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 387f50d6d0..83f396ad21 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -110,7 +110,7 @@ EXEC SQL CONNECT TO target AS unix:postgresql://hostname:port/dbname?options - + an SQL string literal containing one of the above forms @@ -122,7 +122,7 @@ EXEC SQL CONNECT TO target AS a reference to a character variable containing one of the above forms (see examples) - + DEFAULT @@ -2743,7 +2743,6 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); The function returns the parsed timestamp on success. On error, PGTYPESInvalidTimestamp is returned and errno is set to PGTYPES_TS_BAD_TIMESTAMP. See for important notes on this value. - In general, the input string can contain any combination of an allowed @@ -2839,7 +2838,7 @@ int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmt You can use the following format specifiers for the format mask. The format specifiers are the same ones that are used in the strftime function in libc. Any - non-format specifier will be copied into the output buffer. + non-format specifier will be copied into the output buffer. 
@@ -2897,24 +2896,24 @@ int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmt %E* %O* - POSIX locale extensions. The sequences %Ec - %EC - %Ex - %EX - %Ey - %EY - %Od + %EC + %Ex + %EX + %Ey + %EY + %Od %Oe - %OH - %OI - %Om - %OM - %OS - %Ou - %OU - %OV - %Ow - %OW - %Oy + %OH + %OI + %Om + %OM + %OS + %Ou + %OU + %OV + %Ow + %OW + %Oy are supposed to provide alternative representations. @@ -5763,10 +5762,10 @@ ECPG = ecpg On Windows, if the ecpg libraries and an application are - compiled with different flags, this function call will crash the - application because the internal representation of the + compiled with different flags, this function call will crash the + application because the internal representation of the FILE pointers differ. Specifically, - multithreaded/single-threaded, release/debug, and static/dynamic + multithreaded/single-threaded, release/debug, and static/dynamic flags should be the same for the library and all applications using that library. @@ -5778,7 +5777,7 @@ ECPG = ecpg ECPGget_PGconn(const char *connection_name) returns the library database connection handle identified by the given name. If connection_name is set to NULL, the current - connection handle is returned. If no connection handle can be identified, the function returns + connection handle is returned. If no connection handle can be identified, the function returns NULL. The returned connection handle can be used to call any other functions from libpq, if necessary. @@ -5803,7 +5802,7 @@ ECPG = ecpg ECPGstatus(int lineno, const char* connection_name) returns true if you are connected to a database and false if not. - connection_name can be NULL + connection_name can be NULL if a single connection is being used. @@ -8064,7 +8063,7 @@ typedef struct sqlda_compat sqlda_t; Pointer to the field data. The pointer is of char * type, the data pointed by it is in a binary format. Example: -int intval; +int intval; switch (sqldata->sqlvar[i].sqltype) { @@ -8083,7 +8082,7 @@ switch (sqldata->sqlvar[i].sqltype) Pointer to the NULL indicator. If returned by DESCRIBE or FETCH then it's always a valid pointer. - If used as input for EXECUTE ... USING sqlda; then NULL-pointer value means + If used as input for EXECUTE ... USING sqlda; then NULL-pointer value means that the value for this field is non-NULL. Otherwise a valid pointer and sqlitype has to be properly set. Example: @@ -8117,7 +8116,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) Type of the NULL indicator data. It's always SQLSMINT when returning data from the server. - When the SQLDA is used for a parametrized query, the data is treated + When the SQLDA is used for a parametrized query, the data is treated according to the set type. @@ -8143,13 +8142,13 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) sqltypename - sqltypelen + sqltypelen sqlownerlen sqlsourcetype - sqlownername - sqlsourceid - sqlflags - sqlreserved + sqlownername + sqlsourceid + sqlflags + sqlreserved Unused. @@ -8469,7 +8468,7 @@ int dectoasc(decimal *np, char *cp, int len, int right) The function returns either -1 if the buffer cp was too small or ECPG_INFORMIX_OUT_OF_MEMORY if memory was - exhausted. + exhausted. @@ -9548,7 +9547,7 @@ risnull(CINTTYPE, (char *) &i); - + A pointer to the value or a pointer to the pointer. 
diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index 2e8f77cf4f..246451a42e 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -9,7 +9,7 @@ In the sections that follow, we will discuss how you - can extend the PostgreSQL + can extend the PostgreSQL SQL query language by adding: @@ -45,8 +45,8 @@ How Extensibility Works - PostgreSQL is extensible because its operation is - catalog-driven. If you are familiar with standard + PostgreSQL is extensible because its operation is + catalog-driven. If you are familiar with standard relational database systems, you know that they store information about databases, tables, columns, etc., in what are commonly known as system catalogs. (Some systems call @@ -54,14 +54,14 @@ user as tables like any other, but the DBMS stores its internal bookkeeping in them. One key difference between PostgreSQL and standard relational database systems is - that PostgreSQL stores much more information in its + that PostgreSQL stores much more information in its catalogs: not only information about tables and columns, but also information about data types, functions, access methods, and so on. These tables can be modified by - the user, and since PostgreSQL bases its operation + the user, and since PostgreSQL bases its operation on these tables, this means that PostgreSQL can be extended by users. By comparison, conventional - database systems can only be extended by changing hardcoded + database systems can only be extended by changing hardcoded procedures in the source code or by loading modules specially written by the DBMS vendor. @@ -209,7 +209,7 @@ parsed. Each position (either argument or return value) declared as anyelement is allowed to have any specific actual data type, but in any given call they must all be the - same actual type. Each + same actual type. Each position declared as anyarray can have any array data type, but similarly they must all be the same type. If there are positions declared anyarray and others declared diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml index 8927ef344b..129b9814eb 100644 --- a/doc/src/sgml/external-projects.sgml +++ b/doc/src/sgml/external-projects.sgml @@ -218,7 +218,7 @@
- + Extensions @@ -247,7 +247,7 @@ There are several administration tools available for PostgreSQL. The most popular is - pgAdmin III, + pgAdmin III, and there are several commercially available ones as well. diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index 39cfcee961..4361991ea9 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -134,7 +134,7 @@ - + diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index e4d00b2403..6992aaa281 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -30,7 +30,7 @@ is present in other SQL database management systems, and in many cases this functionality is compatible and consistent between the various implementations. This chapter is also - not exhaustive; additional functions appear in relevant sections of + not exhaustive; additional functions appear in relevant sections of the manual.
@@ -416,7 +416,7 @@ IS NOT NULL, respectively, except that the input expression must be of Boolean type.
- + - A relation scheme R is a + A relation scheme R is a finite set of attributes A1, A2, @@ -416,7 +416,7 @@ attributes are taken from. We often write a relation scheme as SELECT (σ): extracts tuples from a relation that - satisfy a given restriction. Let R be a + satisfy a given restriction. Let R be a table that contains an attribute A. σA=a(R) = {t ∈ R ∣ t(A) = a} @@ -447,7 +447,7 @@ attributes are taken from. We often write a relation scheme as S be a table with arity k2. R × S - is the set of all + is the set of all k1 + k2-tuples whose first k1 @@ -477,7 +477,7 @@ attributes are taken from. We often write a relation scheme as set of tuples that are in R and in S. - We again require that R and + We again require that R and S have the same arity. @@ -497,14 +497,14 @@ attributes are taken from. We often write a relation scheme as JOIN (∏): connects two tables by their common - attributes. Let R be a table with the - attributes A,B + attributes. Let R be a table with the + attributes A,B and C and let S be a table with the attributes C,D and E. There is one attribute common to both relations, - the attribute C. + the attribute C. @@ -45,7 +45,7 @@ ;; Don't append period if run-in title ends with any of these ;; characters. We had to add the colon here. This is fixed in ;; stylesheets version 1.71, so it can be removed sometime. -(define %content-title-end-punct% +(define %content-title-end-punct% '(#\. #\! #\? #\:)) ;; No automatic punctuation after honorific name parts @@ -114,7 +114,7 @@ (normalize "author") (normalize "authorgroup") (normalize "title") - (normalize "subtitle") + (normalize "subtitle") (normalize "volumenum") (normalize "edition") (normalize "othercredit") @@ -214,7 +214,7 @@ (empty-sosofo))) ;; Add character encoding and time of creation into HTML header -(define %html-header-tags% +(define %html-header-tags% (list (list "META" '("HTTP-EQUIV" "Content-Type") '("CONTENT" "text/html; charset=ISO-8859-1")) (list "META" '("NAME" "creation") (list "CONTENT" (time->string (time) #t))))) @@ -332,7 +332,7 @@ (make element gi: "A" attributes: (list (list "TITLE" (element-title-string nextsib)) - (list "HREF" + (list "HREF" (href-to nextsib))) (gentext-nav-next-sibling nextsib)))) @@ -346,7 +346,7 @@ (make element gi: "A" attributes: (list (list "TITLE" (element-title-string next)) - (list "HREF" + (list "HREF" (href-to next)) (list "ACCESSKEY" @@ -556,7 +556,7 @@ (my-simplelist-vert members)) ((equal? type (normalize "horiz")) (simplelist-table 'row cols members))))) - + (element member (let ((type (inherited-attribute-string (normalize "type")))) (cond @@ -585,7 +585,7 @@ (let ((table (ancestor-member nd ($table-element-list$)))) (if (node-list-empty? table) nd - table))) + table))) ;; (The function below overrides the one in print/dbindex.dsl.) @@ -652,7 +652,7 @@ (define (part-titlepage elements #!optional (side 'recto)) - (let ((nodelist (titlepage-nodelist + (let ((nodelist (titlepage-nodelist (if (equal? side 'recto) (reference-titlepage-recto-elements) (reference-titlepage-verso-elements)) @@ -670,7 +670,7 @@ page-number-restart?: (first-part?) input-whitespace-treatment: 'collapse use: default-text-style - + ;; This hack is required for the RTF backend. If an external-graphic ;; is the first thing on the page, RTF doesn't seem to do the right ;; thing (the graphic winds up on the baseline of the first line @@ -679,7 +679,7 @@ (make paragraph line-spacing: 1pt (literal "")) - + (let loop ((nl nodelist) (lastnode (empty-node-list))) (if (node-list-empty? 
nl) (empty-sosofo) @@ -717,7 +717,7 @@ (define (reference-titlepage elements #!optional (side 'recto)) - (let ((nodelist (titlepage-nodelist + (let ((nodelist (titlepage-nodelist (if (equal? side 'recto) (reference-titlepage-recto-elements) (reference-titlepage-verso-elements)) @@ -735,7 +735,7 @@ page-number-restart?: (first-reference?) input-whitespace-treatment: 'collapse use: default-text-style - + ;; This hack is required for the RTF backend. If an external-graphic ;; is the first thing on the page, RTF doesn't seem to do the right ;; thing (the graphic winds up on the baseline of the first line @@ -744,7 +744,7 @@ (make paragraph line-spacing: 1pt (literal "")) - + (let loop ((nl nodelist) (lastnode (empty-node-list))) (if (node-list-empty? nl) (empty-sosofo) @@ -812,13 +812,13 @@ Lynx, or similar). (literal "*") sosofo (literal "*"))) - + (define ($dquote-seq$ #!optional (sosofo (process-children))) (make sequence (literal (gentext-start-quote)) sosofo (literal (gentext-end-quote)))) - + (element (para command) ($dquote-seq$)) (element (para emphasis) ($asterix-seq$)) (element (para filename) ($dquote-seq$)) diff --git a/doc/src/sgml/vacuumlo.sgml b/doc/src/sgml/vacuumlo.sgml index 76e2282ad6..65f55f03c9 100644 --- a/doc/src/sgml/vacuumlo.sgml +++ b/doc/src/sgml/vacuumlo.sgml @@ -75,7 +75,7 @@ vacuumlo [options] database [database2 ... databaseN] Force vacuumlo to prompt for a - password before connecting to a database. + password before connecting to a database. diff --git a/doc/src/sgml/wal.sgml b/doc/src/sgml/wal.sgml index 2266d549ff..a2724fad6b 100644 --- a/doc/src/sgml/wal.sgml +++ b/doc/src/sgml/wal.sgml @@ -390,7 +390,7 @@ are points in the sequence of transactions at which it is guaranteed that the heap and index data files have been updated with all information written before the checkpoint. At checkpoint time, all dirty data pages are flushed to - disk and a special checkpoint record is written to the log file. + disk and a special checkpoint record is written to the log file. (The changes were previously flushed to the WAL files.) In the event of a crash, the crash recovery procedure looks at the latest checkpoint record to determine the point in the log (known as the redo diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 8c829a9df7..8f9fd21f38 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -373,7 +373,7 @@ - consistent - determine whether key satisfies the + consistent - determine whether key satisfies the query qualifier 1 @@ -387,12 +387,12 @@ 3 - decompress - compute a decompressed representation of a + decompress - compute a decompressed representation of a compressed key 4 - penalty - compute penalty for inserting new key into subtree + penalty - compute penalty for inserting new key into subtree with given subtree's key 5 @@ -940,7 +940,7 @@ SELECT * FROM table WHERE integer_column < 4; can be satisfied exactly by a B-tree index on the integer column. But there are cases where an index is useful as an inexact guide to the matching rows. For example, if a GiST index stores only bounding boxes - for geometric objects, then it cannot exactly satisfy a WHERE + for geometric objects, then it cannot exactly satisfy a WHERE condition that tests overlap between nonrectangular objects such as polygons. 
Yet we could use the index to find objects whose bounding box overlaps the bounding box of the target object, and then do the diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index a2592c304d..ea64a152f7 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -52,7 +52,7 @@ CREATE OPERATOR + ( Now we could execute a query like this: - + SELECT (a + b) AS c FROM test_complex; diff --git a/doc/src/sgml/xtypes.sgml b/doc/src/sgml/xtypes.sgml index b020f28e87..972cc76bf9 100644 --- a/doc/src/sgml/xtypes.sgml +++ b/doc/src/sgml/xtypes.sgml @@ -207,7 +207,7 @@ CREATE FUNCTION complex_send(complex) Finally, we can provide the full definition of the data type: CREATE TYPE complex ( - internallength = 16, + internallength = 16, input = complex_in, output = complex_out, receive = complex_recv, diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 85cf617786..7a61a5a1a1 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -258,7 +258,7 @@ RANLIB = @RANLIB@ WINDRES = @WINDRES@ X = @EXEEXT@ -# Perl +# Perl ifneq (@PERL@,) # quoted to protect pathname with spaces @@ -391,7 +391,7 @@ endif # This macro is for use by libraries linking to libpq. (Because libpgport # isn't created with the same link flags as libpq, it can't be used.) libpq = -L$(libpq_builddir) -lpq - + # If doing static linking, shared library dependency info isn't available, # so add in the libraries that libpq depends on. ifeq ($(enable_shared), no) @@ -400,9 +400,9 @@ libpq += $(filter -lintl -lssl -lcrypto -lkrb5 -lcrypt, $(LIBS)) \ endif # This macro is for use by client executables (not libraries) that use libpq. -# We force clients to pull symbols from the non-shared library libpgport -# rather than pulling some libpgport symbols from libpq just because -# libpq uses those functions too. This makes applications less +# We force clients to pull symbols from the non-shared library libpgport +# rather than pulling some libpgport symbols from libpq just because +# libpq uses those functions too. This makes applications less # dependent on changes in libpq's usage of pgport. To do this we link to # pgport before libpq. This does cause duplicate -lpgport's to appear # on client link lines. @@ -517,7 +517,7 @@ $(top_builddir)/src/include/pg_config.h: $(top_builddir)/src/include/stamp-h $(top_builddir)/src/include/stamp-h: $(top_srcdir)/src/include/pg_config.h.in $(top_builddir)/config.status cd $(top_builddir) && ./config.status src/include/pg_config.h -# Also remake ecpg_config.h from ecpg_config.h.in if the latter changed, same +# Also remake ecpg_config.h from ecpg_config.h.in if the latter changed, same # logic as above. 
$(top_builddir)/src/interfaces/ecpg/include/ecpg_config.h: $(top_builddir)/src/interfaces/ecpg/include/stamp-h diff --git a/src/Makefile.shlib b/src/Makefile.shlib index b6dea47f90..a5cf6c6c16 100644 --- a/src/Makefile.shlib +++ b/src/Makefile.shlib @@ -271,7 +271,7 @@ endif ifeq ($(PORTNAME), sunos4) LINK.shared = $(LD) -assert pure-text -Bdynamic endif - + ifeq ($(PORTNAME), osf) LINK.shared = $(LD) -shared -expect_unresolved '*' endif diff --git a/src/backend/Makefile b/src/backend/Makefile index 9a0f2e21e5..87b97c21d9 100644 --- a/src/backend/Makefile +++ b/src/backend/Makefile @@ -187,7 +187,7 @@ distprep: $(MAKE) -C parser gram.c gram.h scan.c $(MAKE) -C bootstrap bootparse.c bootscanner.c $(MAKE) -C catalog schemapg.h postgres.bki postgres.description postgres.shdescription - $(MAKE) -C utils fmgrtab.c fmgroids.h + $(MAKE) -C utils fmgrtab.c fmgroids.h $(MAKE) -C utils/misc guc-file.c @@ -305,7 +305,7 @@ maintainer-clean: distclean # # Support for code development. # -# Use target "quick" to build "postgres" when you know all the subsystems +# Use target "quick" to build "postgres" when you know all the subsystems # are up to date. It saves the time of doing all the submakes. .PHONY: quick quick: $(OBJS) diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README index 69d5a31941..0f634f83d1 100644 --- a/src/backend/access/gin/README +++ b/src/backend/access/gin/README @@ -9,27 +9,27 @@ Gin stands for Generalized Inverted Index and should be considered as a genie, not a drink. Generalized means that the index does not know which operation it accelerates. -It instead works with custom strategies, defined for specific data types (read -"Index Method Strategies" in the PostgreSQL documentation). In that sense, Gin +It instead works with custom strategies, defined for specific data types (read +"Index Method Strategies" in the PostgreSQL documentation). In that sense, Gin is similar to GiST and differs from btree indices, which have predefined, comparison-based operations. -An inverted index is an index structure storing a set of (key, posting list) -pairs, where 'posting list' is a set of documents in which the key occurs. -(A text document would usually contain many keys.) The primary goal of +An inverted index is an index structure storing a set of (key, posting list) +pairs, where 'posting list' is a set of documents in which the key occurs. +(A text document would usually contain many keys.) The primary goal of Gin indices is support for highly scalable, full-text search in PostgreSQL. Gin consists of a B-tree index constructed over entries (ET, entries tree), where each entry is an element of the indexed value (element of array, lexeme -for tsvector) and where each tuple in a leaf page is either a pointer to a -B-tree over item pointers (PT, posting tree), or a list of item pointers +for tsvector) and where each tuple in a leaf page is either a pointer to a +B-tree over item pointers (PT, posting tree), or a list of item pointers (PL, posting list) if the tuple is small enough. Note: There is no delete operation for ET. The reason for this is that in our experience, the set of distinct words in a large corpus changes very rarely. This greatly simplifies the code and concurrency algorithms. -Gin comes with built-in support for one-dimensional arrays (eg. integer[], +Gin comes with built-in support for one-dimensional arrays (eg. integer[], text[]), but no support for NULL elements. 
The following operations are available: @@ -59,25 +59,25 @@ Gin Fuzzy Limit There are often situations when a full-text search returns a very large set of results. Since reading tuples from the disk and sorting them could take a -lot of time, this is unacceptable for production. (Note that the search +lot of time, this is unacceptable for production. (Note that the search itself is very fast.) -Such queries usually contain very frequent lexemes, so the results are not -very helpful. To facilitate execution of such queries Gin has a configurable -soft upper limit on the size of the returned set, determined by the -'gin_fuzzy_search_limit' GUC variable. This is set to 0 by default (no +Such queries usually contain very frequent lexemes, so the results are not +very helpful. To facilitate execution of such queries Gin has a configurable +soft upper limit on the size of the returned set, determined by the +'gin_fuzzy_search_limit' GUC variable. This is set to 0 by default (no limit). If a non-zero search limit is set, then the returned set is a subset of the whole result set, chosen at random. "Soft" means that the actual number of returned results could slightly differ -from the specified limit, depending on the query and the quality of the +from the specified limit, depending on the query and the quality of the system's random number generator. From experience, a value of 'gin_fuzzy_search_limit' in the thousands (eg. 5000-20000) works well. This means that 'gin_fuzzy_search_limit' will -have no effect for queries returning a result set with less tuples than this +have no effect for queries returning a result set with less tuples than this number. Limitations @@ -115,5 +115,5 @@ Distant future: Authors ------- -All work was done by Teodor Sigaev (teodor@sigaev.ru) and Oleg Bartunov +All work was done by Teodor Sigaev (teodor@sigaev.ru) and Oleg Bartunov (oleg@sai.msu.su). diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README index b613a4831f..66d559d33b 100644 --- a/src/backend/access/gist/README +++ b/src/backend/access/gist/README @@ -24,21 +24,21 @@ The current implementation of GiST supports: * Concurrency * Recovery support via WAL logging -The support for concurrency implemented in PostgreSQL was developed based on -the paper "Access Methods for Next-Generation Database Systems" by +The support for concurrency implemented in PostgreSQL was developed based on +the paper "Access Methods for Next-Generation Database Systems" by Marcel Kornaker: http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz The original algorithms were modified in several ways: -* They should be adapted to PostgreSQL conventions. For example, the SEARCH - algorithm was considerably changed, because in PostgreSQL function search - should return one tuple (next), not all tuples at once. Also, it should +* They should be adapted to PostgreSQL conventions. For example, the SEARCH + algorithm was considerably changed, because in PostgreSQL function search + should return one tuple (next), not all tuples at once. Also, it should release page locks between calls. -* Since we added support for variable length keys, it's not possible to - guarantee enough free space for all keys on pages after splitting. User - defined function picksplit doesn't have information about size of tuples +* Since we added support for variable length keys, it's not possible to + guarantee enough free space for all keys on pages after splitting. 
User + defined function picksplit doesn't have information about size of tuples (each tuple may contain several keys as in multicolumn index while picksplit could work with only one key) and pages. * We modified original INSERT algorithm for performance reason. In particular, @@ -67,7 +67,7 @@ gettuple(search-pred) ptr = top of stack while(true) latch( ptr->page, S-mode ) - if ( ptr->page->lsn != ptr->lsn ) + if ( ptr->page->lsn != ptr->lsn ) ptr->lsn = ptr->page->lsn currentposition=0 if ( ptr->parentlsn < ptr->page->nsn ) @@ -88,7 +88,7 @@ gettuple(search-pred) else if ( ptr->page is leaf ) unlatch( ptr->page ) return tuple - else + else add to stack child page end currentposition++ @@ -99,20 +99,20 @@ gettuple(search-pred) Insert Algorithm ---------------- -INSERT guarantees that the GiST tree remains balanced. User defined key method -Penalty is used for choosing a subtree to insert; method PickSplit is used for -the node splitting algorithm; method Union is used for propagating changes +INSERT guarantees that the GiST tree remains balanced. User defined key method +Penalty is used for choosing a subtree to insert; method PickSplit is used for +the node splitting algorithm; method Union is used for propagating changes upward to maintain the tree properties. -NOTICE: We modified original INSERT algorithm for performance reason. In +NOTICE: We modified original INSERT algorithm for performance reason. In particularly, it is now a single-pass algorithm. -Function findLeaf is used to identify subtree for insertion. Page, in which -insertion is proceeded, is locked as well as its parent page. Functions -findParent and findPath are used to find parent pages, which could be changed -because of concurrent access. Function pageSplit is recurrent and could split -page by more than 2 pages, which could be necessary if keys have different -lengths or more than one key are inserted (in such situation, user defined +Function findLeaf is used to identify subtree for insertion. Page, in which +insertion is proceeded, is locked as well as its parent page. Functions +findParent and findPath are used to find parent pages, which could be changed +because of concurrent access. Function pageSplit is recurrent and could split +page by more than 2 pages, which could be necessary if keys have different +lengths or more than one key are inserted (in such situation, user defined function pickSplit cannot guarantee free space on page). 
findLeaf(new-key) @@ -143,7 +143,7 @@ findLeaf(new-key) end findPath( stack item ) - push stack, [root, 0, 0] // page, LSN, parent + push stack, [root, 0, 0] // page, LSN, parent while( stack ) ptr = top of stack latch( ptr->page, S-mode ) @@ -152,7 +152,7 @@ findPath( stack item ) end for( each tuple on page ) if ( tuple->pagepointer == item->page ) - return stack + return stack else add to stack at the end [tuple->pagepointer,0, ptr] end @@ -160,12 +160,12 @@ findPath( stack item ) unlatch( ptr->page ) pop stack end - + findParent( stack item ) parent = item->parent latch( parent->page, X-mode ) if ( parent->page->lsn != parent->lsn ) - while(true) + while(true) search parent tuple on parent->page, if found the return rightlink = parent->page->rightlink unlatch( parent->page ) @@ -214,7 +214,7 @@ placetopage(page, keysarray) keysarray = [ union(keysarray) ] end end - + insert(new-key) stack = findLeaf(new-key) keysarray = [new-key] @@ -236,4 +236,4 @@ insert(new-key) Authors: Teodor Sigaev - Oleg Bartunov + Oleg Bartunov diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index de0da704a3..561ffbb9d4 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -154,7 +154,7 @@ even pages that don't contain any deletable tuples. This guarantees that the btbulkdelete call cannot return while any indexscan is still holding a copy of a deleted index tuple. Note that this requirement does not say that btbulkdelete must visit the pages in any particular order. (See also -on-the-fly deletion, below.) +on-the-fly deletion, below.) There is no such interlocking for deletion of items in internal pages, since backends keep no lock nor pin on a page they have descended past. diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 70f4cc5d2e..ede6ceb6af 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5608,7 +5608,7 @@ GetLatestXTime(void) * Returns timestamp of latest processed commit/abort record. * * When the server has been started normally without recovery the function - * returns NULL. + * returns NULL. */ Datum pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS) diff --git a/src/backend/bootstrap/Makefile b/src/backend/bootstrap/Makefile index 4d68649ccc..a77d864800 100644 --- a/src/backend/bootstrap/Makefile +++ b/src/backend/bootstrap/Makefile @@ -12,7 +12,7 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I. 
-I$(srcdir) $(CPPFLAGS) -OBJS= bootparse.o bootstrap.o +OBJS= bootparse.o bootstrap.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/catalog/information_schema.sql b/src/backend/catalog/information_schema.sql index def273d3c0..8d9790d196 100644 --- a/src/backend/catalog/information_schema.sql +++ b/src/backend/catalog/information_schema.sql @@ -1269,7 +1269,7 @@ GRANT SELECT ON role_routine_grants TO PUBLIC; -- not tracked by PostgreSQL -/* +/* * 5.47 * ROUTINE_SEQUENCE_USAGE view */ @@ -1385,7 +1385,7 @@ CREATE VIEW routines AS CAST(null AS sql_identifier) AS result_cast_scope_schema, CAST(null AS sql_identifier) AS result_cast_scope_name, CAST(null AS cardinal_number) AS result_cast_maximum_cardinality, - CAST(null AS sql_identifier) AS result_cast_dtd_identifier + CAST(null AS sql_identifier) AS result_cast_dtd_identifier FROM pg_namespace n, pg_proc p, pg_language l, pg_type t, pg_namespace nt @@ -2323,7 +2323,7 @@ CREATE VIEW element_types AS CAST(null AS cardinal_number) AS datetime_precision, CAST(null AS character_data) AS interval_type, CAST(null AS character_data) AS interval_precision, - + CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard CAST(current_database() AS sql_identifier) AS udt_catalog, diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 5e23fea705..6a37e61dba 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -552,7 +552,7 @@ object_exists(ObjectAddress address) else { found = ((Form_pg_attribute) GETSTRUCT(atttup))->attisdropped; - ReleaseSysCache(atttup); + ReleaseSysCache(atttup); } return found; } @@ -654,5 +654,5 @@ object_exists(ObjectAddress address) found = HeapTupleIsValid(systable_getnext(sd)); systable_endscan(sd); heap_close(rel, AccessShareLock); - return found; + return found; } diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 82788fa14a..346eaaf892 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -6,8 +6,8 @@ * src/backend/catalog/system_views.sql */ -CREATE VIEW pg_roles AS - SELECT +CREATE VIEW pg_roles AS + SELECT rolname, rolsuper, rolinherit, @@ -47,72 +47,72 @@ CREATE VIEW pg_group AS FROM pg_authid WHERE NOT rolcanlogin; -CREATE VIEW pg_user AS - SELECT - usename, - usesysid, - usecreatedb, - usesuper, - usecatupd, - '********'::text as passwd, - valuntil, - useconfig +CREATE VIEW pg_user AS + SELECT + usename, + usesysid, + usecreatedb, + usesuper, + usecatupd, + '********'::text as passwd, + valuntil, + useconfig FROM pg_shadow; -CREATE VIEW pg_rules AS - SELECT - N.nspname AS schemaname, - C.relname AS tablename, - R.rulename AS rulename, - pg_get_ruledef(R.oid) AS definition - FROM (pg_rewrite R JOIN pg_class C ON (C.oid = R.ev_class)) - LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) +CREATE VIEW pg_rules AS + SELECT + N.nspname AS schemaname, + C.relname AS tablename, + R.rulename AS rulename, + pg_get_ruledef(R.oid) AS definition + FROM (pg_rewrite R JOIN pg_class C ON (C.oid = R.ev_class)) + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE R.rulename != '_RETURN'; -CREATE VIEW pg_views AS - SELECT - N.nspname AS schemaname, - C.relname AS viewname, - pg_get_userbyid(C.relowner) AS viewowner, - pg_get_viewdef(C.oid) AS definition - FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) +CREATE VIEW pg_views AS + SELECT + N.nspname AS schemaname, + C.relname AS viewname, + 
pg_get_userbyid(C.relowner) AS viewowner, + pg_get_viewdef(C.oid) AS definition + FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'v'; -CREATE VIEW pg_tables AS - SELECT - N.nspname AS schemaname, - C.relname AS tablename, - pg_get_userbyid(C.relowner) AS tableowner, +CREATE VIEW pg_tables AS + SELECT + N.nspname AS schemaname, + C.relname AS tablename, + pg_get_userbyid(C.relowner) AS tableowner, T.spcname AS tablespace, - C.relhasindex AS hasindexes, - C.relhasrules AS hasrules, - C.relhastriggers AS hastriggers - FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) + C.relhasindex AS hasindexes, + C.relhasrules AS hasrules, + C.relhastriggers AS hastriggers + FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) LEFT JOIN pg_tablespace T ON (T.oid = C.reltablespace) WHERE C.relkind = 'r'; -CREATE VIEW pg_indexes AS - SELECT - N.nspname AS schemaname, - C.relname AS tablename, - I.relname AS indexname, +CREATE VIEW pg_indexes AS + SELECT + N.nspname AS schemaname, + C.relname AS tablename, + I.relname AS indexname, T.spcname AS tablespace, - pg_get_indexdef(I.oid) AS indexdef - FROM pg_index X JOIN pg_class C ON (C.oid = X.indrelid) - JOIN pg_class I ON (I.oid = X.indexrelid) - LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) + pg_get_indexdef(I.oid) AS indexdef + FROM pg_index X JOIN pg_class C ON (C.oid = X.indrelid) + JOIN pg_class I ON (I.oid = X.indexrelid) + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) LEFT JOIN pg_tablespace T ON (T.oid = I.reltablespace) WHERE C.relkind = 'r' AND I.relkind = 'i'; -CREATE VIEW pg_stats AS - SELECT - nspname AS schemaname, - relname AS tablename, - attname AS attname, - stainherit AS inherited, - stanullfrac AS null_frac, - stawidth AS avg_width, - stadistinct AS n_distinct, +CREATE VIEW pg_stats AS + SELECT + nspname AS schemaname, + relname AS tablename, + attname AS attname, + stainherit AS inherited, + stanullfrac AS null_frac, + stawidth AS avg_width, + stadistinct AS n_distinct, CASE WHEN stakind1 IN (1, 4) THEN stavalues1 WHEN stakind2 IN (1, 4) THEN stavalues2 @@ -137,14 +137,14 @@ CREATE VIEW pg_stats AS WHEN stakind3 = 3 THEN stanumbers3[1] WHEN stakind4 = 3 THEN stanumbers4[1] END AS correlation - FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid) - JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum) - LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) + FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid) + JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum) + LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) WHERE NOT attisdropped AND has_column_privilege(c.oid, a.attnum, 'select'); REVOKE ALL on pg_statistic FROM public; -CREATE VIEW pg_locks AS +CREATE VIEW pg_locks AS SELECT * FROM pg_lock_status() AS L; CREATE VIEW pg_cursors AS @@ -268,16 +268,16 @@ FROM WHERE l.objsubid = 0; -CREATE VIEW pg_settings AS - SELECT * FROM pg_show_all_settings() AS A; +CREATE VIEW pg_settings AS + SELECT * FROM pg_show_all_settings() AS A; -CREATE RULE pg_settings_u AS - ON UPDATE TO pg_settings - WHERE new.name = old.name DO +CREATE RULE pg_settings_u AS + ON UPDATE TO pg_settings + WHERE new.name = old.name DO SELECT set_config(old.name, new.setting, 'f'); -CREATE RULE pg_settings_n AS - ON UPDATE TO pg_settings +CREATE RULE pg_settings_n AS + ON UPDATE TO pg_settings DO INSTEAD NOTHING; GRANT SELECT, UPDATE ON pg_settings TO PUBLIC; @@ -290,21 +290,21 @@ CREATE VIEW pg_timezone_names AS -- Statistics views -CREATE 
VIEW pg_stat_all_tables AS - SELECT - C.oid AS relid, - N.nspname AS schemaname, - C.relname AS relname, - pg_stat_get_numscans(C.oid) AS seq_scan, - pg_stat_get_tuples_returned(C.oid) AS seq_tup_read, - sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan, +CREATE VIEW pg_stat_all_tables AS + SELECT + C.oid AS relid, + N.nspname AS schemaname, + C.relname AS relname, + pg_stat_get_numscans(C.oid) AS seq_scan, + pg_stat_get_tuples_returned(C.oid) AS seq_tup_read, + sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan, sum(pg_stat_get_tuples_fetched(I.indexrelid))::bigint + - pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch, - pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins, - pg_stat_get_tuples_updated(C.oid) AS n_tup_upd, + pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch, + pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(C.oid) AS n_tup_upd, pg_stat_get_tuples_deleted(C.oid) AS n_tup_del, pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd, - pg_stat_get_live_tuples(C.oid) AS n_live_tup, + pg_stat_get_live_tuples(C.oid) AS n_live_tup, pg_stat_get_dead_tuples(C.oid) AS n_dead_tup, pg_stat_get_last_vacuum_time(C.oid) as last_vacuum, pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum, @@ -314,9 +314,9 @@ CREATE VIEW pg_stat_all_tables AS pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count, pg_stat_get_analyze_count(C.oid) AS analyze_count, pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count - FROM pg_class C LEFT JOIN - pg_index I ON C.oid = I.indrelid - LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) + FROM pg_class C LEFT JOIN + pg_index I ON C.oid = I.indrelid + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind IN ('r', 't') GROUP BY C.oid, N.nspname, C.relname; @@ -340,8 +340,8 @@ CREATE VIEW pg_stat_xact_all_tables AS WHERE C.relkind IN ('r', 't') GROUP BY C.oid, N.nspname, C.relname; -CREATE VIEW pg_stat_sys_tables AS - SELECT * FROM pg_stat_all_tables +CREATE VIEW pg_stat_sys_tables AS + SELECT * FROM pg_stat_all_tables WHERE schemaname IN ('pg_catalog', 'information_schema') OR schemaname ~ '^pg_toast'; @@ -350,8 +350,8 @@ CREATE VIEW pg_stat_xact_sys_tables AS WHERE schemaname IN ('pg_catalog', 'information_schema') OR schemaname ~ '^pg_toast'; -CREATE VIEW pg_stat_user_tables AS - SELECT * FROM pg_stat_all_tables +CREATE VIEW pg_stat_user_tables AS + SELECT * FROM pg_stat_all_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND schemaname !~ '^pg_toast'; @@ -360,117 +360,117 @@ CREATE VIEW pg_stat_xact_user_tables AS WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND schemaname !~ '^pg_toast'; -CREATE VIEW pg_statio_all_tables AS - SELECT - C.oid AS relid, - N.nspname AS schemaname, - C.relname AS relname, - pg_stat_get_blocks_fetched(C.oid) - - pg_stat_get_blocks_hit(C.oid) AS heap_blks_read, - pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit, - sum(pg_stat_get_blocks_fetched(I.indexrelid) - - pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read, - sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit, - pg_stat_get_blocks_fetched(T.oid) - - pg_stat_get_blocks_hit(T.oid) AS toast_blks_read, - pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit, - pg_stat_get_blocks_fetched(X.oid) - - pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read, - pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit - FROM pg_class C LEFT JOIN - pg_index I ON C.oid = I.indrelid LEFT JOIN - pg_class T ON C.reltoastrelid = T.oid LEFT JOIN - pg_class X ON T.reltoastidxid = X.oid 
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) +CREATE VIEW pg_statio_all_tables AS + SELECT + C.oid AS relid, + N.nspname AS schemaname, + C.relname AS relname, + pg_stat_get_blocks_fetched(C.oid) - + pg_stat_get_blocks_hit(C.oid) AS heap_blks_read, + pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit, + sum(pg_stat_get_blocks_fetched(I.indexrelid) - + pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read, + sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit, + pg_stat_get_blocks_fetched(T.oid) - + pg_stat_get_blocks_hit(T.oid) AS toast_blks_read, + pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit, + pg_stat_get_blocks_fetched(X.oid) - + pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read, + pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit + FROM pg_class C LEFT JOIN + pg_index I ON C.oid = I.indrelid LEFT JOIN + pg_class T ON C.reltoastrelid = T.oid LEFT JOIN + pg_class X ON T.reltoastidxid = X.oid + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind IN ('r', 't') GROUP BY C.oid, N.nspname, C.relname, T.oid, X.oid; -CREATE VIEW pg_statio_sys_tables AS - SELECT * FROM pg_statio_all_tables +CREATE VIEW pg_statio_sys_tables AS + SELECT * FROM pg_statio_all_tables WHERE schemaname IN ('pg_catalog', 'information_schema') OR schemaname ~ '^pg_toast'; -CREATE VIEW pg_statio_user_tables AS - SELECT * FROM pg_statio_all_tables +CREATE VIEW pg_statio_user_tables AS + SELECT * FROM pg_statio_all_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND schemaname !~ '^pg_toast'; -CREATE VIEW pg_stat_all_indexes AS - SELECT - C.oid AS relid, - I.oid AS indexrelid, - N.nspname AS schemaname, - C.relname AS relname, - I.relname AS indexrelname, - pg_stat_get_numscans(I.oid) AS idx_scan, - pg_stat_get_tuples_returned(I.oid) AS idx_tup_read, - pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch - FROM pg_class C JOIN - pg_index X ON C.oid = X.indrelid JOIN - pg_class I ON I.oid = X.indexrelid - LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) +CREATE VIEW pg_stat_all_indexes AS + SELECT + C.oid AS relid, + I.oid AS indexrelid, + N.nspname AS schemaname, + C.relname AS relname, + I.relname AS indexrelname, + pg_stat_get_numscans(I.oid) AS idx_scan, + pg_stat_get_tuples_returned(I.oid) AS idx_tup_read, + pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch + FROM pg_class C JOIN + pg_index X ON C.oid = X.indrelid JOIN + pg_class I ON I.oid = X.indexrelid + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind IN ('r', 't'); -CREATE VIEW pg_stat_sys_indexes AS - SELECT * FROM pg_stat_all_indexes +CREATE VIEW pg_stat_sys_indexes AS + SELECT * FROM pg_stat_all_indexes WHERE schemaname IN ('pg_catalog', 'information_schema') OR schemaname ~ '^pg_toast'; -CREATE VIEW pg_stat_user_indexes AS - SELECT * FROM pg_stat_all_indexes +CREATE VIEW pg_stat_user_indexes AS + SELECT * FROM pg_stat_all_indexes WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND schemaname !~ '^pg_toast'; -CREATE VIEW pg_statio_all_indexes AS - SELECT - C.oid AS relid, - I.oid AS indexrelid, - N.nspname AS schemaname, - C.relname AS relname, - I.relname AS indexrelname, - pg_stat_get_blocks_fetched(I.oid) - - pg_stat_get_blocks_hit(I.oid) AS idx_blks_read, - pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit - FROM pg_class C JOIN - pg_index X ON C.oid = X.indrelid JOIN - pg_class I ON I.oid = X.indexrelid - LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) +CREATE VIEW pg_statio_all_indexes AS + SELECT + C.oid AS relid, + I.oid AS indexrelid, + N.nspname AS 
schemaname, + C.relname AS relname, + I.relname AS indexrelname, + pg_stat_get_blocks_fetched(I.oid) - + pg_stat_get_blocks_hit(I.oid) AS idx_blks_read, + pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit + FROM pg_class C JOIN + pg_index X ON C.oid = X.indrelid JOIN + pg_class I ON I.oid = X.indexrelid + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind IN ('r', 't'); -CREATE VIEW pg_statio_sys_indexes AS - SELECT * FROM pg_statio_all_indexes +CREATE VIEW pg_statio_sys_indexes AS + SELECT * FROM pg_statio_all_indexes WHERE schemaname IN ('pg_catalog', 'information_schema') OR schemaname ~ '^pg_toast'; -CREATE VIEW pg_statio_user_indexes AS - SELECT * FROM pg_statio_all_indexes +CREATE VIEW pg_statio_user_indexes AS + SELECT * FROM pg_statio_all_indexes WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND schemaname !~ '^pg_toast'; -CREATE VIEW pg_statio_all_sequences AS - SELECT - C.oid AS relid, - N.nspname AS schemaname, - C.relname AS relname, - pg_stat_get_blocks_fetched(C.oid) - - pg_stat_get_blocks_hit(C.oid) AS blks_read, - pg_stat_get_blocks_hit(C.oid) AS blks_hit - FROM pg_class C - LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) +CREATE VIEW pg_statio_all_sequences AS + SELECT + C.oid AS relid, + N.nspname AS schemaname, + C.relname AS relname, + pg_stat_get_blocks_fetched(C.oid) - + pg_stat_get_blocks_hit(C.oid) AS blks_read, + pg_stat_get_blocks_hit(C.oid) AS blks_hit + FROM pg_class C + LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'S'; -CREATE VIEW pg_statio_sys_sequences AS - SELECT * FROM pg_statio_all_sequences +CREATE VIEW pg_statio_sys_sequences AS + SELECT * FROM pg_statio_all_sequences WHERE schemaname IN ('pg_catalog', 'information_schema') OR schemaname ~ '^pg_toast'; -CREATE VIEW pg_statio_user_sequences AS - SELECT * FROM pg_statio_all_sequences +CREATE VIEW pg_statio_user_sequences AS + SELECT * FROM pg_statio_all_sequences WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND schemaname !~ '^pg_toast'; -CREATE VIEW pg_stat_activity AS - SELECT +CREATE VIEW pg_stat_activity AS + SELECT S.datid AS datid, D.datname AS datname, S.procpid, @@ -485,18 +485,18 @@ CREATE VIEW pg_stat_activity AS S.waiting, S.current_query FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U - WHERE S.datid = D.oid AND + WHERE S.datid = D.oid AND S.usesysid = U.oid; -CREATE VIEW pg_stat_database AS - SELECT - D.oid AS datid, - D.datname AS datname, - pg_stat_get_db_numbackends(D.oid) AS numbackends, - pg_stat_get_db_xact_commit(D.oid) AS xact_commit, - pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback, - pg_stat_get_db_blocks_fetched(D.oid) - - pg_stat_get_db_blocks_hit(D.oid) AS blks_read, +CREATE VIEW pg_stat_database AS + SELECT + D.oid AS datid, + D.datname AS datname, + pg_stat_get_db_numbackends(D.oid) AS numbackends, + pg_stat_get_db_xact_commit(D.oid) AS xact_commit, + pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback, + pg_stat_get_db_blocks_fetched(D.oid) - + pg_stat_get_db_blocks_hit(D.oid) AS blks_read, pg_stat_get_db_blocks_hit(D.oid) AS blks_hit, pg_stat_get_db_tuples_returned(D.oid) AS tup_returned, pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched, @@ -505,16 +505,16 @@ CREATE VIEW pg_stat_database AS pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted FROM pg_database D; -CREATE VIEW pg_stat_user_functions AS +CREATE VIEW pg_stat_user_functions AS SELECT - P.oid AS funcid, + P.oid AS funcid, N.nspname AS schemaname, P.proname AS funcname, pg_stat_get_function_calls(P.oid) AS calls, 
pg_stat_get_function_time(P.oid) / 1000 AS total_time, pg_stat_get_function_self_time(P.oid) / 1000 AS self_time FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace) - WHERE P.prolang != 12 -- fast check to eliminate built-in functions + WHERE P.prolang != 12 -- fast check to eliminate built-in functions AND pg_stat_get_function_calls(P.oid) IS NOT NULL; CREATE VIEW pg_stat_xact_user_functions AS @@ -580,7 +580,7 @@ CREATE FUNCTION ts_debug(IN config regconfig, IN document text, OUT lexemes text[]) RETURNS SETOF record AS $$ -SELECT +SELECT tt.alias AS alias, tt.description AS description, parse.token AS token, @@ -602,7 +602,7 @@ SELECT LIMIT 1 ) AS lexemes FROM pg_catalog.ts_parse( - (SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 ), $2 + (SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 ), $2 ) AS parse, pg_catalog.ts_token_type( (SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 ) diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index 4ae161a625..b578818b4f 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -208,7 +208,7 @@ CommentObject(CommentStmt *stmt) * catalog. Comments on all other objects are recorded in pg_description. */ if (stmt->objtype == OBJECT_DATABASE || stmt->objtype == OBJECT_TABLESPACE - || stmt->objtype == OBJECT_ROLE) + || stmt->objtype == OBJECT_ROLE) CreateSharedComments(address.objectId, address.classId, stmt->comment); else CreateComments(address.objectId, address.classId, address.objectSubId, diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 98110dfd2a..3ffd10b143 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -2064,7 +2064,7 @@ CopyFrom(CopyState cstate) done = true; break; } - + if (fld_count == -1) { /* diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index f494ec98e5..a5e44c046f 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -1191,7 +1191,7 @@ ExplainNode(PlanState *planstate, List *ancestors, { ExplainOpenGroup("Plans", "Plans", false, es); /* Pass current PlanState as head of ancestors list for children */ - ancestors = lcons(planstate, ancestors); + ancestors = lcons(planstate, ancestors); } /* initPlan-s */ @@ -1251,7 +1251,7 @@ ExplainNode(PlanState *planstate, List *ancestors, /* end of child plans */ if (haschildren) { - ancestors = list_delete_first(ancestors); + ancestors = list_delete_first(ancestors); ExplainCloseGroup("Plans", "Plans", false, es); } diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 590eee5dec..305ac46b40 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -608,7 +608,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) errmsg("could not remove symbolic link \"%s\": %m", linkloc))); } - + /* * Create the symlink under PGDATA */ diff --git a/src/backend/libpq/README.SSL b/src/backend/libpq/README.SSL index d3b6e831ce..53dc9dd005 100644 --- a/src/backend/libpq/README.SSL +++ b/src/backend/libpq/README.SSL @@ -28,7 +28,7 @@ SSL | | Normal startup - + diff --git a/src/backend/nodes/README b/src/backend/nodes/README index f4034be276..95de7a1e2a 100644 --- a/src/backend/nodes/README +++ b/src/backend/nodes/README @@ -40,7 +40,7 @@ FILES IN src/include/nodes/ relation.h - planner internal nodes execnodes.h - executor nodes memnodes.h - memory nodes - pg_list.h - generic list + pg_list.h - generic 
list Steps to Add a Node @@ -69,7 +69,7 @@ Suppose you wanna define a node Foo: Historical Note --------------- -Prior to the current simple C structure definitions, the Node structures +Prior to the current simple C structure definitions, the Node structures used a pseudo-inheritance system which automatically generated creator and accessor functions. Since every node inherited from LispValue, the whole thing was a mess. Here's a little anecdote: diff --git a/src/backend/optimizer/plan/README b/src/backend/optimizer/plan/README index e52e3d34e7..013c0f9ea2 100644 --- a/src/backend/optimizer/plan/README +++ b/src/backend/optimizer/plan/README @@ -37,19 +37,19 @@ This is some implementation notes and opened issues... First, implementation uses new type of parameters - PARAM_EXEC - to deal with correlation Vars. When query_planner() is called, it first tries to -replace all upper queries Var referenced in current query with Param of -this type. Some global variables are used to keep mapping of Vars to -Params and Params to Vars. +replace all upper queries Var referenced in current query with Param of +this type. Some global variables are used to keep mapping of Vars to +Params and Params to Vars. -After this, all current query' SubLinks are processed: for each SubLink -found in query' qual union_planner() (old planner() function) will be -called to plan corresponding subselect (union_planner() calls -query_planner() for "simple" query and supports UNIONs). After subselect -are planned, optimizer knows about is this correlated, un-correlated or -_undirect_ correlated (references some grand-parent Vars but no parent -ones: uncorrelated from the parent' point of view) query. +After this, all current query' SubLinks are processed: for each SubLink +found in query' qual union_planner() (old planner() function) will be +called to plan corresponding subselect (union_planner() calls +query_planner() for "simple" query and supports UNIONs). After subselect +are planned, optimizer knows about is this correlated, un-correlated or +_undirect_ correlated (references some grand-parent Vars but no parent +ones: uncorrelated from the parent' point of view) query. -For uncorrelated and undirect correlated subqueries of EXPRession or +For uncorrelated and undirect correlated subqueries of EXPRession or EXISTS type SubLinks will be replaced with "normal" clauses from SubLink->Oper list (I changed this list to be list of EXPR nodes, not just Oper ones). Right sides of these nodes are replaced with @@ -81,7 +81,7 @@ plan->qual) - to initialize them and let them know about changed Params (from the list of their "interests"). After all SubLinks are processed, query_planner() calls qual' -canonificator and does "normal" work. By using Params optimizer +canonificator and does "normal" work. By using Params optimizer is mostly unchanged. Well, Executor. To get subplans re-evaluated without ExecutorStart() @@ -91,7 +91,7 @@ on each call) ExecReScan() now supports most of Plan types... Explanation of EXPLAIN. -vac=> explain select * from tmp where x >= (select max(x2) from test2 +vac=> explain select * from tmp where x >= (select max(x2) from test2 where y2 = y and exists (select * from tempx where tx = x)); NOTICE: QUERY PLAN: @@ -128,17 +128,17 @@ Opened issues. for each parent tuple - very slow... Results of some test. TMP is table with x,y (int4-s), x in 0-9, -y = 100 - x, 1000 tuples (10 duplicates of each tuple). TEST2 is table +y = 100 - x, 1000 tuples (10 duplicates of each tuple). 
TEST2 is table with x2, y2 (int4-s), x2 in 1-99, y2 = 100 -x2, 10000 tuples (100 dups). - Trying + Trying select * from tmp where x >= (select max(x2) from test2 where y2 = y); - + and begin; -select y as ty, max(x2) as mx into table tsub from test2, tmp +select y as ty, max(x2) as mx into table tsub from test2, tmp where y2 = y group by ty; vacuum tsub; select x, y from tmp, tsub where x >= mx and y = ty; diff --git a/src/backend/parser/scan.l b/src/backend/parser/scan.l index 09eac791c3..1fe2b9bcf3 100644 --- a/src/backend/parser/scan.l +++ b/src/backend/parser/scan.l @@ -247,8 +247,8 @@ xqinside [^']+ /* $foo$ style quotes ("dollar quoting") * The quoted string starts with $foo$ where "foo" is an optional string - * in the form of an identifier, except that it may not contain "$", - * and extends to the first occurrence of an identical string. + * in the form of an identifier, except that it may not contain "$", + * and extends to the first occurrence of an identical string. * There is *no* processing of the quoted text. * * {dolqfailed} is an error rule to avoid scanner backup when {dolqdelim} @@ -334,7 +334,7 @@ self [,()\[\].;\:\+\-\*\/\%\^\<\>\=] op_chars [\~\!\@\#\^\&\|\`\?\+\-\*\/\%\<\>\=] operator {op_chars}+ -/* we no longer allow unary minus in numbers. +/* we no longer allow unary minus in numbers. * instead we pass it separately to parser. there it gets * coerced via doNegate() -- Leon aug 20 1999 * diff --git a/src/backend/port/Makefile b/src/backend/port/Makefile index 44e873bebd..8ebb6d5703 100644 --- a/src/backend/port/Makefile +++ b/src/backend/port/Makefile @@ -4,13 +4,13 @@ # Makefile for the port-specific subsystem of the backend # # We have two different modes of operation: 1) put stuff specific to Port X -# in subdirectory X and have that subdirectory's make file make it all, and +# in subdirectory X and have that subdirectory's make file make it all, and # 2) use conditional statements in the present make file to include what's # necessary for a specific port in our own output. (1) came first, but (2) # is superior for many things, like when the same thing needs to be done for -# multiple ports and you don't want to duplicate files in multiple +# multiple ports and you don't want to duplicate files in multiple # subdirectories. Much of the stuff done via Method 1 today should probably -# be converted to Method 2. +# be converted to Method 2. # # IDENTIFICATION # src/backend/port/Makefile diff --git a/src/backend/port/aix/mkldexport.sh b/src/backend/port/aix/mkldexport.sh index 070423ba9b..adf3793e86 100755 --- a/src/backend/port/aix/mkldexport.sh +++ b/src/backend/port/aix/mkldexport.sh @@ -9,13 +9,13 @@ # mkldexport objectfile [location] # where # objectfile is the current location of the object file. -# location is the eventual (installed) location of the +# location is the eventual (installed) location of the # object file (if different from the current # working directory). # # [This file comes from the Postgres 4.2 distribution. 
- ay 7/95] # -# Header: /usr/local/devel/postgres/src/tools/mkldexport/RCS/mkldexport.sh,v 1.2 1994/03/13 04:59:12 aoki Exp +# Header: /usr/local/devel/postgres/src/tools/mkldexport/RCS/mkldexport.sh,v 1.2 1994/03/13 04:59:12 aoki Exp # # setting this to nm -B might be better diff --git a/src/backend/port/darwin/README b/src/backend/port/darwin/README index c8be401beb..2d9df79683 100644 --- a/src/backend/port/darwin/README +++ b/src/backend/port/darwin/README @@ -16,7 +16,7 @@ that a backend attempting to execute CREATE DATABASE core-dumps.) I would love to know why there is a discrepancy between the published source and the actual behavior --- tgl 7-Nov-2001. -Appropriate bug reports have been filed with Apple --- see +Appropriate bug reports have been filed with Apple --- see Radar Bug#s 2767956, 2683531, 2805147. One hopes we can retire this kluge in the not too distant future. diff --git a/src/backend/port/tas/sunstudio_sparc.s b/src/backend/port/tas/sunstudio_sparc.s index 8c655875ec..c8c20e747a 100644 --- a/src/backend/port/tas/sunstudio_sparc.s +++ b/src/backend/port/tas/sunstudio_sparc.s @@ -24,14 +24,14 @@ .global pg_atomic_cas pg_atomic_cas: - + ! "cas" only works on sparcv9 and sparcv8plus chips, and ! requies a compiler targeting these CPUs. It will fail ! on a compiler targeting sparcv8, and of course will not ! be understood by a sparcv8 CPU. gcc continues to use ! "ldstub" because it targets sparcv7. ! - ! There is actually a trick for embedding "cas" in a + ! There is actually a trick for embedding "cas" in a ! sparcv8-targeted compiler, but it can only be run ! on a sparcv8plus/v9 cpus: ! diff --git a/src/backend/snowball/Makefile b/src/backend/snowball/Makefile index 054880866d..c528be9d53 100644 --- a/src/backend/snowball/Makefile +++ b/src/backend/snowball/Makefile @@ -83,7 +83,7 @@ include $(top_srcdir)/src/Makefile.shlib $(SQLSCRIPT): Makefile snowball_func.sql.in snowball.sql.in ifeq ($(enable_shared), yes) echo '-- Language-specific snowball dictionaries' > $@ - cat $(srcdir)/snowball_func.sql.in >> $@ + cat $(srcdir)/snowball_func.sql.in >> $@ @set -e; \ set $(LANGUAGES) ; \ while [ "$$#" -gt 0 ] ; \ diff --git a/src/backend/storage/buffer/README b/src/backend/storage/buffer/README index 3b46094623..38e67c1c90 100644 --- a/src/backend/storage/buffer/README +++ b/src/backend/storage/buffer/README @@ -264,7 +264,7 @@ while scanning the buffers. (This is a very substantial improvement in the contention cost of the writer compared to PG 8.0.) During a checkpoint, the writer's strategy must be to write every dirty -buffer (pinned or not!). We may as well make it start this scan from +buffer (pinned or not!). We may as well make it start this scan from NextVictimBuffer, however, so that the first-to-be-written pages are the ones that backends might otherwise have to write for themselves soon. diff --git a/src/backend/storage/freespace/README b/src/backend/storage/freespace/README index b3b0e3a680..d591cbb585 100644 --- a/src/backend/storage/freespace/README +++ b/src/backend/storage/freespace/README @@ -84,7 +84,7 @@ backends are concurrently inserting into a relation, contention can be avoided by having them insert into different pages. But it is also desirable to fill up pages in sequential order, to get the benefit of OS prefetching and batched writes. The FSM is responsible for making that happen, and the next slot -pointer helps provide the desired behavior. +pointer helps provide the desired behavior. 
Higher-level structure ---------------------- diff --git a/src/backend/storage/ipc/README b/src/backend/storage/ipc/README index a56729db1a..913a4dab2b 100644 --- a/src/backend/storage/ipc/README +++ b/src/backend/storage/ipc/README @@ -7,7 +7,7 @@ Mon Jul 18 11:09:22 PDT 1988 W.KLAS The cache synchronization is done using a message queue. Every backend can register a message which then has to be read by -all backends. A message read by all backends is removed from the +all backends. A message read by all backends is removed from the queue automatically. If a message has been lost because the buffer was full, all backends that haven't read this message will be told that they have to reset their cache state. This is done diff --git a/src/backend/storage/lmgr/Makefile b/src/backend/storage/lmgr/Makefile index b0bfe66fe6..9aa9a5c086 100644 --- a/src/backend/storage/lmgr/Makefile +++ b/src/backend/storage/lmgr/Makefile @@ -27,5 +27,5 @@ s_lock_test: s_lock.c $(top_builddir)/src/port/libpgport.a check: s_lock_test ./s_lock_test -clean distclean maintainer-clean: +clean distclean maintainer-clean: rm -f s_lock_test diff --git a/src/backend/storage/lmgr/README b/src/backend/storage/lmgr/README index 0358594bad..87cae18cb6 100644 --- a/src/backend/storage/lmgr/README +++ b/src/backend/storage/lmgr/README @@ -31,7 +31,7 @@ arrival order. There is no timeout. * Regular locks (a/k/a heavyweight locks). The regular lock manager supports a variety of lock modes with table-driven semantics, and it has -full deadlock detection and automatic release at transaction end. +full deadlock detection and automatic release at transaction end. Regular locks should be used for all user-driven lock requests. Acquisition of either a spinlock or a lightweight lock causes query @@ -260,7 +260,7 @@ A key design consideration is that we want to make routine operations (lock grant and release) run quickly when there is no deadlock, and avoid the overhead of deadlock handling as much as possible. We do this using an "optimistic waiting" approach: if a process cannot acquire the -lock it wants immediately, it goes to sleep without any deadlock check. +lock it wants immediately, it goes to sleep without any deadlock check. But it also sets a delay timer, with a delay of DeadlockTimeout milliseconds (typically set to one second). If the delay expires before the process is granted the lock it wants, it runs the deadlock diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c index ce0b7586c8..e10457797e 100644 --- a/src/backend/tsearch/wparser_def.c +++ b/src/backend/tsearch/wparser_def.c @@ -423,8 +423,8 @@ TParserCopyClose(TParser *prs) * Character-type support functions, equivalent to is* macros, but * working with any possible encodings and locales. Notes: * - with multibyte encoding and C-locale isw* function may fail - * or give wrong result. - * - multibyte encoding and C-locale often are used for + * or give wrong result. + * - multibyte encoding and C-locale often are used for * Asian languages. * - if locale is C the we use pgwstr instead of wstr */ @@ -761,8 +761,8 @@ p_isURLPath(TParser *prs) /* * returns true if current character has zero display length or * it's a special sign in several languages. Such characters - * aren't a word-breaker although they aren't an isalpha. - * In beginning of word they aren't a part of it. + * aren't a word-breaker although they aren't an isalpha. + * In beginning of word they aren't a part of it. 
*/ static int p_isspecial(TParser *prs) @@ -2099,7 +2099,7 @@ hlCover(HeadlineParsedText *prs, TSQuery query, int *p, int *q) return false; } -static void +static void mark_fragment(HeadlineParsedText *prs, int highlight, int startpos, int endpos) { int i; @@ -2125,7 +2125,7 @@ mark_fragment(HeadlineParsedText *prs, int highlight, int startpos, int endpos) } } -typedef struct +typedef struct { int4 startpos; int4 endpos; @@ -2135,16 +2135,16 @@ typedef struct int2 excluded; } CoverPos; -static void +static void get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos, int *curlen, int *poslen, int max_words) { int i; - /* Objective: Generate a fragment of words between startpos and endpos - * such that it has at most max_words and both ends has query words. - * If the startpos and endpos are the endpoints of the cover and the - * cover has fewer words than max_words, then this function should - * just return the cover + /* Objective: Generate a fragment of words between startpos and endpos + * such that it has at most max_words and both ends has query words. + * If the startpos and endpos are the endpoints of the cover and the + * cover has fewer words than max_words, then this function should + * just return the cover */ /* first move startpos to an item */ for(i = *startpos; i <= *endpos; i++) @@ -2156,14 +2156,14 @@ get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos, /* cut endpos to have only max_words */ *curlen = 0; *poslen = 0; - for(i = *startpos; i <= *endpos && *curlen < max_words; i++) + for(i = *startpos; i <= *endpos && *curlen < max_words; i++) { if (!NONWORDTOKEN(prs->words[i].type)) *curlen += 1; if (prs->words[i].item && !prs->words[i].repeated) *poslen += 1; } - /* if the cover was cut then move back endpos to a query item */ + /* if the cover was cut then move back endpos to a query item */ if (*endpos > i) { *endpos = i; @@ -2174,31 +2174,31 @@ get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos, break; if (!NONWORDTOKEN(prs->words[i].type)) *curlen -= 1; - } - } + } + } } static void mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, - int shortword, int min_words, + int shortword, int min_words, int max_words, int max_fragments) { int4 poslen, curlen, i, f, num_f = 0; int4 stretch, maxstretch, posmarker; - int4 startpos = 0, - endpos = 0, + int4 startpos = 0, + endpos = 0, p = 0, q = 0; - int4 numcovers = 0, + int4 numcovers = 0, maxcovers = 32; int4 minI, minwords, maxitems; CoverPos *covers; covers = palloc(maxcovers * sizeof(CoverPos)); - + /* get all covers */ while (hlCover(prs, query, &p, &q)) { @@ -2207,7 +2207,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, /* Break the cover into smaller fragments such that each fragment * has at most max_words. Also ensure that each end of the fragment - * is a query word. This will allow us to stretch the fragment in + * is a query word. This will allow us to stretch the fragment in * either direction */ @@ -2228,9 +2228,9 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, numcovers ++; startpos = endpos + 1; endpos = q; - } + } /* move p to generate the next cover */ - p++; + p++; } /* choose best covers */ @@ -2240,13 +2240,13 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, minwords = 0x7fffffff; minI = -1; /* Choose the cover that contains max items. - * In case of tie choose the one with smaller - * number of words. 
+ * In case of tie choose the one with smaller + * number of words. */ for (i = 0; i < numcovers; i ++) { - if (!covers[i].in && !covers[i].excluded && - (maxitems < covers[i].poslen || (maxitems == covers[i].poslen + if (!covers[i].in && !covers[i].excluded && + (maxitems < covers[i].poslen || (maxitems == covers[i].poslen && minwords > covers[i].curlen))) { maxitems = covers[i].poslen; @@ -2263,15 +2263,15 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, endpos = covers[minI].endpos; curlen = covers[minI].curlen; /* stretch the cover if cover size is lower than max_words */ - if (curlen < max_words) + if (curlen < max_words) { /* divide the stretch on both sides of cover */ maxstretch = (max_words - curlen)/2; - /* first stretch the startpos - * stop stretching if - * 1. we hit the beginning of document - * 2. exceed maxstretch - * 3. we hit an already marked fragment + /* first stretch the startpos + * stop stretching if + * 1. we hit the beginning of document + * 2. exceed maxstretch + * 3. we hit an already marked fragment */ stretch = 0; posmarker = startpos; @@ -2297,7 +2297,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, { if (!NONWORDTOKEN(prs->words[i].type)) curlen ++; - posmarker = i; + posmarker = i; } /* cut back endpos till we find a non-short token */ for ( i = posmarker; i > endpos && (NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword); i--) @@ -2316,7 +2316,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, /* exclude overlapping covers */ for (i = 0; i < numcovers; i ++) { - if (i != minI && ( (covers[i].startpos >= covers[minI].startpos && covers[i].startpos <= covers[minI].endpos) || (covers[i].endpos >= covers[minI].startpos && covers[i].endpos <= covers[minI].endpos))) + if (i != minI && ( (covers[i].startpos >= covers[minI].startpos && covers[i].startpos <= covers[minI].endpos) || (covers[i].endpos >= covers[minI].startpos && covers[i].endpos <= covers[minI].endpos))) covers[i].excluded = 1; } } @@ -2340,7 +2340,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight, } static void -mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight, +mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight, int shortword, int min_words, int max_words) { int p = 0, @@ -2552,7 +2552,7 @@ prsd_headline(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("MaxFragments should be >= 0"))); - } + } if (max_fragments == 0) /* call the default headline generator */ diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl index 57cc5f70ff..797324dcf3 100644 --- a/src/backend/utils/Gen_fmgrtab.pl +++ b/src/backend/utils/Gen_fmgrtab.pl @@ -93,7 +93,7 @@ my $tabfile = $output_path . 'fmgrtab.c'; open H, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!"; open T, '>', $tabfile . $tmpext or die "Could not open $tabfile$tmpext: $!"; -print H +print H qq|/*------------------------------------------------------------------------- * * fmgroids.h diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 9ae6492982..5d29cf6533 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -102,7 +102,7 @@ typedef int16 NumericDigit; * remaining bits are never examined. Currently, we always initialize these * to zero, but it might be possible to use them for some other purpose in * the future. 
- * + * * In the NumericShort format, the remaining 14 bits of the header word * (n_short.n_header) are allocated as follows: 1 for sign (positive or * negative), 6 for dynamic scale, and 7 for weight. In practice, most @@ -3725,7 +3725,7 @@ make_result(NumericVar *var) len = NUMERIC_HDRSZ_SHORT + n * sizeof(NumericDigit); result = (Numeric) palloc(len); SET_VARSIZE(result, len); - result->choice.n_short.n_header = + result->choice.n_short.n_header = (sign == NUMERIC_NEG ? (NUMERIC_SHORT | NUMERIC_SHORT_SIGN_MASK) : NUMERIC_SHORT) | (var->dscale << NUMERIC_SHORT_DSCALE_SHIFT) diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index 15b6d7c4fd..ee83e2f308 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -3054,7 +3054,7 @@ text_to_array_internal(PG_FUNCTION_ARGS) int start_posn; int end_posn; int chunk_len; - + text_position_setup(inputstring, fldsep, &state); /* @@ -3085,7 +3085,7 @@ text_to_array_internal(PG_FUNCTION_ARGS) PointerGetDatum(inputstring), is_null, 1)); } - + start_posn = 1; /* start_ptr points to the start_posn'th character of inputstring */ start_ptr = VARDATA_ANY(inputstring); @@ -3110,7 +3110,7 @@ text_to_array_internal(PG_FUNCTION_ARGS) /* must build a temp text datum to pass to accumArrayResult */ result_text = cstring_to_text_with_len(start_ptr, chunk_len); is_null = null_string ? text_isequal(result_text, null_string) : false; - + /* stash away this field */ astate = accumArrayResult(astate, PointerGetDatum(result_text), @@ -3133,19 +3133,19 @@ text_to_array_internal(PG_FUNCTION_ARGS) } else { - /* + /* * When fldsep is NULL, each character in the inputstring becomes an * element in the result array. The separator is effectively the space * between characters. */ inputstring_len = VARSIZE_ANY_EXHDR(inputstring); - + /* return empty array for empty input string */ if (inputstring_len < 1) PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID)); - + start_ptr = VARDATA_ANY(inputstring); - + while (inputstring_len > 0) { int chunk_len = pg_mblen(start_ptr); @@ -3155,7 +3155,7 @@ text_to_array_internal(PG_FUNCTION_ARGS) /* must build a temp text datum to pass to accumArrayResult */ result_text = cstring_to_text_with_len(start_ptr, chunk_len); is_null = null_string ? 
text_isequal(result_text, null_string) : false; - + /* stash away this field */ astate = accumArrayResult(astate, PointerGetDatum(result_text), @@ -3205,7 +3205,7 @@ array_to_text_null(PG_FUNCTION_ARGS) /* returns NULL when first or second parameter is NULL */ if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) PG_RETURN_NULL(); - + v = PG_GETARG_ARRAYTYPE_P(0); fldsep = text_to_cstring(PG_GETARG_TEXT_PP(1)); @@ -3332,7 +3332,7 @@ array_to_text_internal(FunctionCallInfo fcinfo, ArrayType *v, } } } - + result = cstring_to_text_with_len(buf.data, buf.len); pfree(buf.data); diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 6e9c7fe2b0..726780bf37 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -3601,7 +3601,7 @@ xml_is_well_formed(PG_FUNCTION_ARGS) { #ifdef USE_LIBXML text *data = PG_GETARG_TEXT_P(0); - + PG_RETURN_BOOL(wellformed_xml(data, xmloption)); #else NO_XML_SUPPORT(); @@ -3614,7 +3614,7 @@ xml_is_well_formed_document(PG_FUNCTION_ARGS) { #ifdef USE_LIBXML text *data = PG_GETARG_TEXT_P(0); - + PG_RETURN_BOOL(wellformed_xml(data, XMLOPTION_DOCUMENT)); #else NO_XML_SUPPORT(); @@ -3627,7 +3627,7 @@ xml_is_well_formed_content(PG_FUNCTION_ARGS) { #ifdef USE_LIBXML text *data = PG_GETARG_TEXT_P(0); - + PG_RETURN_BOOL(wellformed_xml(data, XMLOPTION_CONTENT)); #else NO_XML_SUPPORT(); diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl index 909c7d272e..f2bf957a2e 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl @@ -8,7 +8,7 @@ # map files provided by Unicode organization. # Unfortunately it is prohibited by the organization # to distribute the map files. So if you try to use this script, -# you have to obtain GB2312.TXT from +# you have to obtain GB2312.TXT from # the organization's ftp site. 
# # GB2312.TXT format: diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl index 4552e06628..94e850cc7b 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl @@ -45,7 +45,7 @@ while($line = ){ } else { next; } - + $ucs = hex($u); $code = hex($c); $utf = &ucs2utf($ucs); @@ -73,7 +73,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){ if( $count == 0 ){ printf FILE " {0x%08x, 0x%06x} /* %s */\n", $index, $code, $comment{ $code }; } else { - printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code }; + printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code }; } } @@ -135,7 +135,7 @@ if ($TEST == 1) { ($code >= 0x8ea1 && $code <= 0x8efe) || ($code >= 0x8fa1a1 && $code <= 0x8ffefe) || ($code >= 0xa1a1 && $code <= 0x8fefe))) { - + $v1 = hex(substr($index, 0, 8)); $v2 = hex(substr($index, 8, 8)); @@ -192,7 +192,7 @@ while($line = ){ } else { next; } - + $ucs = hex($u); $code = hex($c); $utf = &ucs2utf($ucs); @@ -220,7 +220,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){ if( $count == 0 ){ printf FILE " {0x%06x, 0x%08x} /* %s */\n", $index, $code, $comment{ $code }; } else { - printf FILE " {0x%06x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code }; + printf FILE " {0x%06x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code }; } } diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl index daaaea0bd5..2951fc2f1b 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl @@ -8,7 +8,7 @@ # map files provided by Unicode organization. # Unfortunately it is prohibited by the organization # to distribute the map files. So if you try to use this script, -# you have to obtain JIS0201.TXT, JIS0208.TXT, JIS0212.TXT from +# you have to obtain JIS0201.TXT, JIS0208.TXT, JIS0212.TXT from # the organization's ftp site. # # JIS0201.TXT format: diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl index 4e2296a838..2d44837133 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl @@ -8,7 +8,7 @@ # map files provided by Unicode organization. # Unfortunately it is prohibited by the organization # to distribute the map files. So if you try to use this script, -# you have to obtain OLD5601.TXT from +# you have to obtain OLD5601.TXT from # the organization's ftp site. # # OLD5601.TXT format: diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl index 9434298927..176f765a28 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl @@ -8,7 +8,7 @@ # map files provided by Unicode organization. # Unfortunately it is prohibited by the organization # to distribute the map files. So if you try to use this script, -# you have to obtain CNS11643.TXT from +# you have to obtain CNS11643.TXT from # the organization's ftp site. 
# # CNS11643.TXT format: diff --git a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl index 828f34ed5a..5b7254feb0 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl @@ -43,7 +43,7 @@ while($line = ){ } else { next; } - + $ucs = hex($u); $code = hex($c); $utf = &ucs2utf($ucs); @@ -71,7 +71,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){ if( $count == 0 ){ printf FILE " {0x%08x, 0x%06x} /* %s */\n", $index, $code, $comment{ $code }; } else { - printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code }; + printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code }; } } @@ -132,7 +132,7 @@ while($line = ){ } else { next; } - + $ucs = hex($u); $code = hex($c); $utf = &ucs2utf($ucs); @@ -161,7 +161,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){ if( $count == 0 ){ printf FILE " {0x%04x, 0x%08x} /* %s */\n", $index, $code, $comment{ $code }; } else { - printf FILE " {0x%04x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code }; + printf FILE " {0x%04x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code }; } } diff --git a/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl b/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl index 00517a03f9..68037bd77a 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl @@ -8,7 +8,7 @@ # map files provided by Unicode organization. # Unfortunately it is prohibited by the organization # to distribute the map files. So if you try to use this script, -# you have to obtain SHIFTJIS.TXT from +# you have to obtain SHIFTJIS.TXT from # the organization's ftp site. # # SHIFTJIS.TXT format: diff --git a/src/backend/utils/mb/Unicode/ucs2utf.pl b/src/backend/utils/mb/Unicode/ucs2utf.pl index 6ca982f8cb..7e137f2e47 100644 --- a/src/backend/utils/mb/Unicode/ucs2utf.pl +++ b/src/backend/utils/mb/Unicode/ucs2utf.pl @@ -13,12 +13,12 @@ sub ucs2utf { } elsif ($ucs > 0x007f && $ucs <= 0x07ff) { $utf = (($ucs & 0x003f) | 0x80) | ((($ucs >> 6) | 0xc0) << 8); } elsif ($ucs > 0x07ff && $ucs <= 0xffff) { - $utf = ((($ucs >> 12) | 0xe0) << 16) | + $utf = ((($ucs >> 12) | 0xe0) << 16) | (((($ucs & 0x0fc0) >> 6) | 0x80) << 8) | (($ucs & 0x003f) | 0x80); } else { $utf = ((($ucs >> 18) | 0xf0) << 24) | - (((($ucs & 0x3ffff) >> 12) | 0x80) << 16) | + (((($ucs & 0x3ffff) >> 12) | 0x80) << 16) | (((($ucs & 0x0fc0) >> 6) | 0x80) << 8) | (($ucs & 0x003f) | 0x80); } diff --git a/src/backend/utils/misc/Makefile b/src/backend/utils/misc/Makefile index 0ca57a1f21..cd9ba5d1cc 100644 --- a/src/backend/utils/misc/Makefile +++ b/src/backend/utils/misc/Makefile @@ -37,5 +37,5 @@ endif # Note: guc-file.c is not deleted by 'make clean', # since we want to ship it in distribution tarballs. 
-clean: +clean: @rm -f lex.yy.c diff --git a/src/backend/utils/misc/check_guc b/src/backend/utils/misc/check_guc index 5152b4e929..293fb0363f 100755 --- a/src/backend/utils/misc/check_guc +++ b/src/backend/utils/misc/check_guc @@ -4,7 +4,7 @@ ## in postgresql.conf.sample: ## 1) the valid config settings may be preceded by a '#', but NOT '# ' ## (we use this to skip comments) -## 2) the valid config settings will be followed immediately by ' =' +## 2) the valid config settings will be followed immediately by ' =' ## (at least one space preceding the '=') ## in guc.c: ## 3) the options have PGC_ on the same line as the option @@ -14,7 +14,7 @@ ## 1) Don't know what to do with TRANSACTION ISOLATION LEVEL ## if an option is valid but shows up in only one file (guc.c but not -## postgresql.conf.sample), it should be listed here so that it +## postgresql.conf.sample), it should be listed here so that it ## can be ignored INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \ is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \ @@ -23,35 +23,35 @@ session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwloc trace_notify trace_userlocks transaction_isolation transaction_read_only \ zero_damaged_pages" -### What options are listed in postgresql.conf.sample, but don't appear +### What options are listed in postgresql.conf.sample, but don't appear ### in guc.c? # grab everything that looks like a setting and convert it to lower case -SETTINGS=`grep ' =' postgresql.conf.sample | +SETTINGS=`grep ' =' postgresql.conf.sample | grep -v '^# ' | # strip comments -sed -e 's/^#//' | +sed -e 's/^#//' | awk '{print $1}'` SETTINGS=`echo "$SETTINGS" | tr 'A-Z' 'a-z'` -for i in $SETTINGS ; do +for i in $SETTINGS ; do hidden=0 ## it sure would be nice to replace this with an sql "not in" statement ## it doesn't seem to make sense to have things in .sample and not in guc.c # for hidethis in $INTENTIONALLY_NOT_INCLUDED ; do -# if [ "$hidethis" = "$i" ] ; then +# if [ "$hidethis" = "$i" ] ; then # hidden=1 # fi # done if [ "$hidden" -eq 0 ] ; then grep -i '"'$i'"' guc.c > /dev/null - if [ $? -ne 0 ] ; then - echo "$i seems to be missing from guc.c"; - fi; + if [ $? -ne 0 ] ; then + echo "$i seems to be missing from guc.c"; + fi; fi done -### What options are listed in guc.c, but don't appear +### What options are listed in guc.c, but don't appear ### in postgresql.conf.sample? 
# grab everything that looks like a setting and convert it to lower case diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l index 3b827958f5..2986d2f25b 100644 --- a/src/backend/utils/misc/guc-file.l +++ b/src/backend/utils/misc/guc-file.l @@ -463,9 +463,9 @@ ParseConfigFile(const char *config_file, const char *calling_file, /* now we must have the option value */ if (token != GUC_ID && - token != GUC_STRING && - token != GUC_INTEGER && - token != GUC_REAL && + token != GUC_STRING && + token != GUC_INTEGER && + token != GUC_REAL && token != GUC_UNQUOTED_STRING) goto parse_error; if (token == GUC_STRING) /* strip quotes and escapes */ @@ -573,7 +573,7 @@ ParseConfigFile(const char *config_file, const char *calling_file, else ereport(elevel, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("syntax error in file \"%s\" line %u, near token \"%s\"", + errmsg("syntax error in file \"%s\" line %u, near token \"%s\"", config_file, ConfigFileLineno, yytext))); OK = false; diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index d512172769..80ee04d30a 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -62,7 +62,7 @@ # (change requires restart) #port = 5432 # (change requires restart) #max_connections = 100 # (change requires restart) -# Note: Increasing max_connections costs ~400 bytes of shared memory per +# Note: Increasing max_connections costs ~400 bytes of shared memory per # connection slot, plus lock space (see max_locks_per_transaction). #superuser_reserved_connections = 3 # (change requires restart) #unix_socket_directory = '' # (change requires restart) @@ -154,7 +154,7 @@ # (change requires restart) #fsync = on # turns forced synchronization on or off #synchronous_commit = on # immediate fsync at commit -#wal_sync_method = fsync # the default is the first option +#wal_sync_method = fsync # the default is the first option # supported by the operating system: # open_datasync # fdatasync @@ -246,7 +246,7 @@ #constraint_exclusion = partition # on, off, or partition #cursor_tuple_fraction = 0.1 # range 0.0-1.0 #from_collapse_limit = 8 -#join_collapse_limit = 8 # 1 disables collapsing of explicit +#join_collapse_limit = 8 # 1 disables collapsing of explicit # JOIN clauses @@ -284,7 +284,7 @@ # in all cases. #log_rotation_age = 1d # Automatic rotation of logfiles will # happen after that time. 0 disables. -#log_rotation_size = 10MB # Automatic rotation of logfiles will +#log_rotation_size = 10MB # Automatic rotation of logfiles will # happen after that much log output. # 0 disables. @@ -412,7 +412,7 @@ # AUTOVACUUM PARAMETERS #------------------------------------------------------------------------------ -#autovacuum = on # Enable autovacuum subprocess? 'on' +#autovacuum = on # Enable autovacuum subprocess? 'on' # requires track_counts to also be on. 
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and # their durations, > 0 logs only @@ -423,7 +423,7 @@ #autovacuum_naptime = 1min # time between autovacuum runs #autovacuum_vacuum_threshold = 50 # min number of row updates before # vacuum -#autovacuum_analyze_threshold = 50 # min number of row updates before +#autovacuum_analyze_threshold = 50 # min number of row updates before # analyze #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze diff --git a/src/backend/utils/mmgr/README b/src/backend/utils/mmgr/README index 2e9a226114..d52e959784 100644 --- a/src/backend/utils/mmgr/README +++ b/src/backend/utils/mmgr/README @@ -377,7 +377,7 @@ constraining context-type designers very much.) Given this, the pfree routine will look something like - StandardChunkHeader * header = + StandardChunkHeader * header = (StandardChunkHeader *) ((char *) p - sizeof(StandardChunkHeader)); (*header->mycontext->methods->free_p) (p); diff --git a/src/bcc32.mak b/src/bcc32.mak index 67f73a53b9..83c26df167 100644 --- a/src/bcc32.mak +++ b/src/bcc32.mak @@ -19,17 +19,17 @@ !IF "$(OS)" == "Windows_NT" NULL= -!ELSE +!ELSE NULL=nul -!ENDIF +!ENDIF -ALL: +ALL: cd include if not exist pg_config.h copy pg_config.h.win32 pg_config.h if not exist pg_config_os.h copy port\win32.h pg_config_os.h cd .. cd interfaces\libpq - make -N -DCFG=$(CFG) /f bcc32.mak + make -N -DCFG=$(CFG) /f bcc32.mak cd ..\.. echo All Win32 parts have been built! diff --git a/src/bin/pg_dump/README b/src/bin/pg_dump/README index c0a84ff63a..5015b7cd45 100644 --- a/src/bin/pg_dump/README +++ b/src/bin/pg_dump/README @@ -19,7 +19,7 @@ or, to dump in TAR format pg_dump -Ft > To restore, try - + To list contents: pg_restore -l | less @@ -62,12 +62,12 @@ or, simply: TAR === -The TAR archive that pg_dump creates currently has a blank username & group for the files, +The TAR archive that pg_dump creates currently has a blank username & group for the files, but should be otherwise valid. It also includes a 'restore.sql' script which is there for the benefit of humans. The script is never used by pg_restore. Note: the TAR format archive can only be used as input into pg_restore if it is in TAR form. -(ie. you should not extract the files then expect pg_restore to work). +(ie. you should not extract the files then expect pg_restore to work). You can extract, edit, and tar the files again, and it should work, but the 'toc' file should go at the start, the data files be in the order they are used, and diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 55ea6841a4..3bca417cef 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -10498,7 +10498,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId, } /* - * dumpSecLabel + * dumpSecLabel * * This routine is used to dump any security labels associated with the * object handed to this routine. The routine takes a constant character diff --git a/src/bin/psql/psqlscan.l b/src/bin/psql/psqlscan.l index 7942fe5c45..a1da032a6f 100644 --- a/src/bin/psql/psqlscan.l +++ b/src/bin/psql/psqlscan.l @@ -277,8 +277,8 @@ xqinside [^']+ /* $foo$ style quotes ("dollar quoting") * The quoted string starts with $foo$ where "foo" is an optional string - * in the form of an identifier, except that it may not contain "$", - * and extends to the first occurrence of an identical string. 
+ * in the form of an identifier, except that it may not contain "$", + * and extends to the first occurrence of an identical string. * There is *no* processing of the quoted text. * * {dolqfailed} is an error rule to avoid scanner backup when {dolqdelim} @@ -364,7 +364,7 @@ self [,()\[\].;\:\+\-\*\/\%\^\<\>\=] op_chars [\~\!\@\#\^\&\|\`\?\+\-\*\/\%\<\>\=] operator {op_chars}+ -/* we no longer allow unary minus in numbers. +/* we no longer allow unary minus in numbers. * instead we pass it separately to parser. there it gets * coerced via doNegate() -- Leon aug 20 1999 * diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h index 0ec24bdd30..1831fd01ee 100644 --- a/src/include/catalog/objectaddress.h +++ b/src/include/catalog/objectaddress.h @@ -10,7 +10,7 @@ * *------------------------------------------------------------------------- */ -#ifndef OBJECTADDRESS_H +#ifndef OBJECTADDRESS_H #define OBJECTADDRESS_H #include "nodes/parsenodes.h" diff --git a/src/include/pg_config.h.win32 b/src/include/pg_config.h.win32 index 6c342a24d0..2a383b619a 100644 --- a/src/include/pg_config.h.win32 +++ b/src/include/pg_config.h.win32 @@ -179,7 +179,7 @@ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the global variable 'int timezone'. */ -#define HAVE_INT_TIMEZONE +#define HAVE_INT_TIMEZONE /* Define to 1 if you have support for IPv6. */ #define HAVE_IPV6 1 @@ -249,7 +249,7 @@ /* Define to 1 if `long long int' works and is 64 bits. */ #if (_MSC_VER > 1200) -#define HAVE_LONG_LONG_INT_64 +#define HAVE_LONG_LONG_INT_64 #endif /* Define to 1 if you have the `memmove' function. */ diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h index bf97ab3586..d54a02c98f 100644 --- a/src/include/storage/s_lock.h +++ b/src/include/storage/s_lock.h @@ -856,7 +856,7 @@ spin_delay(void) #endif - + #endif /* !defined(HAS_TEST_AND_SET) */ diff --git a/src/interfaces/ecpg/README.dynSQL b/src/interfaces/ecpg/README.dynSQL index dcb263e9f6..e1d1507fd0 100644 --- a/src/interfaces/ecpg/README.dynSQL +++ b/src/interfaces/ecpg/README.dynSQL @@ -3,7 +3,7 @@ src/interfaces/ecpg/README.dynSQL descriptor statements have the following shortcomings - input descriptors (USING DESCRIPTOR ) are not supported - + Reason: to fully support dynamic SQL the frontend/backend communication should change to recognize input parameters. 
Since this is not likely to happen in the near future and you diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c index 90288d343b..0296d925b9 100644 --- a/src/interfaces/ecpg/ecpglib/prepare.c +++ b/src/interfaces/ecpg/ecpglib/prepare.c @@ -164,7 +164,7 @@ ECPGprepare(int lineno, const char *connection_name, const bool questionmarks, c struct prepared_statement *this, *prev; - (void) questionmarks; /* quiet the compiler */ + (void) questionmarks; /* quiet the compiler */ con = ecpg_get_connection(connection_name); if (!ecpg_init(con, connection_name, lineno)) diff --git a/src/interfaces/ecpg/preproc/Makefile b/src/interfaces/ecpg/preproc/Makefile index e8a6916faa..4c8f8d699d 100644 --- a/src/interfaces/ecpg/preproc/Makefile +++ b/src/interfaces/ecpg/preproc/Makefile @@ -58,7 +58,7 @@ else endif preproc.y: ../../../backend/parser/gram.y parse.pl ecpg.addons ecpg.header ecpg.tokens ecpg.trailer ecpg.type - $(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@ + $(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@ $(PERL) $(srcdir)/check_rules.pl $(srcdir) $< ecpg_keywords.o c_keywords.o keywords.o preproc.o parser.o: preproc.h diff --git a/src/interfaces/ecpg/preproc/check_rules.pl b/src/interfaces/ecpg/preproc/check_rules.pl index 7dc6ca46fb..3a796493d5 100755 --- a/src/interfaces/ecpg/preproc/check_rules.pl +++ b/src/interfaces/ecpg/preproc/check_rules.pl @@ -102,7 +102,7 @@ while () { $block = $block . $arr[$fieldIndexer]; } } -} +} close GRAM; @@ -113,7 +113,7 @@ line: while () { @Fld = split(' ', $_, -1); if (!/^ECPG:/) { - next line; + next line; } if ($found{$Fld[2]} ne 'found') { diff --git a/src/interfaces/ecpg/preproc/ecpg.addons b/src/interfaces/ecpg/preproc/ecpg.addons index 3b74ba0a4e..3a8c2dca10 100644 --- a/src/interfaces/ecpg/preproc/ecpg.addons +++ b/src/interfaces/ecpg/preproc/ecpg.addons @@ -40,7 +40,7 @@ ECPG: stmtPrepareStmt block { if ($1.type == NULL || strlen($1.type) == 0) output_prepare_statement($1.name, $1.stmt); - else + else output_statement(cat_str(5, make_str("prepare"), $1.name, $1.type, make_str("as"), $1.stmt), 0, ECPGst_normal); } ECPG: stmtTransactionStmt block @@ -109,7 +109,7 @@ ECPG: stmtViewStmt rule if (!strcmp($1, "all")) fprintf(yyout, "{ ECPGdeallocate_all(__LINE__, %d, %s);", compat, con); - else if ($1[0] == ':') + else if ($1[0] == ':') fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, %s);", compat, con, $1+1); else fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, \"%s\");", compat, con, $1); diff --git a/src/interfaces/ecpg/preproc/ecpg.header b/src/interfaces/ecpg/preproc/ecpg.header index 54979e987c..3f6ffd9c7b 100644 --- a/src/interfaces/ecpg/preproc/ecpg.header +++ b/src/interfaces/ecpg/preproc/ecpg.header @@ -103,7 +103,7 @@ mmerror(int error_code, enum errortype type, const char *error, ...) 
fclose(yyin); if (yyout) fclose(yyout); - + if (strcmp(output_filename, "-") != 0 && unlink(output_filename) != 0) fprintf(stderr, _("could not remove output file \"%s\"\n"), output_filename); exit(error_code); diff --git a/src/interfaces/ecpg/preproc/ecpg.tokens b/src/interfaces/ecpg/preproc/ecpg.tokens index c396b552f9..b55138a316 100644 --- a/src/interfaces/ecpg/preproc/ecpg.tokens +++ b/src/interfaces/ecpg/preproc/ecpg.tokens @@ -3,7 +3,7 @@ /* special embedded SQL tokens */ %token SQL_ALLOCATE SQL_AUTOCOMMIT SQL_BOOL SQL_BREAK SQL_CALL SQL_CARDINALITY SQL_CONNECT - SQL_COUNT + SQL_COUNT SQL_DATETIME_INTERVAL_CODE SQL_DATETIME_INTERVAL_PRECISION SQL_DESCRIBE SQL_DESCRIPTOR SQL_DISCONNECT SQL_FOUND @@ -23,5 +23,5 @@ S_STATIC S_SUB S_VOLATILE S_TYPEDEF -%token CSTRING CVARIABLE CPP_LINE IP +%token CSTRING CVARIABLE CPP_LINE IP %token DOLCONST ECONST NCONST UCONST UIDENT diff --git a/src/interfaces/ecpg/preproc/ecpg.trailer b/src/interfaces/ecpg/preproc/ecpg.trailer index 2eaef25c53..e80fece810 100644 --- a/src/interfaces/ecpg/preproc/ecpg.trailer +++ b/src/interfaces/ecpg/preproc/ecpg.trailer @@ -70,7 +70,7 @@ connection_target: opt_database_name opt_server opt_port /* old style: dbname[@server][:port] */ if (strlen($2) > 0 && *($2) != '@') mmerror(PARSE_ERROR, ET_ERROR, "expected \"@\", found \"%s\"", $2); - + /* C strings need to be handled differently */ if ($1[0] == '\"') $$ = $1; @@ -241,7 +241,7 @@ opt_options: Op connect_options | /*EMPTY*/ { $$ = EMPTY; } ; -connect_options: ColId opt_opt_value +connect_options: ColId opt_opt_value { $$ = make2_str($1, $2); } | ColId opt_opt_value Op connect_options { @@ -347,7 +347,7 @@ ECPGCursorStmt: DECLARE cursor_name cursor_options CURSOR opt_hold FOR prepared ; ECPGExecuteImmediateStmt: EXECUTE IMMEDIATE execstring - { + { /* execute immediate means prepare the statement and * immediately execute it */ $$ = $3; @@ -631,7 +631,7 @@ var_type: simple_type $$.type_index = this->type->type_index; if (this->type->type_sizeof && strlen(this->type->type_sizeof) != 0) $$.type_sizeof = this->type->type_sizeof; - else + else $$.type_sizeof = cat_str(3, make_str("sizeof("), mm_strdup(this->name), make_str(")")); struct_member_list[struct_level] = ECPGstruct_member_dup(this->struct_member_list); @@ -862,7 +862,7 @@ variable: opt_pointer ECPGColLabel opt_array_bounds opt_bit_field opt_initialize type = ECPGmake_simple_type(actual_type[struct_level].type_enum, length, varchar_counter); else type = ECPGmake_array_type(ECPGmake_simple_type(actual_type[struct_level].type_enum, length, varchar_counter), dimension); - + if (strcmp(dimension, "0") == 0 || abs(atoi(dimension)) == 1) *dim = '\0'; else @@ -1037,7 +1037,7 @@ UsingValue: UsingConst } | civar { $$ = EMPTY; } | civarind { $$ = EMPTY; } - ; + ; UsingConst: Iconst { $$ = $1; } | '+' Iconst { $$ = cat_str(2, make_str("+"), $2); } @@ -1857,7 +1857,7 @@ execute_rest: /* EMPTY */ { $$ = EMPTY; } | ecpg_into ecpg_using { $$ = EMPTY; } | ecpg_using { $$ = EMPTY; } | ecpg_into { $$ = EMPTY; } - ; + ; ecpg_into: INTO into_list { $$ = EMPTY; } | into_descriptor { $$ = $1; } diff --git a/src/interfaces/ecpg/preproc/ecpg.type b/src/interfaces/ecpg/preproc/ecpg.type index 831c4c3b20..ac6aa000ac 100644 --- a/src/interfaces/ecpg/preproc/ecpg.type +++ b/src/interfaces/ecpg/preproc/ecpg.type @@ -113,7 +113,7 @@ %type variable %type variable_declarations %type variable_list -%type vt_declarations +%type vt_declarations %type Op %type IntConstVar diff --git a/src/interfaces/ecpg/preproc/parse.pl 
b/src/interfaces/ecpg/preproc/parse.pl index f3c757e893..b765a58305 100644 --- a/src/interfaces/ecpg/preproc/parse.pl +++ b/src/interfaces/ecpg/preproc/parse.pl @@ -93,7 +93,7 @@ line: while (<>) { chomp; # strip record separator @Fld = split(' ', $_, -1); - # Dump the action for a rule - + # Dump the action for a rule - # mode indicates if we are processing the 'stmt:' rule (mode==0 means normal, mode==1 means stmt:) # flds are the fields to use. These may start with a '$' - in which case they are the result of a previous non-terminal # if they dont start with a '$' then they are token name @@ -235,8 +235,8 @@ line: while (<>) { if ($replace_token{$arr[$fieldIndexer]}) { $arr[$fieldIndexer] = $replace_token{$arr[$fieldIndexer]}; } - - # Are we looking at a declaration of a non-terminal ? + + # Are we looking at a declaration of a non-terminal ? if (($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:') || $arr[$fieldIndexer + 1] eq ':') { $non_term_id = $arr[$fieldIndexer]; $s = ':', $non_term_id =~ s/$s//g; @@ -253,7 +253,7 @@ line: while (<>) { $copymode = 'on'; } $line = $line . ' ' . $arr[$fieldIndexer]; - # Do we have the : attached already ? + # Do we have the : attached already ? # If yes, we'll have already printed the ':' if (!($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:')) { # Consume the ':' which is next... @@ -261,7 +261,7 @@ line: while (<>) { $fieldIndexer++; } - # Special mode? + # Special mode? if ($non_term_id eq 'stmt') { $stmt_mode = 1; } @@ -380,7 +380,7 @@ sub dump { sub dump_fields { local($mode, *flds, $len, $ln) = @_; if ($mode == 0) { - #Normal + #Normal &add_to_buffer('rules', $ln); if ($feature_not_supported == 1) { # we found an unsupported feature, but we have to @@ -393,7 +393,7 @@ sub dump_fields { } if ($len == 0) { - # We have no fields ? + # We have no fields ? &add_to_buffer('rules', " \$\$=EMPTY; }"); } else { @@ -418,7 +418,7 @@ sub dump_fields { } } - # So - how many fields did we end up with ? + # So - how many fields did we end up with ? if ($cnt == 1) { # Straight assignement $str = " \$\$ = " . $flds_new{0} . ';'; diff --git a/src/interfaces/ecpg/preproc/pgc.l b/src/interfaces/ecpg/preproc/pgc.l index 05febb556d..b7e46866f7 100644 --- a/src/interfaces/ecpg/preproc/pgc.l +++ b/src/interfaces/ecpg/preproc/pgc.l @@ -58,8 +58,8 @@ static bool isinformixdefine(void); char *token_start; int state_before; -struct _yy_buffer -{ +struct _yy_buffer +{ YY_BUFFER_STATE buffer; long lineno; char *filename; @@ -71,7 +71,7 @@ static char *old; #define MAX_NESTED_IF 128 static short preproc_tos; static short ifcond; -static struct _if_value +static struct _if_value { short condition; short else_branch; @@ -87,7 +87,7 @@ static struct _if_value %option yylineno -%x C SQL incl def def_ident undef +%x C SQL incl def def_ident undef /* * OK, here is a short description of lex/flex rules behavior. @@ -518,7 +518,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. /* throw back all but the initial "$" */ yyless(1); /* and treat it as {other} */ - return yytext[0]; + return yytext[0]; } {dolqdelim} { token_start = yytext; @@ -737,7 +737,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. } {identifier} { const ScanKeyword *keyword; - + if (!isdefine()) { /* Is it an SQL/ECPG keyword? */ @@ -764,7 +764,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. 
} {other} { return yytext[0]; } {exec_sql} { BEGIN(SQL); return SQL_START; } -{informix_special} { +{informix_special} { /* are we simulating Informix? */ if (INFORMIX_MODE) { @@ -939,7 +939,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. yyterminate(); } {exec_sql}{include}{space}* { BEGIN(incl); } -{informix_special}{include}{space}* { +{informix_special}{include}{space}* { /* are we simulating Informix? */ if (INFORMIX_MODE) { @@ -952,7 +952,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. } } {exec_sql}{ifdef}{space}* { ifcond = TRUE; BEGIN(xcond); } -{informix_special}{ifdef}{space}* { +{informix_special}{ifdef}{space}* { /* are we simulating Informix? */ if (INFORMIX_MODE) { @@ -966,7 +966,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. } } {exec_sql}{ifndef}{space}* { ifcond = FALSE; BEGIN(xcond); } -{informix_special}{ifndef}{space}* { +{informix_special}{ifndef}{space}* { /* are we simulating Informix? */ if (INFORMIX_MODE) { @@ -990,7 +990,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. ifcond = TRUE; BEGIN(xcond); } -{informix_special}{elif}{space}* { +{informix_special}{elif}{space}* { /* are we simulating Informix? */ if (INFORMIX_MODE) { @@ -1089,7 +1089,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. {identifier}{space}*";" { if (preproc_tos >= MAX_NESTED_IF-1) mmerror(PARSE_ERROR, ET_FATAL, "too many nested EXEC SQL IFDEF conditions"); - else + else { struct _defines *defptr; unsigned int i; @@ -1132,7 +1132,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. {other}|\n { mmerror(PARSE_ERROR, ET_FATAL, "missing identifier in EXEC SQL DEFINE command"); yyterminate(); - } + } {space}*";" { struct _defines *ptr, *this; @@ -1170,7 +1170,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. <> { if (yy_buffer == NULL) { - if ( preproc_tos > 0 ) + if ( preproc_tos > 0 ) { preproc_tos = 0; mmerror(PARSE_ERROR, ET_FATAL, "missing \"EXEC SQL ENDIF;\""); @@ -1189,7 +1189,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. ptr->used = NULL; break; } - + if (yyin != NULL) fclose(yyin); @@ -1209,7 +1209,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*. if (i != 0) output_line_number(); - + } } {other}|\n { mmerror(PARSE_ERROR, ET_FATAL, "internal error: unreachable state; please report this to "); } @@ -1244,7 +1244,7 @@ addlit(char *ytext, int yleng) /* enlarge buffer if needed */ if ((literallen+yleng) >= literalalloc) { - do + do literalalloc *= 2; while ((literallen+yleng) >= literalalloc); literalbuf = (char *) realloc(literalbuf, literalalloc); @@ -1290,7 +1290,7 @@ parse_include(void) /* * skip the ";" if there is one and trailing whitespace. Note that - * yytext contains at least one non-space character plus the ";" + * yytext contains at least one non-space character plus the ";" */ for (i = strlen(yytext)-2; i > 0 && ecpg_isspace(yytext[i]); @@ -1301,7 +1301,7 @@ parse_include(void) i--; yytext[i+1] = '\0'; - + yyin = NULL; /* If file name is enclosed in '"' remove these and look only in '.' 
*/ @@ -1311,7 +1311,7 @@ parse_include(void) { yytext[i] = '\0'; memmove(yytext, yytext+1, strlen(yytext)); - + strncpy(inc_file, yytext, sizeof(inc_file)); yyin = fopen(inc_file, "r"); if (!yyin) @@ -1322,7 +1322,7 @@ parse_include(void) yyin = fopen(inc_file, "r"); } } - + } else { @@ -1331,7 +1331,7 @@ parse_include(void) yytext[i] = '\0'; memmove(yytext, yytext+1, strlen(yytext)); } - + for (ip = include_paths; yyin == NULL && ip != NULL; ip = ip->next) { if (strlen(ip->path) + strlen(yytext) + 3 > MAXPGPATH) diff --git a/src/interfaces/ecpg/test/Makefile.regress b/src/interfaces/ecpg/test/Makefile.regress index df792fd238..b2417081ee 100644 --- a/src/interfaces/ecpg/test/Makefile.regress +++ b/src/interfaces/ecpg/test/Makefile.regress @@ -1,6 +1,6 @@ override CPPFLAGS := -I../../include -I$(top_srcdir)/src/interfaces/ecpg/include \ - -I$(libpq_srcdir) $(CPPFLAGS) -override CFLAGS += $(PTHREAD_CFLAGS) + -I$(libpq_srcdir) $(CPPFLAGS) +override CFLAGS += $(PTHREAD_CFLAGS) override LDFLAGS := -L../../ecpglib -L../../pgtypeslib $(filter-out -l%, $(libpq)) $(LDFLAGS) override LIBS := -lecpg -lpgtypes $(filter -l%, $(libpq)) $(LIBS) $(PTHREAD_LIBS) diff --git a/src/interfaces/ecpg/test/compat_informix/describe.pgc b/src/interfaces/ecpg/test/compat_informix/describe.pgc index b0f9a3d8f2..1836ac3843 100644 --- a/src/interfaces/ecpg/test/compat_informix/describe.pgc +++ b/src/interfaces/ecpg/test/compat_informix/describe.pgc @@ -192,7 +192,7 @@ exec sql end declare section; strcpy(msg, "commit"); exec sql commit; - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); exec sql disconnect; return (0); diff --git a/src/interfaces/ecpg/test/compat_informix/sqlda.pgc b/src/interfaces/ecpg/test/compat_informix/sqlda.pgc index 8490d06164..e1142d2b22 100644 --- a/src/interfaces/ecpg/test/compat_informix/sqlda.pgc +++ b/src/interfaces/ecpg/test/compat_informix/sqlda.pgc @@ -106,7 +106,7 @@ exec sql end declare section; while (1) { strcpy(msg, "fetch"); - exec sql fetch 1 from mycur1 into descriptor outp_sqlda; + exec sql fetch 1 from mycur1 into descriptor outp_sqlda; printf("FETCH RECORD %d\n", ++rec); dump_sqlda(outp_sqlda); diff --git a/src/interfaces/ecpg/test/compat_informix/test_informix.pgc b/src/interfaces/ecpg/test/compat_informix/test_informix.pgc index e1cfd25a3b..8b7692b0fd 100644 --- a/src/interfaces/ecpg/test/compat_informix/test_informix.pgc +++ b/src/interfaces/ecpg/test/compat_informix/test_informix.pgc @@ -11,7 +11,7 @@ static void dosqlprint(void) { int main(void) { - $int i = 14; + $int i = 14; $decimal j, m, n; $string c[10]; diff --git a/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc b/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc index 9b324e25e8..0e8f1f0f0b 100644 --- a/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc +++ b/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc @@ -67,8 +67,8 @@ int main(void) EXEC SQL create table history (customerid integer, timestamp timestamp without time zone, action_taken char(5), narrative varchar(100)); sql_check("main", "create", 0); - - EXEC SQL insert into history + + EXEC SQL insert into history (customerid, timestamp, action_taken, narrative) values(1, '2003-05-07 13:28:34 CEST', 'test', 'test'); sql_check("main", "insert", 0); @@ -96,7 +96,7 @@ int main(void) (customerid, timestamp, action_taken, narrative) values(:c, :e, 'test', 'test'); sql_check("main", "update", 0); - + EXEC SQL commit; EXEC SQL drop table history; diff --git 
a/src/interfaces/ecpg/test/expected/compat_informix-describe.c b/src/interfaces/ecpg/test/expected/compat_informix-describe.c index 6aa534f5d6..b4e1b47066 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-describe.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-describe.c @@ -455,7 +455,7 @@ if (sqlca.sqlcode < 0) exit (1);} #line 193 "describe.pgc" - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "CURRENT"); #line 196 "describe.pgc" diff --git a/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c b/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c index 647f677c14..a013be99aa 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c @@ -268,7 +268,7 @@ if (sqlca.sqlcode == ECPG_NOT_FOUND) break; if (sqlca.sqlcode < 0) exit (1);} #line 109 "sqlda.pgc" - + printf("FETCH RECORD %d\n", ++rec); dump_sqlda(outp_sqlda); diff --git a/src/interfaces/ecpg/test/expected/compat_informix-test_informix.c b/src/interfaces/ecpg/test/expected/compat_informix-test_informix.c index 4cc6e3d713..d357c77a43 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-test_informix.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-test_informix.c @@ -36,7 +36,7 @@ int main(void) int i = 14 ; #line 14 "test_informix.pgc" - + #line 15 "test_informix.pgc" decimal j , m , n ; diff --git a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c index 2f8ee74971..c1a4891191 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c @@ -193,7 +193,7 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 68 "test_informix2.pgc" sql_check("main", "create", 0); - + { ECPGdo(__LINE__, 1, 1, NULL, 0, ECPGst_normal, "insert into history ( customerid , timestamp , action_taken , narrative ) values ( 1 , '2003-05-07 13:28:34 CEST' , 'test' , 'test' )", ECPGt_EOIT, ECPGt_EORT); #line 73 "test_informix2.pgc" @@ -244,7 +244,7 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 97 "test_informix2.pgc" sql_check("main", "update", 0); - + { ECPGtrans(__LINE__, NULL, "commit"); #line 100 "test_informix2.pgc" diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c b/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c index 58d11e8e7e..648b648e21 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c @@ -55,17 +55,17 @@ main(void) char *t1 = "2000-7-12 17:34:29"; int i; - ECPGdebug(1, stderr); - /* exec sql whenever sqlerror do sqlprint ( ) ; */ + ECPGdebug(1, stderr); + /* exec sql whenever sqlerror do sqlprint ( ) ; */ #line 27 "dt_test.pgc" - { ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0); + { ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0); #line 28 "dt_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} #line 28 "dt_test.pgc" - { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table date_test ( d date , ts timestamp )", ECPGt_EOIT, ECPGt_EORT); + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table date_test ( d date , ts timestamp )", ECPGt_EOIT, ECPGt_EORT); #line 29 "dt_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} @@ -84,8 +84,8 @@ if (sqlca.sqlcode < 0) sqlprint ( );} #line 31 "dt_test.pgc" - date1 = PGTYPESdate_from_asc(d1, NULL); - ts1 = PGTYPEStimestamp_from_asc(t1, NULL); + date1 = 
PGTYPESdate_from_asc(d1, NULL); + ts1 = PGTYPEStimestamp_from_asc(t1, NULL); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into date_test ( d , ts ) values ( $1 , $2 )", ECPGt_date,&(date1),(long)1,(long)1,sizeof(date), diff --git a/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c b/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c index 91c8ad55fe..c60bf51d93 100644 --- a/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c +++ b/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c @@ -120,7 +120,7 @@ int main() ECPGdebug(1, stderr); - + { ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0); #line 50 "array_of_struct.pgc" diff --git a/src/interfaces/ecpg/test/expected/preproc-cursor.c b/src/interfaces/ecpg/test/expected/preproc-cursor.c index e755c57461..794e7e2643 100644 --- a/src/interfaces/ecpg/test/expected/preproc-cursor.c +++ b/src/interfaces/ecpg/test/expected/preproc-cursor.c @@ -754,7 +754,7 @@ if (sqlca.sqlcode < 0) exit (1);} #line 239 "cursor.pgc" - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "CURRENT"); #line 242 "cursor.pgc" diff --git a/src/interfaces/ecpg/test/expected/preproc-init.c b/src/interfaces/ecpg/test/expected/preproc-init.c index 1307915fad..49f2d5d57a 100644 --- a/src/interfaces/ecpg/test/expected/preproc-init.c +++ b/src/interfaces/ecpg/test/expected/preproc-init.c @@ -146,7 +146,7 @@ int main(void) - + /* = 1L */ #line 60 "init.pgc" @@ -250,7 +250,7 @@ if (sqlca.sqlcode < 0) fe ( ENUM0 );} /* exec sql whenever sqlerror do sqlnotice ( NULL , 0 ) ; */ #line 97 "init.pgc" - + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select now ( )", ECPGt_EOIT, ECPGt_EORT); #line 98 "init.pgc" diff --git a/src/interfaces/ecpg/test/expected/preproc-outofscope.c b/src/interfaces/ecpg/test/expected/preproc-outofscope.c index ada4f89d6c..a30b7215ae 100644 --- a/src/interfaces/ecpg/test/expected/preproc-outofscope.c +++ b/src/interfaces/ecpg/test/expected/preproc-outofscope.c @@ -363,7 +363,7 @@ if (sqlca.sqlcode < 0) exit (1);} #line 118 "outofscope.pgc" - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "CURRENT"); #line 121 "outofscope.pgc" diff --git a/src/interfaces/ecpg/test/expected/preproc-variable.c b/src/interfaces/ecpg/test/expected/preproc-variable.c index 9f8b36d8a7..ca3032faca 100644 --- a/src/interfaces/ecpg/test/expected/preproc-variable.c +++ b/src/interfaces/ecpg/test/expected/preproc-variable.c @@ -264,7 +264,7 @@ if (sqlca.sqlcode < 0) exit (1);} #line 95 "variable.pgc" - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "CURRENT"); #line 98 "variable.pgc" diff --git a/src/interfaces/ecpg/test/expected/preproc-whenever.c b/src/interfaces/ecpg/test/expected/preproc-whenever.c index 1547a16e99..03f596a9c2 100644 --- a/src/interfaces/ecpg/test/expected/preproc-whenever.c +++ b/src/interfaces/ecpg/test/expected/preproc-whenever.c @@ -243,4 +243,4 @@ if (sqlca.sqlcode < 0) exit (1);} #line 65 "whenever.pgc" exit (0); -} +} diff --git a/src/interfaces/ecpg/test/expected/sql-array.c b/src/interfaces/ecpg/test/expected/sql-array.c index cdd2bea078..3c879561b3 100644 --- a/src/interfaces/ecpg/test/expected/sql-array.c +++ b/src/interfaces/ecpg/test/expected/sql-array.c @@ -148,7 +148,7 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 29 "array.pgc" - { ECPGtrans(__LINE__, NULL, "begin work"); + { ECPGtrans(__LINE__, NULL, "begin work"); #line 31 "array.pgc" if (sqlca.sqlcode < 0) sqlprint();} @@ -205,7 +205,7 @@ 
if (sqlca.sqlcode < 0) sqlprint();} if (sqlca.sqlcode < 0) sqlprint();} #line 43 "array.pgc" - + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select f , text from test where i = 1", ECPGt_EOIT, ECPGt_double,&(f),(long)1,(long)1,sizeof(double), diff --git a/src/interfaces/ecpg/test/expected/sql-code100.c b/src/interfaces/ecpg/test/expected/sql-code100.c index e250690e9c..051fc38622 100644 --- a/src/interfaces/ecpg/test/expected/sql-code100.c +++ b/src/interfaces/ecpg/test/expected/sql-code100.c @@ -104,7 +104,7 @@ int main() ECPGdebug(1,stderr); - + { ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0); } #line 15 "code100.pgc" @@ -118,7 +118,7 @@ int main() #line 22 "code100.pgc" if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + for (index=0;index<10;++index) { { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into test ( payload , index ) values ( 0 , $1 )", ECPGt_int,&(index),(long)1,(long)1,sizeof(int), @@ -131,12 +131,12 @@ int main() #line 31 "code100.pgc" if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "update test set payload = payload + 1 where index = - 1", ECPGt_EOIT, ECPGt_EORT);} #line 35 "code100.pgc" if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "delete from test where index = - 1", ECPGt_EOIT, ECPGt_EORT);} #line 38 "code100.pgc" @@ -155,7 +155,7 @@ int main() #line 46 "code100.pgc" if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + { ECPGdisconnect(__LINE__, "CURRENT");} #line 49 "code100.pgc" diff --git a/src/interfaces/ecpg/test/expected/sql-describe.c b/src/interfaces/ecpg/test/expected/sql-describe.c index 7a5ab02deb..fd46a29245 100644 --- a/src/interfaces/ecpg/test/expected/sql-describe.c +++ b/src/interfaces/ecpg/test/expected/sql-describe.c @@ -453,7 +453,7 @@ if (sqlca.sqlcode < 0) exit (1);} #line 193 "describe.pgc" - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "CURRENT"); #line 196 "describe.pgc" diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc.c b/src/interfaces/ecpg/test/expected/sql-dynalloc.c index 0232480333..ff04922fa7 100644 --- a/src/interfaces/ecpg/test/expected/sql-dynalloc.c +++ b/src/interfaces/ecpg/test/expected/sql-dynalloc.c @@ -296,28 +296,28 @@ if (sqlca.sqlcode < 0) sqlprint ( );} for (i=0;imember; int c=10>>2; - bool h=2||1; + bool h=2||1; long iay /* = 1L */ ; exec sql end declare section; @@ -94,7 +94,7 @@ int main(void) exec sql select now(); exec sql whenever sqlerror do fe(ENUM0); exec sql select now(); - exec sql whenever sqlerror do sqlnotice(NULL, NONO); + exec sql whenever sqlerror do sqlnotice(NULL, NONO); exec sql select now(); return 0; } diff --git a/src/interfaces/ecpg/test/preproc/outofscope.pgc b/src/interfaces/ecpg/test/preproc/outofscope.pgc index 12cd79ac77..25efe75cca 100644 --- a/src/interfaces/ecpg/test/preproc/outofscope.pgc +++ b/src/interfaces/ecpg/test/preproc/outofscope.pgc @@ -117,7 +117,7 @@ main (void) strcpy(msg, "commit"); exec sql commit; - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); exec sql disconnect; return (0); diff --git a/src/interfaces/ecpg/test/preproc/variable.pgc b/src/interfaces/ecpg/test/preproc/variable.pgc index 71efa0ddaf..05420afdb2 100644 --- a/src/interfaces/ecpg/test/preproc/variable.pgc +++ b/src/interfaces/ecpg/test/preproc/variable.pgc @@ -94,7 +94,7 @@ exec sql end declare section; 
strcpy(msg, "commit"); exec sql commit; - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); exec sql disconnect; return (0); diff --git a/src/interfaces/ecpg/test/preproc/whenever.pgc b/src/interfaces/ecpg/test/preproc/whenever.pgc index bba78ee023..9b3ae9e9ec 100644 --- a/src/interfaces/ecpg/test/preproc/whenever.pgc +++ b/src/interfaces/ecpg/test/preproc/whenever.pgc @@ -64,4 +64,4 @@ int main(void) exec sql select 1 into :i; exec sql rollback; exit (0); -} +} diff --git a/src/interfaces/ecpg/test/sql/Makefile b/src/interfaces/ecpg/test/sql/Makefile index 59e4dd6fc9..18c37b665e 100644 --- a/src/interfaces/ecpg/test/sql/Makefile +++ b/src/interfaces/ecpg/test/sql/Makefile @@ -22,7 +22,7 @@ TESTS = array array.c \ parser parser.c \ quote quote.c \ show show.c \ - insupd insupd.c + insupd insupd.c all: $(TESTS) diff --git a/src/interfaces/ecpg/test/sql/array.pgc b/src/interfaces/ecpg/test/sql/array.pgc index d74a1354e5..fbc6741665 100644 --- a/src/interfaces/ecpg/test/sql/array.pgc +++ b/src/interfaces/ecpg/test/sql/array.pgc @@ -28,7 +28,7 @@ EXEC SQL END DECLARE SECTION; EXEC SQL SET AUTOCOMMIT = ON; - EXEC SQL BEGIN WORK; + EXEC SQL BEGIN WORK; EXEC SQL CREATE TABLE test (f float, i int, a int[10], text char(10)); @@ -40,7 +40,7 @@ EXEC SQL END DECLARE SECTION; EXEC SQL COMMIT; - EXEC SQL BEGIN WORK; + EXEC SQL BEGIN WORK; EXEC SQL SELECT f,text INTO :f,:text diff --git a/src/interfaces/ecpg/test/sql/code100.pgc b/src/interfaces/ecpg/test/sql/code100.pgc index 2ae6d15ead..d9a5e52444 100644 --- a/src/interfaces/ecpg/test/sql/code100.pgc +++ b/src/interfaces/ecpg/test/sql/code100.pgc @@ -11,7 +11,7 @@ int main() ECPGdebug(1,stderr); - + exec sql connect to REGRESSDB1; if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); @@ -21,7 +21,7 @@ int main() if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); exec sql commit work; if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + for (index=0;index<10;++index) { exec sql insert into test (payload, index) @@ -30,11 +30,11 @@ int main() } exec sql commit work; if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + exec sql update test - set payload=payload+1 where index=-1; + set payload=payload+1 where index=-1; if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + exec sql delete from test where index=-1; if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); @@ -45,7 +45,7 @@ int main() if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); exec sql commit work; if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); - + exec sql disconnect; if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc); return 0; diff --git a/src/interfaces/ecpg/test/sql/describe.pgc b/src/interfaces/ecpg/test/sql/describe.pgc index 80361cbe43..cd52c8220b 100644 --- a/src/interfaces/ecpg/test/sql/describe.pgc +++ b/src/interfaces/ecpg/test/sql/describe.pgc @@ -192,7 +192,7 @@ exec sql end declare section; strcpy(msg, "commit"); exec sql commit; - strcpy(msg, "disconnect"); + strcpy(msg, "disconnect"); exec sql disconnect; return (0); diff --git a/src/interfaces/ecpg/test/sql/dynalloc.pgc b/src/interfaces/ecpg/test/sql/dynalloc.pgc index 90da1c060a..8aa810f6c9 100644 --- a/src/interfaces/ecpg/test/sql/dynalloc.pgc +++ b/src/interfaces/ecpg/test/sql/dynalloc.pgc @@ -55,28 +55,28 @@ int main(void) for (i=0;i $@ all: all-lib diff --git 
a/src/pl/plperl/SPI.xs b/src/pl/plperl/SPI.xs index bea690cf3e..afcfe211c8 100644 --- a/src/pl/plperl/SPI.xs +++ b/src/pl/plperl/SPI.xs @@ -1,7 +1,7 @@ /********************************************************************** * PostgreSQL::InServer::SPI * - * SPI interface for plperl. + * SPI interface for plperl. * * src/pl/plperl/SPI.xs * @@ -94,10 +94,10 @@ spi_spi_prepare(query, ...) CODE: int i; SV** argv; - if (items < 1) + if (items < 1) Perl_croak(aTHX_ "Usage: spi_prepare(query, ...)"); argv = ( SV**) palloc(( items - 1) * sizeof(SV*)); - for ( i = 1; i < items; i++) + for ( i = 1; i < items; i++) argv[i - 1] = ST(i); RETVAL = plperl_spi_prepare(query, items - 1, argv); pfree( argv); @@ -113,17 +113,17 @@ spi_spi_exec_prepared(query, ...) HV *attr = NULL; int i, offset = 1, argc; SV ** argv; - if ( items < 1) - Perl_croak(aTHX_ "Usage: spi_exec_prepared(query, [\\%%attr,] " + if ( items < 1) + Perl_croak(aTHX_ "Usage: spi_exec_prepared(query, [\\%%attr,] " "[\\@bind_values])"); if ( items > 1 && SvROK( ST( 1)) && SvTYPE( SvRV( ST( 1))) == SVt_PVHV) - { + { attr = ( HV*) SvRV(ST(1)); offset++; } argc = items - offset; argv = ( SV**) palloc( argc * sizeof(SV*)); - for ( i = 0; offset < items; offset++, i++) + for ( i = 0; offset < items; offset++, i++) argv[i] = ST(offset); ret_hash = plperl_spi_exec_prepared(query, attr, argc, argv); RETVAL = newRV_noinc((SV*)ret_hash); @@ -137,11 +137,11 @@ spi_spi_query_prepared(query, ...) CODE: int i; SV ** argv; - if ( items < 1) + if ( items < 1) Perl_croak(aTHX_ "Usage: spi_query_prepared(query, " "[\\@bind_values])"); argv = ( SV**) palloc(( items - 1) * sizeof(SV*)); - for ( i = 1; i < items; i++) + for ( i = 1; i < items; i++) argv[i - 1] = ST(i); RETVAL = plperl_spi_query_prepared(query, items - 1, argv); pfree( argv); diff --git a/src/pl/plperl/Util.xs b/src/pl/plperl/Util.xs index 7d29ef6aef..6b96107444 100644 --- a/src/pl/plperl/Util.xs +++ b/src/pl/plperl/Util.xs @@ -134,11 +134,11 @@ SV * util_quote_nullable(sv) SV *sv CODE: - if (!sv || !SvOK(sv)) + if (!sv || !SvOK(sv)) { RETVAL = newSVstring_len("NULL", 4); } - else + else { text *arg = sv2text(sv); text *ret = DatumGetTextP(DirectFunctionCall1(quote_nullable, PointerGetDatum(arg))); diff --git a/src/pl/plperl/expected/plperl.out b/src/pl/plperl/expected/plperl.out index e3e9ec7b6f..d95f646e06 100644 --- a/src/pl/plperl/expected/plperl.out +++ b/src/pl/plperl/expected/plperl.out @@ -476,9 +476,9 @@ SELECT * FROM recurse(3); --- --- Test arrary return --- -CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][] -LANGUAGE plperl as $$ - return [['a"b',undef,'c,d'],['e\\f',undef,'g']]; +CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][] +LANGUAGE plperl as $$ + return [['a"b',undef,'c,d'],['e\\f',undef,'g']]; $$; SELECT array_of_text(); array_of_text diff --git a/src/pl/plperl/expected/plperl_plperlu.out b/src/pl/plperl/expected/plperl_plperlu.out index 479a902de4..2be955ff13 100644 --- a/src/pl/plperl/expected/plperl_plperlu.out +++ b/src/pl/plperl/expected/plperl_plperlu.out @@ -5,12 +5,10 @@ CREATE OR REPLACE FUNCTION bar() RETURNS integer AS $$ # alternative - causes server process to exit(255) spi_exec_query("invalid sql statement"); $$ language plperl; -- compile plperl code - CREATE OR REPLACE FUNCTION foo() RETURNS integer AS $$ spi_exec_query("SELECT * FROM bar()"); return 1; $$ LANGUAGE plperlu; -- compile plperlu code - SELECT * FROM bar(); -- throws exception normally (running plperl) ERROR: syntax error at or near "invalid" at line 4. 
CONTEXT: PL/Perl function "bar" diff --git a/src/pl/plperl/expected/plperl_trigger.out b/src/pl/plperl/expected/plperl_trigger.out index bb1aed3093..3e549f7eef 100644 --- a/src/pl/plperl/expected/plperl_trigger.out +++ b/src/pl/plperl/expected/plperl_trigger.out @@ -48,7 +48,7 @@ CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger LANGUAGE plperl AS $$ } return undef; # allow statement to proceed; $$; -CREATE TRIGGER show_trigger_data_trig +CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); insert into trigger_test values(1,'insert'); @@ -122,7 +122,6 @@ NOTICE: $_TD->{table_schema} = 'public' CONTEXT: PL/Perl function "trigger_data" NOTICE: $_TD->{when} = 'BEFORE' CONTEXT: PL/Perl function "trigger_data" - DROP TRIGGER show_trigger_data_trig on trigger_test; insert into trigger_test values(1,'insert'); CREATE VIEW trigger_test_view AS SELECT * FROM trigger_test; @@ -202,20 +201,19 @@ NOTICE: $_TD->{when} = 'INSTEAD OF' CONTEXT: PL/Perl function "trigger_data" DROP VIEW trigger_test_view; delete from trigger_test; - DROP FUNCTION trigger_data(); CREATE OR REPLACE FUNCTION valid_id() RETURNS trigger AS $$ if (($_TD->{new}{i}>=100) || ($_TD->{new}{i}<=0)) { return "SKIP"; # Skip INSERT/UPDATE command - } - elsif ($_TD->{new}{v} ne "immortal") + } + elsif ($_TD->{new}{v} ne "immortal") { $_TD->{new}{v} .= "(modified by trigger)"; return "MODIFY"; # Modify tuple and proceed INSERT/UPDATE command - } - else + } + else { return; # Proceed INSERT/UPDATE command } @@ -251,9 +249,9 @@ CREATE OR REPLACE FUNCTION immortal() RETURNS trigger AS $$ if ($_TD->{old}{v} eq $_TD->{args}[0]) { return "SKIP"; # Skip DELETE command - } - else - { + } + else + { return; # Proceed DELETE command }; $$ LANGUAGE plperl; diff --git a/src/pl/plperl/plc_trusted.pl b/src/pl/plperl/plc_trusted.pl index a681ae0874..cd61882eb6 100644 --- a/src/pl/plperl/plc_trusted.pl +++ b/src/pl/plperl/plc_trusted.pl @@ -1,7 +1,7 @@ # src/pl/plperl/plc_trusted.pl package PostgreSQL::InServer::safe; - + # Load widely useful pragmas into plperl to make them available. 
# # SECURITY RISKS: diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 270e9f78e0..5595baaed5 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -1376,7 +1376,7 @@ plperl_validator(PG_FUNCTION_ARGS) &argtypes, &argnames, &argmodes); for (i = 0; i < numargs; i++) { - if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO && + if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO && argtypes[i] != RECORDOID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -2112,7 +2112,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger) typeStruct = (Form_pg_type) GETSTRUCT(typeTup); /* Disallow pseudotype argument */ - if (typeStruct->typtype == TYPTYPE_PSEUDO && + if (typeStruct->typtype == TYPTYPE_PSEUDO && procStruct->proargtypes.values[i] != RECORDOID) { free(prodesc->proname); @@ -2123,7 +2123,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger) format_type_be(procStruct->proargtypes.values[i])))); } - if (typeStruct->typtype == TYPTYPE_COMPOSITE || + if (typeStruct->typtype == TYPTYPE_COMPOSITE || procStruct->proargtypes.values[i] == RECORDOID) prodesc->arg_is_rowtype[i] = true; else diff --git a/src/pl/plperl/sql/plperl.sql b/src/pl/plperl/sql/plperl.sql index fcae9e093c..22ac0bb451 100644 --- a/src/pl/plperl/sql/plperl.sql +++ b/src/pl/plperl/sql/plperl.sql @@ -299,9 +299,9 @@ SELECT * FROM recurse(3); --- --- Test arrary return --- -CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][] -LANGUAGE plperl as $$ - return [['a"b',undef,'c,d'],['e\\f',undef,'g']]; +CREATE OR REPLACE FUNCTION array_of_text() RETURNS TEXT[][] +LANGUAGE plperl as $$ + return [['a"b',undef,'c,d'],['e\\f',undef,'g']]; $$; SELECT array_of_text(); diff --git a/src/pl/plperl/sql/plperl_plperlu.sql b/src/pl/plperl/sql/plperl_plperlu.sql index 65281c2df9..bbd79b662e 100644 --- a/src/pl/plperl/sql/plperl_plperlu.sql +++ b/src/pl/plperl/sql/plperl_plperlu.sql @@ -7,12 +7,12 @@ CREATE OR REPLACE FUNCTION bar() RETURNS integer AS $$ # alternative - causes server process to exit(255) spi_exec_query("invalid sql statement"); $$ language plperl; -- compile plperl code - + CREATE OR REPLACE FUNCTION foo() RETURNS integer AS $$ spi_exec_query("SELECT * FROM bar()"); return 1; $$ LANGUAGE plperlu; -- compile plperlu code - + SELECT * FROM bar(); -- throws exception normally (running plperl) SELECT * FROM foo(); -- used to cause backend crash (after switching to plperlu) diff --git a/src/pl/plperl/sql/plperl_trigger.sql b/src/pl/plperl/sql/plperl_trigger.sql index c47ddad3ca..1583a42544 100644 --- a/src/pl/plperl/sql/plperl_trigger.sql +++ b/src/pl/plperl/sql/plperl_trigger.sql @@ -51,14 +51,14 @@ CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger LANGUAGE plperl AS $$ return undef; # allow statement to proceed; $$; -CREATE TRIGGER show_trigger_data_trig +CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); insert into trigger_test values(1,'insert'); update trigger_test set v = 'update' where i = 1; delete from trigger_test; - + DROP TRIGGER show_trigger_data_trig on trigger_test; insert into trigger_test values(1,'insert'); @@ -74,7 +74,7 @@ delete from trigger_test_view; DROP VIEW trigger_test_view; delete from trigger_test; - + DROP FUNCTION trigger_data(); CREATE OR REPLACE FUNCTION valid_id() RETURNS trigger AS $$ @@ -82,13 +82,13 @@ CREATE OR REPLACE FUNCTION valid_id() RETURNS trigger AS $$ if (($_TD->{new}{i}>=100) || ($_TD->{new}{i}<=0)) { return "SKIP"; # Skip INSERT/UPDATE command - } - 
elsif ($_TD->{new}{v} ne "immortal") + } + elsif ($_TD->{new}{v} ne "immortal") { $_TD->{new}{v} .= "(modified by trigger)"; return "MODIFY"; # Modify tuple and proceed INSERT/UPDATE command - } - else + } + else { return; # Proceed INSERT/UPDATE command } @@ -116,9 +116,9 @@ CREATE OR REPLACE FUNCTION immortal() RETURNS trigger AS $$ if ($_TD->{old}{v} eq $_TD->{args}[0]) { return "SKIP"; # Skip DELETE command - } - else - { + } + else + { return; # Proceed DELETE command }; $$ LANGUAGE plperl; diff --git a/src/pl/plperl/text2macro.pl b/src/pl/plperl/text2macro.pl index 482ea0fd07..88241e2cb2 100644 --- a/src/pl/plperl/text2macro.pl +++ b/src/pl/plperl/text2macro.pl @@ -88,7 +88,7 @@ sub selftest { print $fh "int main() { puts(X); return 0; }\n"; close $fh; system("cat -n $tmp.c"); - + system("make $tmp") == 0 or die; open $fh, "./$tmp |" or die; my $result = <$fh>; diff --git a/src/pl/plpgsql/src/gram.y b/src/pl/plpgsql/src/gram.y index a28c6707e4..e4f485b553 100644 --- a/src/pl/plpgsql/src/gram.y +++ b/src/pl/plpgsql/src/gram.y @@ -1734,7 +1734,7 @@ stmt_open : K_OPEN cursor_variable if (endtoken == K_USING) { PLpgSQL_expr *expr; - + do { expr = read_sql_expression2(',', ';', diff --git a/src/pl/plpython/expected/plpython_newline.out b/src/pl/plpython/expected/plpython_newline.out index bf77285b98..27dc2f8ab0 100644 --- a/src/pl/plpython/expected/plpython_newline.out +++ b/src/pl/plpython/expected/plpython_newline.out @@ -1,6 +1,6 @@ -- -- Universal Newline Support --- +-- CREATE OR REPLACE FUNCTION newline_lf() RETURNS integer AS E'x = 100\ny = 23\nreturn x + y\n' LANGUAGE plpythonu; diff --git a/src/pl/plpython/expected/plpython_schema.out b/src/pl/plpython/expected/plpython_schema.out index e94e7bbcf8..3ec331c0f0 100644 --- a/src/pl/plpython/expected/plpython_schema.out +++ b/src/pl/plpython/expected/plpython_schema.out @@ -3,7 +3,7 @@ CREATE TABLE users ( lname text not null, username text, userid serial, - PRIMARY KEY(lname, fname) + PRIMARY KEY(lname, fname) ) ; NOTICE: CREATE TABLE will create implicit sequence "users_userid_seq" for serial column "users.userid" NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "users_pkey" for table "users" diff --git a/src/pl/plpython/expected/plpython_trigger.out b/src/pl/plpython/expected/plpython_trigger.out index a78e96e4a1..e04da221f7 100644 --- a/src/pl/plpython/expected/plpython_trigger.out +++ b/src/pl/plpython/expected/plpython_trigger.out @@ -78,7 +78,7 @@ for key in skeys: val = TD[key] plpy.notice("TD[" + key + "] => " + str(val)) -return None +return None $$; CREATE TRIGGER show_trigger_data_trig_before diff --git a/src/pl/plpython/sql/plpython_newline.sql b/src/pl/plpython/sql/plpython_newline.sql index 6263fac141..f9cee9491b 100644 --- a/src/pl/plpython/sql/plpython_newline.sql +++ b/src/pl/plpython/sql/plpython_newline.sql @@ -1,6 +1,6 @@ -- -- Universal Newline Support --- +-- CREATE OR REPLACE FUNCTION newline_lf() RETURNS integer AS E'x = 100\ny = 23\nreturn x + y\n' diff --git a/src/pl/plpython/sql/plpython_schema.sql b/src/pl/plpython/sql/plpython_schema.sql index 669c4877f1..a5bdbda2a3 100644 --- a/src/pl/plpython/sql/plpython_schema.sql +++ b/src/pl/plpython/sql/plpython_schema.sql @@ -3,7 +3,7 @@ CREATE TABLE users ( lname text not null, username text, userid serial, - PRIMARY KEY(lname, fname) + PRIMARY KEY(lname, fname) ) ; CREATE INDEX users_username_idx ON users(username); diff --git a/src/pl/plpython/sql/plpython_trigger.sql b/src/pl/plpython/sql/plpython_trigger.sql index 7520c79db5..4994d8fe7b 100644 
--- a/src/pl/plpython/sql/plpython_trigger.sql +++ b/src/pl/plpython/sql/plpython_trigger.sql @@ -78,7 +78,7 @@ for key in skeys: val = TD[key] plpy.notice("TD[" + key + "] => " + str(val)) -return None +return None $$; diff --git a/src/pl/tcl/expected/pltcl_setup.out b/src/pl/tcl/expected/pltcl_setup.out index f577e66277..a1385b2eee 100644 --- a/src/pl/tcl/expected/pltcl_setup.out +++ b/src/pl/tcl/expected/pltcl_setup.out @@ -40,7 +40,7 @@ create function check_pkey1_exists(int4, bpchar) returns bool as E' where key1 = \\$1 and key2 = \\$2" \\ {int4 bpchar}] } - + set n [spi_execp -count 1 $GD(plan) [list $1 $2]] if {$n > 0} { @@ -61,8 +61,8 @@ CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$ set dnames [info locals {[a-zA-Z]*} ] foreach key [lsort $dnames] { - - if { [array exists $key] } { + + if { [array exists $key] } { set str "{" foreach akey [lsort [ array names $key ] ] { if {[string length $str] > 1} { set str "$str, " } @@ -80,10 +80,10 @@ CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$ } - return OK + return OK $_$; -CREATE TRIGGER show_trigger_data_trig +CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); CREATE TRIGGER show_trigger_data_view_trig diff --git a/src/pl/tcl/sql/pltcl_setup.sql b/src/pl/tcl/sql/pltcl_setup.sql index a9370d258d..2176d5c4f4 100644 --- a/src/pl/tcl/sql/pltcl_setup.sql +++ b/src/pl/tcl/sql/pltcl_setup.sql @@ -45,7 +45,7 @@ create function check_pkey1_exists(int4, bpchar) returns bool as E' where key1 = \\$1 and key2 = \\$2" \\ {int4 bpchar}] } - + set n [spi_execp -count 1 $GD(plan) [list $1 $2]] if {$n > 0} { @@ -71,8 +71,8 @@ CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$ set dnames [info locals {[a-zA-Z]*} ] foreach key [lsort $dnames] { - - if { [array exists $key] } { + + if { [array exists $key] } { set str "{" foreach akey [lsort [ array names $key ] ] { if {[string length $str] > 1} { set str "$str, " } @@ -90,11 +90,11 @@ CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$ } - return OK + return OK $_$; -CREATE TRIGGER show_trigger_data_trig +CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); diff --git a/src/test/examples/Makefile b/src/test/examples/Makefile index b009416b8f..bbc6ee1d36 100644 --- a/src/test/examples/Makefile +++ b/src/test/examples/Makefile @@ -18,5 +18,5 @@ PROGS = testlibpq testlibpq2 testlibpq3 testlibpq4 testlo all: $(PROGS) -clean: +clean: rm -f $(PROGS) diff --git a/src/test/locale/Makefile b/src/test/locale/Makefile index e4d38646e4..c71dc2dbbf 100644 --- a/src/test/locale/Makefile +++ b/src/test/locale/Makefile @@ -10,7 +10,7 @@ DIRS = de_DE.ISO8859-1 gr_GR.ISO8859-7 koi8-r koi8-to-win1251 all: $(PROGS) -clean: +clean: rm -f $(PROGS) for d in $(DIRS); do \ $(MAKE) -C $$d clean || exit; \ diff --git a/src/test/locale/README b/src/test/locale/README index 86246df95d..980df8005b 100644 --- a/src/test/locale/README +++ b/src/test/locale/README @@ -24,5 +24,5 @@ a Makefile (and other files) similar to koi8-r/*. Actually, the simplest the files. Oleg. 
----- +---- Oleg Broytmann http://members.xoom.com/phd2/ phd2@earthling.net diff --git a/src/test/locale/de_DE.ISO8859-1/Makefile b/src/test/locale/de_DE.ISO8859-1/Makefile index fd8301928e..28a72b7e52 100644 --- a/src/test/locale/de_DE.ISO8859-1/Makefile +++ b/src/test/locale/de_DE.ISO8859-1/Makefile @@ -1,7 +1,7 @@ -all: +all: -test: +test: ./runall -clean: +clean: rm -f *.out diff --git a/src/test/locale/gr_GR.ISO8859-7/Makefile b/src/test/locale/gr_GR.ISO8859-7/Makefile index fd8301928e..28a72b7e52 100644 --- a/src/test/locale/gr_GR.ISO8859-7/Makefile +++ b/src/test/locale/gr_GR.ISO8859-7/Makefile @@ -1,7 +1,7 @@ -all: +all: -test: +test: ./runall -clean: +clean: rm -f *.out diff --git a/src/test/locale/koi8-r/Makefile b/src/test/locale/koi8-r/Makefile index fd8301928e..28a72b7e52 100644 --- a/src/test/locale/koi8-r/Makefile +++ b/src/test/locale/koi8-r/Makefile @@ -1,7 +1,7 @@ -all: +all: -test: +test: ./runall -clean: +clean: rm -f *.out diff --git a/src/test/locale/koi8-to-win1251/Makefile b/src/test/locale/koi8-to-win1251/Makefile index fd8301928e..28a72b7e52 100644 --- a/src/test/locale/koi8-to-win1251/Makefile +++ b/src/test/locale/koi8-to-win1251/Makefile @@ -1,7 +1,7 @@ -all: +all: -test: +test: ./runall -clean: +clean: rm -f *.out diff --git a/src/test/mb/mbregress.sh b/src/test/mb/mbregress.sh index 1ce45c7e1c..20942e30c2 100644 --- a/src/test/mb/mbregress.sh +++ b/src/test/mb/mbregress.sh @@ -46,7 +46,7 @@ do else EXPECTED="expected/${i}.out" fi - + if [ `diff ${EXPECTED} results/${i}.out | wc -l` -ne 0 ] then ( diff -C3 ${EXPECTED} results/${i}.out; \ diff --git a/src/test/performance/runtests.pl b/src/test/performance/runtests.pl index f4bf2fc2ec..edf45ded2f 100755 --- a/src/test/performance/runtests.pl +++ b/src/test/performance/runtests.pl @@ -15,10 +15,10 @@ $DBNAME = 'perftest'; ); # Tests to run: test' script, test' description, ... -# Test' script is in form +# Test' script is in form # # script_name[.ntm][ T] -# +# # script_name is name of file in ./sqls # .ntm means that script will be used for some initialization # and should not be timed: runtests.pl opens /dev/null as STDERR @@ -26,11 +26,11 @@ $DBNAME = 'perftest'; # Script shouldn't notice either he is running for test or for # initialization purposes. # T means that all queries in this test (initialization ?) are to be -# executed in SINGLE transaction. In this case global variable $XACTBLOCK +# executed in SINGLE transaction. In this case global variable $XACTBLOCK # is not empty string. Otherwise, each query in test is to be executed -# in own transaction ($XACTBLOCK is empty string). In accordance with -# $XACTBLOCK, script is to do DBMS specific preparation before execution -# of queries. (Look at example in sqls/inssimple for MySQL - it gives +# in own transaction ($XACTBLOCK is empty string). In accordance with +# $XACTBLOCK, script is to do DBMS specific preparation before execution +# of queries. (Look at example in sqls/inssimple for MySQL - it gives # an idea of what can be done for features unsupported by an DBMS.) 
# @perftests = ( @@ -91,9 +91,9 @@ for ($i = 0; $i <= $#perftests; $i++) $runtest = $test; if ( $test =~ /\.ntm/ ) { - # + # # No timing for this queries - # + # close (STDERR); # close $TmpFile open (STDERR, ">/dev/null") or die; $runtest =~ s/\.ntm//; diff --git a/src/test/regress/GNUmakefile b/src/test/regress/GNUmakefile index 0755304820..2869b4022c 100644 --- a/src/test/regress/GNUmakefile +++ b/src/test/regress/GNUmakefile @@ -162,7 +162,7 @@ runtest: installcheck runtest-parallel: installcheck-parallel bigtest: all tablespace-setup - $(pg_regress_call) --psqldir=$(PSQLDIR) --schedule=$(srcdir)/serial_schedule numeric_big + $(pg_regress_call) --psqldir=$(PSQLDIR) --schedule=$(srcdir)/serial_schedule numeric_big bigcheck: all tablespace-setup $(pg_regress_call) --temp-install=./tmp_check --top-builddir=$(top_builddir) --schedule=$(srcdir)/parallel_schedule $(MAXCONNOPT) numeric_big diff --git a/src/test/regress/expected/abstime.out b/src/test/regress/expected/abstime.out index a04f091666..ed48f642ab 100644 --- a/src/test/regress/expected/abstime.out +++ b/src/test/regress/expected/abstime.out @@ -5,7 +5,7 @@ -- -- -- timezones may vary based not only on location but the operating --- system. the main correctness issue is that the OS may not get +-- system. the main correctness issue is that the OS may not get -- daylight savings time right for times prior to Unix epoch (jan 1 1970). -- CREATE TABLE ABSTIME_TBL (f1 abstime); @@ -26,7 +26,7 @@ INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'epoch'); INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'infinity'); INSERT INTO ABSTIME_TBL (f1) VALUES (abstime '-infinity'); INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'May 10, 1947 23:59:12'); --- what happens if we specify slightly misformatted abstime? +-- what happens if we specify slightly misformatted abstime? INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'); ERROR: date/time field value out of range: "Feb 35, 1946 10:00:00" LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'... @@ -36,7 +36,7 @@ INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'); ERROR: date/time field value out of range: "Feb 28, 1984 25:08:10" LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'... ^ --- badly formatted abstimes: these should result in invalid abstimes +-- badly formatted abstimes: these should result in invalid abstimes INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format'); ERROR: invalid input syntax for type abstime: "bad date format" LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format'); diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out index ed3b0c4b75..82407bc9fd 100644 --- a/src/test/regress/expected/aggregates.out +++ b/src/test/regress/expected/aggregates.out @@ -317,7 +317,7 @@ CREATE TEMPORARY TABLE bitwise_test( y BIT(4) ); -- empty case -SELECT +SELECT BIT_AND(i2) AS "?", BIT_OR(i4) AS "?" 
FROM bitwise_test; @@ -386,7 +386,7 @@ SELECT t | t | t | t | t | t | t | t | t (1 row) -CREATE TEMPORARY TABLE bool_test( +CREATE TEMPORARY TABLE bool_test( b1 BOOL, b2 BOOL, b3 BOOL, diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index ab19a8e4fc..d6c5827c68 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -35,8 +35,8 @@ ALTER TABLE tmp ADD COLUMN y float4[]; ALTER TABLE tmp ADD COLUMN z int2[]; INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, v, w, x, y, z) - VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', @@ -48,7 +48,7 @@ SELECT * FROM tmp; (1 row) DROP TABLE tmp; --- the wolf bug - schema mods caused inconsistent row descriptors +-- the wolf bug - schema mods caused inconsistent row descriptors CREATE TABLE tmp ( initial int4 ); @@ -80,8 +80,8 @@ ALTER TABLE tmp ADD COLUMN y float4[]; ALTER TABLE tmp ADD COLUMN z int2[]; INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, v, w, x, y, z) - VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', @@ -137,7 +137,7 @@ ALTER TABLE tmp_view RENAME TO tmp_view_new; ANALYZE tenk1; set enable_seqscan to off; set enable_bitmapscan to off; --- 5 values, sorted +-- 5 values, sorted SELECT unique1 FROM tenk1 WHERE unique1 < 5; unique1 --------- @@ -1413,7 +1413,7 @@ select * from anothertab; (3 rows) alter table anothertab alter column atcol2 type text - using case when atcol2 is true then 'IT WAS TRUE' + using case when atcol2 is true then 'IT WAS TRUE' when atcol2 is false then 'IT WAS FALSE' else 'IT WAS NULL!' 
end; select * from anothertab; diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out index eff5f88c24..4d86f454f9 100644 --- a/src/test/regress/expected/arrays.out +++ b/src/test/regress/expected/arrays.out @@ -5,7 +5,7 @@ CREATE TABLE arrtest ( a int2[], b int4[][][], c name[], - d text[][], + d text[][], e float8[], f char(5)[], g varchar(5)[] @@ -21,7 +21,7 @@ INSERT INTO arrtest (f) VALUES ('{"too long"}'); ERROR: value too long for type character(5) INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g) - VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}', + VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}', '{{"elt1", "elt2"}}', '{"3.4", "6.7"}', '{"abc","abcde"}', '{"abc","abcde"}'); INSERT INTO arrtest (a, b[1:2], c, d[1:2]) @@ -37,7 +37,7 @@ SELECT * FROM arrtest; SELECT arrtest.a[1], arrtest.b[1][1][1], arrtest.c[1], - arrtest.d[1][1], + arrtest.d[1][1], arrtest.e[0] FROM arrtest; a | b | c | d | e @@ -58,7 +58,7 @@ SELECT a[1], b[1][1][1], c[1], d[1][1], e[0] SELECT a[1:3], b[1:1][1:2][1:2], - c[1:2], + c[1:2], d[1:1][1:2] FROM arrtest; a | b | c | d @@ -86,10 +86,10 @@ SELECT array_dims(a) AS a,array_dims(b) AS b,array_dims(c) AS c | [1:2] | [1:2] (3 rows) --- returns nothing +-- returns nothing SELECT * FROM arrtest - WHERE a[1] < 5 and + WHERE a[1] < 5 and c = '{"foobar"}'::_name; a | b | c | d | e | f | g ---+---+---+---+---+---+--- @@ -115,7 +115,7 @@ SELECT a,b,c FROM arrtest; SELECT a[1:3], b[1:1][1:2][1:2], - c[1:2], + c[1:2], d[1:1][2:2] FROM arrtest; a | b | c | d @@ -940,11 +940,11 @@ select c2[2].f2 from comptable; drop type _comptype; drop table comptable; drop type comptype; -create or replace function unnest1(anyarray) +create or replace function unnest1(anyarray) returns setof anyelement as $$ select $1[s] from generate_subscripts($1,1) g(s); $$ language sql immutable; -create or replace function unnest2(anyarray) +create or replace function unnest2(anyarray) returns setof anyelement as $$ select $1[s1][s2] from generate_subscripts($1,1) g1(s1), generate_subscripts($1,2) g2(s2); diff --git a/src/test/regress/expected/bit.out b/src/test/regress/expected/bit.out index 40082ca14a..9c7d202149 100644 --- a/src/test/regress/expected/bit.out +++ b/src/test/regress/expected/bit.out @@ -14,7 +14,7 @@ INSERT INTO BIT_TABLE VALUES (B'101011111010'); -- too long ERROR: bit string length 12 does not match type bit(11) --INSERT INTO BIT_TABLE VALUES ('X554'); --INSERT INTO BIT_TABLE VALUES ('X555'); -SELECT * FROM BIT_TABLE; +SELECT * FROM BIT_TABLE; b ------------- 00000000000 @@ -31,7 +31,7 @@ INSERT INTO VARBIT_TABLE VALUES (B'101011111010'); -- too long ERROR: bit string too long for type bit varying(11) --INSERT INTO VARBIT_TABLE VALUES ('X554'); --INSERT INTO VARBIT_TABLE VALUES ('X555'); -SELECT * FROM VARBIT_TABLE; +SELECT * FROM VARBIT_TABLE; v ------------- @@ -42,7 +42,7 @@ SELECT * FROM VARBIT_TABLE; -- Concatenation SELECT v, b, (v || b) AS concat - FROM BIT_TABLE, VARBIT_TABLE + FROM BIT_TABLE, VARBIT_TABLE ORDER BY 3; v | b | concat -------------+-------------+------------------------ @@ -110,7 +110,7 @@ SELECT v, DROP TABLE varbit_table; CREATE TABLE varbit_table (a BIT VARYING(16), b BIT VARYING(16)); COPY varbit_table FROM stdin; -SELECT a, b, ~a AS "~ a", a & b AS "a & b", +SELECT a, b, ~a AS "~ a", a & b AS "a & b", a | b AS "a | b", a # b AS "a # b" FROM varbit_table; a | b | ~ a | a & b | a | b | a # b ------------------+------------------+------------------+------------------+------------------+------------------ @@ 
-162,7 +162,7 @@ DROP TABLE varbit_table; DROP TABLE bit_table; CREATE TABLE bit_table (a BIT(16), b BIT(16)); COPY bit_table FROM stdin; -SELECT a,b,~a AS "~ a",a & b AS "a & b", +SELECT a,b,~a AS "~ a",a & b AS "a & b", a|b AS "a | b", a # b AS "a # b" FROM bit_table; a | b | ~ a | a & b | a | b | a # b ------------------+------------------+------------------+------------------+------------------+------------------ @@ -455,7 +455,7 @@ INSERT INTO BIT_SHIFT_TABLE SELECT b>>4 FROM BIT_SHIFT_TABLE; INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE; SELECT POSITION(B'1101' IN b), POSITION(B'11011' IN b), - b + b FROM BIT_SHIFT_TABLE ; position | position | b ----------+----------+------------------ @@ -485,7 +485,7 @@ INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0000' AS BIT VARYING(12)) >>4 INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE; SELECT POSITION(B'1101' IN v), POSITION(B'11011' IN v), - v + v FROM VARBIT_SHIFT_TABLE ; position | position | v ----------+----------+---------------------- diff --git a/src/test/regress/expected/bitmapops.out b/src/test/regress/expected/bitmapops.out index d88a76fe24..3570973e3c 100644 --- a/src/test/regress/expected/bitmapops.out +++ b/src/test/regress/expected/bitmapops.out @@ -9,7 +9,7 @@ -- That allows us to test all the different combinations of -- lossy and non-lossy pages with the minimum amount of data CREATE TABLE bmscantest (a int, b int, t text); -INSERT INTO bmscantest +INSERT INTO bmscantest SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,70000) r; CREATE INDEX i_bmtest_a ON bmscantest(a); diff --git a/src/test/regress/expected/boolean.out b/src/test/regress/expected/boolean.out index 28d7cf9526..e39f550332 100644 --- a/src/test/regress/expected/boolean.out +++ b/src/test/regress/expected/boolean.out @@ -225,7 +225,7 @@ CREATE TABLE BOOLTBL1 (f1 bool); INSERT INTO BOOLTBL1 (f1) VALUES (bool 't'); INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True'); INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true'); --- BOOLTBL1 should be full of true's at this point +-- BOOLTBL1 should be full of true's at this point SELECT '' AS t_3, BOOLTBL1.* FROM BOOLTBL1; t_3 | f1 -----+---- @@ -244,7 +244,7 @@ SELECT '' AS t_3, BOOLTBL1.* | t (3 rows) -SELECT '' AS t_3, BOOLTBL1.* +SELECT '' AS t_3, BOOLTBL1.* FROM BOOLTBL1 WHERE f1 <> bool 'false'; t_3 | f1 @@ -262,7 +262,7 @@ SELECT '' AS zero, BOOLTBL1.* (0 rows) INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f'); -SELECT '' AS f_1, BOOLTBL1.* +SELECT '' AS f_1, BOOLTBL1.* FROM BOOLTBL1 WHERE f1 = bool 'false'; f_1 | f1 @@ -277,12 +277,12 @@ INSERT INTO BOOLTBL2 (f1) VALUES (bool 'False'); INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE'); -- This is now an invalid expression -- For pre-v6.3 this evaluated to false - thomas 1997-10-23 -INSERT INTO BOOLTBL2 (f1) - VALUES (bool 'XXX'); +INSERT INTO BOOLTBL2 (f1) + VALUES (bool 'XXX'); ERROR: invalid input syntax for type boolean: "XXX" LINE 2: VALUES (bool 'XXX'); ^ --- BOOLTBL2 should be full of false's at this point +-- BOOLTBL2 should be full of false's at this point SELECT '' AS f_4, BOOLTBL2.* FROM BOOLTBL2; f_4 | f1 -----+---- diff --git a/src/test/regress/expected/box.out b/src/test/regress/expected/box.out index 2a94e33465..fc3154014b 100644 --- a/src/test/regress/expected/box.out +++ b/src/test/regress/expected/box.out @@ -18,11 +18,11 @@ CREATE TABLE BOX_TBL (f1 box); INSERT INTO BOX_TBL (f1) 
VALUES ('(2.0,2.0,0.0,0.0)'); INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)'); --- degenerate cases where the box is a line or a point --- note that lines and points boxes all have zero area +-- degenerate cases where the box is a line or a point +-- note that lines and points boxes all have zero area INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)'); INSERT INTO BOX_TBL (f1) VALUES ('(3.0, 3.0,3.0,3.0)'); --- badly formatted box inputs +-- badly formatted box inputs INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); ERROR: invalid input syntax for type box: "(2.3, 4.5)" LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); @@ -50,9 +50,9 @@ SELECT '' AS four, b.*, area(b.f1) as barea | (3,3),(3,3) | 0 (4 rows) --- overlap +-- overlap SELECT '' AS three, b.f1 - FROM BOX_TBL b + FROM BOX_TBL b WHERE b.f1 && box '(2.5,2.5,1.0,1.0)'; three | f1 -------+--------------------- @@ -61,7 +61,7 @@ SELECT '' AS three, b.f1 | (2.5,3.5),(2.5,2.5) (3 rows) --- left-or-overlap (x only) +-- left-or-overlap (x only) SELECT '' AS two, b1.* FROM BOX_TBL b1 WHERE b1.f1 &< box '(2.0,2.0,2.5,2.5)'; @@ -71,7 +71,7 @@ SELECT '' AS two, b1.* | (2.5,3.5),(2.5,2.5) (2 rows) --- right-or-overlap (x only) +-- right-or-overlap (x only) SELECT '' AS two, b1.* FROM BOX_TBL b1 WHERE b1.f1 &> box '(2.0,2.0,2.5,2.5)'; @@ -81,7 +81,7 @@ SELECT '' AS two, b1.* | (3,3),(3,3) (2 rows) --- left of +-- left of SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE b.f1 << box '(3.0,3.0,5.0,5.0)'; @@ -91,7 +91,7 @@ SELECT '' AS two, b.f1 | (2.5,3.5),(2.5,2.5) (2 rows) --- area <= +-- area <= SELECT '' AS four, b.f1 FROM BOX_TBL b WHERE b.f1 <= box '(3.0,3.0,5.0,5.0)'; @@ -103,7 +103,7 @@ SELECT '' AS four, b.f1 | (3,3),(3,3) (4 rows) --- area < +-- area < SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE b.f1 < box '(3.0,3.0,5.0,5.0)'; @@ -113,7 +113,7 @@ SELECT '' AS two, b.f1 | (3,3),(3,3) (2 rows) --- area = +-- area = SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE b.f1 = box '(3.0,3.0,5.0,5.0)'; @@ -123,19 +123,19 @@ SELECT '' AS two, b.f1 | (3,3),(1,1) (2 rows) --- area > +-- area > SELECT '' AS two, b.f1 - FROM BOX_TBL b -- zero area - WHERE b.f1 > box '(3.5,3.0,4.5,3.0)'; + FROM BOX_TBL b -- zero area + WHERE b.f1 > box '(3.5,3.0,4.5,3.0)'; two | f1 -----+------------- | (2,2),(0,0) | (3,3),(1,1) (2 rows) --- area >= +-- area >= SELECT '' AS four, b.f1 - FROM BOX_TBL b -- zero area + FROM BOX_TBL b -- zero area WHERE b.f1 >= box '(3.5,3.0,4.5,3.0)'; four | f1 ------+--------------------- @@ -145,7 +145,7 @@ SELECT '' AS four, b.f1 | (3,3),(3,3) (4 rows) --- right of +-- right of SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE box '(3.0,3.0,5.0,5.0)' >> b.f1; @@ -155,7 +155,7 @@ SELECT '' AS two, b.f1 | (2.5,3.5),(2.5,2.5) (2 rows) --- contained in +-- contained in SELECT '' AS three, b.f1 FROM BOX_TBL b WHERE b.f1 <@ box '(0,0,3,3)'; @@ -166,7 +166,7 @@ SELECT '' AS three, b.f1 | (3,3),(3,3) (3 rows) --- contains +-- contains SELECT '' AS three, b.f1 FROM BOX_TBL b WHERE box '(0,0,3,3)' @> b.f1; @@ -177,7 +177,7 @@ SELECT '' AS three, b.f1 | (3,3),(3,3) (3 rows) --- box equality +-- box equality SELECT '' AS one, b.f1 FROM BOX_TBL b WHERE box '(1,1,3,3)' ~= b.f1; @@ -186,7 +186,7 @@ SELECT '' AS one, b.f1 | (3,3),(1,1) (1 row) --- center of box, left unary operator +-- center of box, left unary operator SELECT '' AS four, @@(b1.f1) AS p FROM BOX_TBL b1; four | p @@ -197,9 +197,9 @@ SELECT '' AS four, @@(b1.f1) AS p | (3,3) (4 rows) --- wholly-contained +-- wholly-contained SELECT '' AS one, b1.*, b2.* - FROM BOX_TBL b1, BOX_TBL b2 + 
FROM BOX_TBL b1, BOX_TBL b2 WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1; one | f1 | f1 -----+-------------+------------- diff --git a/src/test/regress/expected/char.out b/src/test/regress/expected/char.out index a0ba3d4a8c..991c7717d4 100644 --- a/src/test/regress/expected/char.out +++ b/src/test/regress/expected/char.out @@ -15,13 +15,13 @@ SELECT char 'c' = char 'c' AS true; CREATE TABLE CHAR_TBL(f1 char); INSERT INTO CHAR_TBL (f1) VALUES ('a'); INSERT INTO CHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO CHAR_TBL (f1) VALUES ('1'); INSERT INTO CHAR_TBL (f1) VALUES (2); INSERT INTO CHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO CHAR_TBL (f1) VALUES (''); --- try char's of greater than 1 length +-- try char's of greater than 1 length INSERT INTO CHAR_TBL (f1) VALUES ('cd'); ERROR: value too long for type character(1) INSERT INTO CHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/expected/char_1.out b/src/test/regress/expected/char_1.out index 4cc081deae..8eff75afb5 100644 --- a/src/test/regress/expected/char_1.out +++ b/src/test/regress/expected/char_1.out @@ -15,13 +15,13 @@ SELECT char 'c' = char 'c' AS true; CREATE TABLE CHAR_TBL(f1 char); INSERT INTO CHAR_TBL (f1) VALUES ('a'); INSERT INTO CHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO CHAR_TBL (f1) VALUES ('1'); INSERT INTO CHAR_TBL (f1) VALUES (2); INSERT INTO CHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO CHAR_TBL (f1) VALUES (''); --- try char's of greater than 1 length +-- try char's of greater than 1 length INSERT INTO CHAR_TBL (f1) VALUES ('cd'); ERROR: value too long for type character(1) INSERT INTO CHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/expected/char_2.out b/src/test/regress/expected/char_2.out index 8fe6e07acc..f54736c3e1 100644 --- a/src/test/regress/expected/char_2.out +++ b/src/test/regress/expected/char_2.out @@ -15,13 +15,13 @@ SELECT char 'c' = char 'c' AS true; CREATE TABLE CHAR_TBL(f1 char); INSERT INTO CHAR_TBL (f1) VALUES ('a'); INSERT INTO CHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO CHAR_TBL (f1) VALUES ('1'); INSERT INTO CHAR_TBL (f1) VALUES (2); INSERT INTO CHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO CHAR_TBL (f1) VALUES (''); --- try char's of greater than 1 length +-- try char's of greater than 1 length INSERT INTO CHAR_TBL (f1) VALUES ('cd'); ERROR: value too long for type character(1) INSERT INTO CHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/expected/cluster.out b/src/test/regress/expected/cluster.out index 96bd8164fa..979057d26f 100644 --- a/src/test/regress/expected/cluster.out +++ b/src/test/regress/expected/cluster.out @@ -398,7 +398,7 @@ BEGIN; UPDATE clustertest SET key = 100 WHERE key = 10; -- Test update where the new row version is found first in the scan UPDATE clustertest SET key = 35 WHERE key = 40; --- Test longer update chain +-- Test longer update chain UPDATE clustertest SET key = 60 WHERE key = 50; UPDATE clustertest SET key = 70 WHERE key = 60; UPDATE clustertest SET key = 80 WHERE key = 70; diff --git a/src/test/regress/expected/copyselect.out b/src/test/regress/expected/copyselect.out index 
8a42b0e3d8..cbc140c538 100644 --- a/src/test/regress/expected/copyselect.out +++ b/src/test/regress/expected/copyselect.out @@ -113,7 +113,7 @@ ERROR: cannot copy from view "v_test1" HINT: Try the COPY (SELECT ...) TO variant. \copy: ERROR: cannot copy from view "v_test1" HINT: Try the COPY (SELECT ...) TO variant. --- +-- -- Test \copy (select ...) -- \copy (select "id",'id','id""'||t,(id + 1)*id,t,"test1"."t" from test1 where id=3) to stdout diff --git a/src/test/regress/expected/create_aggregate.out b/src/test/regress/expected/create_aggregate.out index 448e319794..ad1459419f 100644 --- a/src/test/regress/expected/create_aggregate.out +++ b/src/test/regress/expected/create_aggregate.out @@ -3,7 +3,7 @@ -- -- all functions CREATEd CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, + sfunc = int4_avg_accum, basetype = int4, stype = _int8, finalfunc = int8_avg, initcond1 = '{0,0}' ); @@ -14,7 +14,7 @@ COMMENT ON AGGREGATE newavg (int4) IS 'an agg comment'; COMMENT ON AGGREGATE newavg (int4) IS NULL; -- without finalfunc; test obsolete spellings 'sfunc1' etc CREATE AGGREGATE newsum ( - sfunc1 = int4pl, basetype = int4, stype1 = int4, + sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond1 = '0' ); -- zero-argument aggregate diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out index 2275343704..27d5e848e5 100644 --- a/src/test/regress/expected/create_index.out +++ b/src/test/regress/expected/create_index.out @@ -54,8 +54,8 @@ CREATE INDEX gcircleind ON circle_tbl USING gist (f1); CREATE INDEX gpointind ON point_tbl USING gist (f1); CREATE TEMP TABLE gpolygon_tbl AS SELECT polygon(home_base) AS f1 FROM slow_emp4000; -INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); -INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); +INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); +INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); CREATE TEMP TABLE gcircle_tbl AS SELECT circle(home_base) AS f1 FROM slow_emp4000; CREATE INDEX ggpolygonind ON gpolygon_tbl USING gist (f1); @@ -1023,5 +1023,4 @@ SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NUL RESET enable_seqscan; RESET enable_indexscan; RESET enable_bitmapscan; - DROP TABLE onek_with_null; diff --git a/src/test/regress/expected/create_misc.out b/src/test/regress/expected/create_misc.out index 775d1bd1a7..45125fedfd 100644 --- a/src/test/regress/expected/create_misc.out +++ b/src/test/regress/expected/create_misc.out @@ -28,13 +28,13 @@ SELECT * INTO TABLE ramp FROM road WHERE name ~ '.*Ramp'; -INSERT INTO ihighway - SELECT * - FROM road +INSERT INTO ihighway + SELECT * + FROM road WHERE name ~ 'I- .*'; -INSERT INTO shighway - SELECT * - FROM road +INSERT INTO shighway + SELECT * + FROM road WHERE name ~ 'State Hwy.*'; UPDATE shighway SET surface = 'asphalt'; @@ -99,14 +99,14 @@ INSERT INTO f_star (class, a, c, f) INSERT INTO f_star (class, a, e, f) VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon); INSERT INTO f_star (class, c, e, f) - VALUES ('f', 'hi keith'::name, '-8'::int2, + VALUES ('f', 'hi keith'::name, '-8'::int2, '(1111,3333),(2222,4444)'::polygon); INSERT INTO f_star (class, a, c) VALUES ('f', 24, 'hi marc'::name); INSERT INTO f_star (class, a, e) VALUES ('f', 25, '-9'::int2); INSERT INTO f_star (class, a, f) - VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon); + VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon); INSERT INTO f_star (class, c, e) VALUES ('f', 'hi 
allison'::name, '-10'::int2); INSERT INTO f_star (class, c, f) @@ -117,15 +117,15 @@ INSERT INTO f_star (class, e, f) INSERT INTO f_star (class, a) VALUES ('f', 27); INSERT INTO f_star (class, c) VALUES ('f', 'hi carl'::name); INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2); -INSERT INTO f_star (class, f) +INSERT INTO f_star (class, f) VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon); INSERT INTO f_star (class) VALUES ('f'); -- -- for internal portal (cursor) tests -- CREATE TABLE iportaltest ( - i int4, - d float4, + i int4, + d float4, p polygon ); INSERT INTO iportaltest (i, d, p) diff --git a/src/test/regress/expected/create_operator.out b/src/test/regress/expected/create_operator.out index df25f35ae7..8656864655 100644 --- a/src/test/regress/expected/create_operator.out +++ b/src/test/regress/expected/create_operator.out @@ -1,30 +1,30 @@ -- -- CREATE_OPERATOR -- -CREATE OPERATOR ## ( +CREATE OPERATOR ## ( leftarg = path, rightarg = path, procedure = path_inter, - commutator = ## + commutator = ## ); CREATE OPERATOR <% ( leftarg = point, rightarg = widget, procedure = pt_in_widget, commutator = >% , - negator = >=% + negator = >=% ); CREATE OPERATOR @#@ ( - rightarg = int8, -- left unary - procedure = numeric_fac + rightarg = int8, -- left unary + procedure = numeric_fac ); CREATE OPERATOR #@# ( leftarg = int8, -- right unary procedure = numeric_fac ); -CREATE OPERATOR #%# ( - leftarg = int8, -- right unary - procedure = numeric_fac +CREATE OPERATOR #%# ( + leftarg = int8, -- right unary + procedure = numeric_fac ); -- Test comments COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad right unary'; diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out index 6f65885c82..62010a1482 100644 --- a/src/test/regress/expected/create_table.out +++ b/src/test/regress/expected/create_table.out @@ -5,7 +5,7 @@ -- CLASS DEFINITIONS -- CREATE TABLE hobbies_r ( - name text, + name text, person text ); CREATE TABLE equipment_r ( @@ -123,7 +123,7 @@ CREATE TABLE real_city ( -- f inherits from e (three-level single inheritance) -- CREATE TABLE a_star ( - class char, + class char, a int4 ); CREATE TABLE b_star ( @@ -165,7 +165,7 @@ CREATE TABLE hash_f8_heap ( ); -- don't include the hash_ovfl_heap stuff in the distribution -- the data set is too large for what it's worth --- +-- -- CREATE TABLE hash_ovfl_heap ( -- x int4, -- y int4 @@ -183,7 +183,7 @@ CREATE TABLE bt_txt_heap ( random int4 ); CREATE TABLE bt_f8_heap ( - seqno float8, + seqno float8, random int4 ); CREATE TABLE array_op_test ( @@ -196,11 +196,11 @@ CREATE TABLE array_index_op_test ( i int4[], t text[] ); -CREATE TABLE IF NOT EXISTS test_tsvector( - t text, - a tsvector +CREATE TABLE IF NOT EXISTS test_tsvector( + t text, + a tsvector ); -CREATE TABLE IF NOT EXISTS test_tsvector( +CREATE TABLE IF NOT EXISTS test_tsvector( t text ); NOTICE: relation "test_tsvector" already exists, skipping diff --git a/src/test/regress/expected/create_type.out b/src/test/regress/expected/create_type.out index 9d3a82e153..6dfe916985 100644 --- a/src/test/regress/expected/create_type.out +++ b/src/test/regress/expected/create_type.out @@ -7,17 +7,17 @@ -- of the "old style" approach of making the functions first. 
-- CREATE TYPE widget ( - internallength = 24, + internallength = 24, input = widget_in, output = widget_out, typmod_in = numerictypmodin, typmod_out = numerictypmodout, alignment = double ); -CREATE TYPE city_budget ( - internallength = 16, - input = int44in, - output = int44out, +CREATE TYPE city_budget ( + internallength = 16, + input = int44in, + output = int44out, element = int4, category = 'x', -- just to verify the system will take it preferred = true -- ditto diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out index 04383e43d2..f2c06854d0 100644 --- a/src/test/regress/expected/create_view.out +++ b/src/test/regress/expected/create_view.out @@ -4,11 +4,11 @@ -- (this also tests the query rewrite system) -- CREATE VIEW street AS - SELECT r.name, r.thepath, c.cname AS cname + SELECT r.name, r.thepath, c.cname AS cname FROM ONLY road r, real_city c WHERE c.outline ## r.thepath; CREATE VIEW iexit AS - SELECT ih.name, ih.thepath, + SELECT ih.name, ih.thepath, interpt_pp(ih.thepath, r.thepath) AS exit FROM ihighway ih, ramp r WHERE ih.thepath ## r.thepath; @@ -58,7 +58,7 @@ ERROR: cannot change name of view column "a" to "?column?" CREATE OR REPLACE VIEW viewtest AS SELECT a, b::numeric FROM viewtest_tbl; ERROR: cannot change data type of view column "b" from integer to numeric --- should work +-- should work CREATE OR REPLACE VIEW viewtest AS SELECT a, b, 0 AS c FROM viewtest_tbl; DROP VIEW viewtest; @@ -133,7 +133,7 @@ CREATE VIEW v9 AS SELECT seq1.is_called FROM seq1; CREATE VIEW v13_temp AS SELECT seq1_temp.is_called FROM seq1_temp; NOTICE: view "v13_temp" will be a temporary view SELECT relname FROM pg_class - WHERE relname LIKE 'v_' + WHERE relname LIKE 'v_' AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'temp_view_test') ORDER BY relname; relname @@ -150,7 +150,7 @@ SELECT relname FROM pg_class (9 rows) SELECT relname FROM pg_class - WHERE relname LIKE 'v%' + WHERE relname LIKE 'v%' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%') ORDER BY relname; relname @@ -199,7 +199,7 @@ SELECT relname FROM pg_class (4 rows) SELECT relname FROM pg_class - WHERE relname LIKE 'temporal%' + WHERE relname LIKE 'temporal%' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%') ORDER BY relname; relname diff --git a/src/test/regress/expected/drop_if_exists.out b/src/test/regress/expected/drop_if_exists.out index 092c90403a..2a23b4cfe6 100644 --- a/src/test/regress/expected/drop_if_exists.out +++ b/src/test/regress/expected/drop_if_exists.out @@ -1,6 +1,6 @@ --- +-- -- IF EXISTS tests --- +-- -- table (will be really dropped at the end) DROP TABLE test_exists; ERROR: table "test_exists" does not exist diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out index 55822e0b9b..4a10c6ae8a 100644 --- a/src/test/regress/expected/errors.out +++ b/src/test/regress/expected/errors.out @@ -10,19 +10,17 @@ select 1; -- -- UNSUPPORTED STUFF - --- doesn't work +-- doesn't work -- notify pg_class -- -- -- SELECT - --- missing relation name +-- missing relation name select; ERROR: syntax error at or near ";" LINE 1: select; ^ --- no such relation +-- no such relation select * from nonesuch; ERROR: relation "nonesuch" does not exist LINE 1: select * from nonesuch; @@ -59,74 +57,70 @@ LINE 1: select distinct on (foobar) * from pg_database; ^ -- -- DELETE - --- missing relation name (this had better not wildcard!) 
+-- missing relation name (this had better not wildcard!) delete from; ERROR: syntax error at or near ";" LINE 1: delete from; ^ --- no such relation +-- no such relation delete from nonesuch; ERROR: relation "nonesuch" does not exist LINE 1: delete from nonesuch; ^ -- -- DROP - --- missing relation name (this had better not wildcard!) +-- missing relation name (this had better not wildcard!) drop table; ERROR: syntax error at or near ";" LINE 1: drop table; ^ --- no such relation +-- no such relation drop table nonesuch; ERROR: table "nonesuch" does not exist -- -- ALTER TABLE - --- relation renaming --- missing relation name +-- relation renaming +-- missing relation name alter table rename; ERROR: syntax error at or near ";" LINE 1: alter table rename; ^ --- no such relation +-- no such relation alter table nonesuch rename to newnonesuch; ERROR: relation "nonesuch" does not exist --- no such relation +-- no such relation alter table nonesuch rename to stud_emp; ERROR: relation "nonesuch" does not exist --- conflict +-- conflict alter table stud_emp rename to aggtest; ERROR: relation "aggtest" already exists --- self-conflict +-- self-conflict alter table stud_emp rename to stud_emp; ERROR: relation "stud_emp" already exists --- attribute renaming --- no such relation +-- attribute renaming +-- no such relation alter table nonesuchrel rename column nonesuchatt to newnonesuchatt; ERROR: relation "nonesuchrel" does not exist --- no such attribute +-- no such attribute alter table emp rename column nonesuchatt to newnonesuchatt; ERROR: column "nonesuchatt" does not exist --- conflict +-- conflict alter table emp rename column salary to manager; ERROR: column "manager" of relation "stud_emp" already exists --- conflict +-- conflict alter table emp rename column salary to oid; ERROR: column "oid" of relation "stud_emp" already exists -- -- TRANSACTION STUFF - --- not in a xact +-- not in a xact abort; NOTICE: there is no transaction in progress --- not in a xact +-- not in a xact end; WARNING: there is no transaction in progress -- -- CREATE AGGREGATE --- sfunc/finalfunc type disagreement +-- sfunc/finalfunc type disagreement create aggregate newavg2 (sfunc = int4pl, basetype = int4, stype = int4, @@ -140,24 +134,22 @@ create aggregate newcnt1 (sfunc = int4inc, ERROR: aggregate input type must be specified -- -- DROP INDEX - --- missing index name +-- missing index name drop index; ERROR: syntax error at or near ";" LINE 1: drop index; ^ --- bad index name +-- bad index name drop index 314159; ERROR: syntax error at or near "314159" LINE 1: drop index 314159; ^ --- no such index +-- no such index drop index nonesuch; ERROR: index "nonesuch" does not exist -- -- DROP AGGREGATE - --- missing aggregate name +-- missing aggregate name drop aggregate; ERROR: syntax error at or near ";" LINE 1: drop aggregate; @@ -167,7 +159,7 @@ drop aggregate newcnt1; ERROR: syntax error at or near ";" LINE 1: drop aggregate newcnt1; ^ --- bad aggregate name +-- bad aggregate name drop aggregate 314159 (int); ERROR: syntax error at or near "314159" LINE 1: drop aggregate 314159 (int); @@ -175,7 +167,7 @@ LINE 1: drop aggregate 314159 (int); -- bad aggregate type drop aggregate newcnt (nonesuch); ERROR: type "nonesuch" does not exist --- no such aggregate +-- no such aggregate drop aggregate nonesuch (int4); ERROR: aggregate nonesuch(integer) does not exist -- no such aggregate for type @@ -183,114 +175,110 @@ drop aggregate newcnt (float4); ERROR: aggregate newcnt(real) does not exist -- -- DROP FUNCTION - --- 
missing function name +-- missing function name drop function (); ERROR: syntax error at or near "(" LINE 1: drop function (); ^ --- bad function name +-- bad function name drop function 314159(); ERROR: syntax error at or near "314159" LINE 1: drop function 314159(); ^ --- no such function +-- no such function drop function nonesuch(); ERROR: function nonesuch() does not exist -- -- DROP TYPE - --- missing type name +-- missing type name drop type; ERROR: syntax error at or near ";" LINE 1: drop type; ^ --- bad type name +-- bad type name drop type 314159; ERROR: syntax error at or near "314159" LINE 1: drop type 314159; ^ --- no such type +-- no such type drop type nonesuch; ERROR: type "nonesuch" does not exist -- -- DROP OPERATOR - --- missing everything +-- missing everything drop operator; ERROR: syntax error at or near ";" LINE 1: drop operator; ^ --- bad operator name +-- bad operator name drop operator equals; ERROR: syntax error at or near ";" LINE 1: drop operator equals; ^ --- missing type list +-- missing type list drop operator ===; ERROR: syntax error at or near ";" LINE 1: drop operator ===; ^ --- missing parentheses +-- missing parentheses drop operator int4, int4; ERROR: syntax error at or near "," LINE 1: drop operator int4, int4; ^ --- missing operator name +-- missing operator name drop operator (int4, int4); ERROR: syntax error at or near "(" LINE 1: drop operator (int4, int4); ^ --- missing type list contents +-- missing type list contents drop operator === (); ERROR: syntax error at or near ")" LINE 1: drop operator === (); ^ --- no such operator +-- no such operator drop operator === (int4); ERROR: missing argument LINE 1: drop operator === (int4); ^ HINT: Use NONE to denote the missing argument of a unary operator. --- no such operator by that name +-- no such operator by that name drop operator === (int4, int4); ERROR: operator does not exist: integer === integer --- no such type1 +-- no such type1 drop operator = (nonesuch); ERROR: missing argument LINE 1: drop operator = (nonesuch); ^ HINT: Use NONE to denote the missing argument of a unary operator. 
--- no such type1 +-- no such type1 drop operator = ( , int4); ERROR: syntax error at or near "," LINE 1: drop operator = ( , int4); ^ --- no such type1 +-- no such type1 drop operator = (nonesuch, int4); ERROR: type "nonesuch" does not exist --- no such type2 +-- no such type2 drop operator = (int4, nonesuch); ERROR: type "nonesuch" does not exist --- no such type2 +-- no such type2 drop operator = (int4, ); ERROR: syntax error at or near ")" LINE 1: drop operator = (int4, ); ^ -- -- DROP RULE - --- missing rule name +-- missing rule name drop rule; ERROR: syntax error at or near ";" LINE 1: drop rule; ^ --- bad rule name +-- bad rule name drop rule 314159; ERROR: syntax error at or near "314159" LINE 1: drop rule 314159; ^ --- no such rule +-- no such rule drop rule nonesuch on noplace; ERROR: relation "noplace" does not exist -- these postquel variants are no longer supported @@ -360,7 +348,7 @@ VALUES(123); ERROR: syntax error at or near "123" LINE 1: INSERT INTO 123 ^ -INSERT INTO foo +INSERT INTO foo VALUES(123) 123 ; ERROR: syntax error at or near "123" @@ -375,11 +363,11 @@ ERROR: syntax error at or near "NUL" LINE 3: id3 INTEGER NOT NUL, ^ -- long line to be truncated on the left -CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); ERROR: syntax error at or near "NUL" -LINE 1: ...T NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, - ^ +LINE 1: ...OT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ^ -- long line to be truncated on the right CREATE TABLE foo( id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY); @@ -394,24 +382,24 @@ LINE 1: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... -- long line to be truncated on the left, many lines CREATE TEMPORARY -TABLE -foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, -id4 INT4 -UNIQUE -NOT -NULL, -id5 TEXT -UNIQUE -NOT +TABLE +foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +id4 INT4 +UNIQUE +NOT +NULL, +id5 TEXT +UNIQUE +NOT NULL) ; ERROR: syntax error at or near "NUL" -LINE 4: ...T NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, - ^ +LINE 4: ...OT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ^ -- long line to be truncated on the right, many lines -CREATE +CREATE TEMPORARY -TABLE +TABLE foo( id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) ; @@ -419,40 +407,40 @@ ERROR: syntax error at or near "NUL" LINE 5: id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQ... ^ -- long line to be truncated both ways, many lines -CREATE +CREATE TEMPORARY -TABLE +TABLE foo -(id -INT4 -UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, -idz INT4 UNIQUE NOT NULL, +(id +INT4 +UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, idv INT4 UNIQUE NOT NULL); ERROR: syntax error at or near "NUL" LINE 7: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... ^ -- more than 10 lines... 
-CREATE +CREATE TEMPORARY -TABLE +TABLE foo -(id -INT4 -UNIQUE -NOT +(id +INT4 +UNIQUE +NOT NULL -, +, idm -INT4 -UNIQUE -NOT +INT4 +UNIQUE +NOT NULL, -idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, -idz INT4 UNIQUE NOT NULL, -idv -INT4 -UNIQUE -NOT +idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, +idv +INT4 +UNIQUE +NOT NULL); ERROR: syntax error at or near "NUL" LINE 16: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... diff --git a/src/test/regress/expected/float4-exp-three-digits.out b/src/test/regress/expected/float4-exp-three-digits.out index ff680f4792..f17f95697a 100644 --- a/src/test/regress/expected/float4-exp-three-digits.out +++ b/src/test/regress/expected/float4-exp-three-digits.out @@ -7,7 +7,7 @@ INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 '); INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); --- test for over and under flow +-- test for over and under flow INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); ERROR: value out of range: overflow LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); @@ -233,7 +233,7 @@ SELECT '' AS five, * FROM FLOAT4_TBL; | 1.23457e-020 (5 rows) --- test the unary float4abs operator +-- test the unary float4abs operator SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f; five | f1 | abs_f1 ------+--------------+-------------- diff --git a/src/test/regress/expected/float4.out b/src/test/regress/expected/float4.out index dd8066a79c..fd46a4a1db 100644 --- a/src/test/regress/expected/float4.out +++ b/src/test/regress/expected/float4.out @@ -7,7 +7,7 @@ INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 '); INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); --- test for over and under flow +-- test for over and under flow INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); ERROR: value out of range: overflow LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); @@ -233,7 +233,7 @@ SELECT '' AS five, * FROM FLOAT4_TBL; | 1.23457e-20 (5 rows) --- test the unary float4abs operator +-- test the unary float4abs operator SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f; five | f1 | abs_f1 ------+-------------+------------- diff --git a/src/test/regress/expected/float8-exp-three-digits-win32.out b/src/test/regress/expected/float8-exp-three-digits-win32.out index a4b8b47bad..2dd648d6b9 100644 --- a/src/test/regress/expected/float8-exp-three-digits-win32.out +++ b/src/test/regress/expected/float8-exp-three-digits-win32.out @@ -184,7 +184,7 @@ SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; | 1.2345678901234e-200 (4 rows) -SELECT '' AS three, f.f1, f.f1 * '-10' AS x +SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; three | f1 | x @@ -231,8 +231,8 @@ SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 | 1008618.49 (1 row) --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 +-- absolute value +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT8_TBL f; five | f1 | abs_f1 ------+----------------------+---------------------- @@ -243,7 +243,7 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 | 1.2345678901234e-200 | 1.2345678901234e-200 (5 
rows) --- truncate +-- truncate SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 FROM FLOAT8_TBL f; five | f1 | trunc_f1 @@ -255,7 +255,7 @@ SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 | 1.2345678901234e-200 | 0 (5 rows) --- round +-- round SELECT '' AS five, f.f1, round(f.f1) AS round_f1 FROM FLOAT8_TBL f; five | f1 | round_f1 @@ -310,7 +310,7 @@ select sign(f1) as sign_f1 from float8_tbl f; 1 (5 rows) --- square root +-- square root SELECT sqrt(float8 '64') AS eight; eight ------- @@ -340,7 +340,7 @@ SELECT power(float8 '144', float8 '0.5'); 12 (1 row) --- take exp of ln(f.f1) +-- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; @@ -351,7 +351,7 @@ SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 | 1.2345678901234e-200 | 1.23456789012339e-200 (3 rows) --- cube root +-- cube root SELECT ||/ float8 '27' AS three; three ------- @@ -409,7 +409,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL; | -1.2345678901234e-200 (5 rows) --- test for over- and underflow +-- test for over- and underflow INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); ERROR: "10e400" is out of range for type double precision LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); diff --git a/src/test/regress/expected/float8-small-is-zero.out b/src/test/regress/expected/float8-small-is-zero.out index 6bddbc9290..5da743374c 100644 --- a/src/test/regress/expected/float8-small-is-zero.out +++ b/src/test/regress/expected/float8-small-is-zero.out @@ -188,7 +188,7 @@ SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; | 1.2345678901234e-200 (4 rows) -SELECT '' AS three, f.f1, f.f1 * '-10' AS x +SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; three | f1 | x @@ -235,8 +235,8 @@ SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 | 1008618.49 (1 row) --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 +-- absolute value +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT8_TBL f; five | f1 | abs_f1 ------+----------------------+---------------------- @@ -247,7 +247,7 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 | 1.2345678901234e-200 | 1.2345678901234e-200 (5 rows) --- truncate +-- truncate SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 FROM FLOAT8_TBL f; five | f1 | trunc_f1 @@ -259,7 +259,7 @@ SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 | 1.2345678901234e-200 | 0 (5 rows) --- round +-- round SELECT '' AS five, f.f1, round(f.f1) AS round_f1 FROM FLOAT8_TBL f; five | f1 | round_f1 @@ -314,7 +314,7 @@ select sign(f1) as sign_f1 from float8_tbl f; 1 (5 rows) --- square root +-- square root SELECT sqrt(float8 '64') AS eight; eight ------- @@ -344,7 +344,7 @@ SELECT power(float8 '144', float8 '0.5'); 12 (1 row) --- take exp of ln(f.f1) +-- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; @@ -355,7 +355,7 @@ SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 | 1.2345678901234e-200 | 1.23456789012339e-200 (3 rows) --- cube root +-- cube root SELECT ||/ float8 '27' AS three; three ------- @@ -413,7 +413,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL; | -1.2345678901234e-200 (5 rows) --- test for over- and underflow +-- test for over- and underflow INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); ERROR: "10e400" is out of range for type double precision LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); diff --git a/src/test/regress/expected/float8-small-is-zero_1.out b/src/test/regress/expected/float8-small-is-zero_1.out index f48d280845..530842e102 100644 --- 
a/src/test/regress/expected/float8-small-is-zero_1.out +++ b/src/test/regress/expected/float8-small-is-zero_1.out @@ -188,7 +188,7 @@ SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; | 1.2345678901234e-200 (4 rows) -SELECT '' AS three, f.f1, f.f1 * '-10' AS x +SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; three | f1 | x @@ -235,8 +235,8 @@ SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 | 1008618.49 (1 row) --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 +-- absolute value +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT8_TBL f; five | f1 | abs_f1 ------+----------------------+---------------------- @@ -247,7 +247,7 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 | 1.2345678901234e-200 | 1.2345678901234e-200 (5 rows) --- truncate +-- truncate SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 FROM FLOAT8_TBL f; five | f1 | trunc_f1 @@ -259,7 +259,7 @@ SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 | 1.2345678901234e-200 | 0 (5 rows) --- round +-- round SELECT '' AS five, f.f1, round(f.f1) AS round_f1 FROM FLOAT8_TBL f; five | f1 | round_f1 @@ -314,7 +314,7 @@ select sign(f1) as sign_f1 from float8_tbl f; 1 (5 rows) --- square root +-- square root SELECT sqrt(float8 '64') AS eight; eight ------- @@ -344,7 +344,7 @@ SELECT power(float8 '144', float8 '0.5'); 12 (1 row) --- take exp of ln(f.f1) +-- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; @@ -355,7 +355,7 @@ SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 | 1.2345678901234e-200 | 1.23456789012339e-200 (3 rows) --- cube root +-- cube root SELECT ||/ float8 '27' AS three; three ------- @@ -413,7 +413,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL; | -1.2345678901234e-200 (5 rows) --- test for over- and underflow +-- test for over- and underflow INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); ERROR: "10e400" is out of range for type double precision LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); diff --git a/src/test/regress/expected/float8.out b/src/test/regress/expected/float8.out index d8350d100e..6221538af5 100644 --- a/src/test/regress/expected/float8.out +++ b/src/test/regress/expected/float8.out @@ -184,7 +184,7 @@ SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; | 1.2345678901234e-200 (4 rows) -SELECT '' AS three, f.f1, f.f1 * '-10' AS x +SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; three | f1 | x @@ -231,8 +231,8 @@ SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 | 1008618.49 (1 row) --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 +-- absolute value +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT8_TBL f; five | f1 | abs_f1 ------+----------------------+---------------------- @@ -243,7 +243,7 @@ SELECT '' AS five, f.f1, @f.f1 AS abs_f1 | 1.2345678901234e-200 | 1.2345678901234e-200 (5 rows) --- truncate +-- truncate SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 FROM FLOAT8_TBL f; five | f1 | trunc_f1 @@ -255,7 +255,7 @@ SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 | 1.2345678901234e-200 | 0 (5 rows) --- round +-- round SELECT '' AS five, f.f1, round(f.f1) AS round_f1 FROM FLOAT8_TBL f; five | f1 | round_f1 @@ -310,7 +310,7 @@ select sign(f1) as sign_f1 from float8_tbl f; 1 (5 rows) --- square root +-- square root SELECT sqrt(float8 '64') AS eight; eight ------- @@ -340,7 +340,7 @@ SELECT power(float8 '144', float8 '0.5'); 12 (1 row) --- take exp of ln(f.f1) +-- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS 
exp_ln_f1 FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; @@ -351,7 +351,7 @@ SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 | 1.2345678901234e-200 | 1.23456789012339e-200 (3 rows) --- cube root +-- cube root SELECT ||/ float8 '27' AS three; three ------- @@ -409,7 +409,7 @@ SELECT '' AS five, * FROM FLOAT8_TBL; | -1.2345678901234e-200 (5 rows) --- test for over- and underflow +-- test for over- and underflow INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); ERROR: "10e400" is out of range for type double precision LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out index 0367f53233..87d573b6ab 100644 --- a/src/test/regress/expected/foreign_key.out +++ b/src/test/regress/expected/foreign_key.out @@ -62,7 +62,7 @@ DROP TABLE PKTABLE; -- CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable" -CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL); -- Test comments COMMENT ON CONSTRAINT constrname_wrong ON FKTABLE IS 'fk constraint comment'; @@ -175,7 +175,7 @@ DROP TABLE FKTABLE; -- CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable" -CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2) +CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT); -- Insert a value in PKTABLE for default INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!'); @@ -351,7 +351,7 @@ INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); @@ -416,7 +416,7 @@ INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); @@ -513,8 +513,8 @@ INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); -INSERT INTO FKTABLE VALUES (2, 3, 4, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (2, 3, 4, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); @@ -618,8 +618,8 @@ INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5'); -- Insert 
Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); -INSERT INTO FKTABLE VALUES (2, 3, 4, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (2, 3, 4, 1); INSERT INTO FKTABLE VALUES (2, 4, 5, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); @@ -745,7 +745,7 @@ DROP TABLE PKTABLE; -- -- Tests for mismatched types -- --- Basic one column, two table setup +-- Basic one column, two table setup CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable" INSERT INTO PKTABLE VALUES(42); @@ -831,7 +831,7 @@ CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY ptest4) REFERENCES pktable(ptest1, ptest2)); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable" DROP TABLE PKTABLE; --- And this, +-- And this, CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3, ptest4) REFERENCES pktable); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pktable_pkey" for table "pktable" diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out index 432c932480..22835f8ea4 100644 --- a/src/test/regress/expected/hash_index.out +++ b/src/test/regress/expected/hash_index.out @@ -108,10 +108,10 @@ SELECT h.seqno AS i1492, h.random AS i1 1492 | 1 (1 row) -UPDATE hash_i4_heap - SET seqno = 20000 +UPDATE hash_i4_heap + SET seqno = 20000 WHERE hash_i4_heap.random = 1492795354; -SELECT h.seqno AS i20000 +SELECT h.seqno AS i20000 FROM hash_i4_heap h WHERE h.random = 1492795354; i20000 @@ -119,7 +119,7 @@ SELECT h.seqno AS i20000 20000 (1 row) -UPDATE hash_name_heap +UPDATE hash_name_heap SET random = '0123456789abcdef'::name WHERE hash_name_heap.seqno = 6543; SELECT h.seqno AS i6543, h.random AS c0_to_f @@ -134,7 +134,7 @@ UPDATE hash_name_heap SET seqno = 20000 WHERE hash_name_heap.random = '76652222'::name; -- --- this is the row we just replaced; index scan should return zero rows +-- this is the row we just replaced; index scan should return zero rows -- SELECT h.seqno AS emptyset FROM hash_name_heap h @@ -143,7 +143,7 @@ SELECT h.seqno AS emptyset ---------- (0 rows) -UPDATE hash_txt_heap +UPDATE hash_txt_heap SET random = '0123456789abcdefghijklmnop'::text WHERE hash_txt_heap.seqno = 4002; SELECT h.seqno AS i4002, h.random AS c0_to_p @@ -168,7 +168,7 @@ SELECT h.seqno AS t20000 UPDATE hash_f8_heap SET random = '-1234.1234'::float8 WHERE hash_f8_heap.seqno = 8906; -SELECT h.seqno AS i8096, h.random AS f1234_1234 +SELECT h.seqno AS i8096, h.random AS f1234_1234 FROM hash_f8_heap h WHERE h.random = '-1234.1234'::float8; i8096 | f1234_1234 @@ -176,7 +176,7 @@ SELECT h.seqno AS i8096, h.random AS f1234_1234 8906 | -1234.1234 (1 row) -UPDATE hash_f8_heap +UPDATE hash_f8_heap SET seqno = 20000 WHERE hash_f8_heap.random = '488912369'::float8; SELECT h.seqno AS f20000 diff --git a/src/test/regress/expected/inet.out b/src/test/regress/expected/inet.out index abb59d4acf..356a397822 100644 --- a/src/test/regress/expected/inet.out +++ b/src/test/regress/expected/inet.out @@ -177,7 +177,7 @@ SELECT '' AS six, c AS cidr, i AS inet FROM INET_TBL (2 rows) SELECT '' AS ten, i, c, - i < c AS lt, i <= c AS le, i = c AS eq, + i < c AS lt, i <= c AS le, i = c AS eq, i >= c AS ge, i > c AS gt, i <> c AS ne, i << c AS sb, i <<= c AS sbe, i >> c AS sup, i >>= c AS spe diff --git 
a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out index 581cc7d0e7..d59ca449dc 100644 --- a/src/test/regress/expected/inherit.out +++ b/src/test/regress/expected/inherit.out @@ -592,7 +592,7 @@ CREATE TABLE inhx (xx text DEFAULT 'text'); * Test double inheritance * * Ensure that defaults are NOT included unless - * INCLUDING DEFAULTS is specified + * INCLUDING DEFAULTS is specified */ CREATE TABLE inhe (ee text, LIKE inhx) inherits (b); INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); diff --git a/src/test/regress/expected/int2.out b/src/test/regress/expected/int2.out index 80e6ed9fd6..021d476822 100644 --- a/src/test/regress/expected/int2.out +++ b/src/test/regress/expected/int2.out @@ -141,14 +141,14 @@ SELECT '' AS three, i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0'; | 32767 (3 rows) --- positive odds +-- positive odds SELECT '' AS one, i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1'; one | f1 -----+------- | 32767 (1 row) --- any evens +-- any evens SELECT '' AS three, i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0'; three | f1 -------+------- diff --git a/src/test/regress/expected/int8-exp-three-digits.out b/src/test/regress/expected/int8-exp-three-digits.out index 2108d08d87..b523bfcc01 100644 --- a/src/test/regress/expected/int8-exp-three-digits.out +++ b/src/test/regress/expected/int8-exp-three-digits.out @@ -456,7 +456,7 @@ SELECT max(q1), max(q2) FROM INT8_TBL; -- TO_CHAR() -- -SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') +SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') FROM INT8_TBL; to_char_1 | to_char | to_char -----------+------------------------+------------------------ @@ -467,8 +467,8 @@ SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999 | 4,567,890,123,456,789 | -4,567,890,123,456,789 (5 rows) -SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') - FROM INT8_TBL; +SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') + FROM INT8_TBL; to_char_2 | to_char | to_char -----------+--------------------------------+-------------------------------- | 123.000,000 | 456.000,000 @@ -478,7 +478,7 @@ SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2 | 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000 (5 rows) -SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') +SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') FROM INT8_TBL; to_char_3 | to_char | to_char -----------+--------------------+------------------------ @@ -489,7 +489,7 @@ SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 | <4567890123456789> | 4567890123456789.000 (5 rows) -SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') +SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') FROM INT8_TBL; to_char_4 | to_char | to_char -----------+-------------------+------------------- @@ -500,7 +500,7 @@ SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * | 4567890123456789- | +4567890123456789 (5 rows) -SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; +SELECT '' AS 
to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; to_char_5 | to_char -----------+------------------- | 456 @@ -530,7 +530,7 @@ SELECT '' AS to_char_7, to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL; | <4567890123456789> (5 rows) -SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; +SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; to_char_8 | to_char -----------+--------------------- | + 456th @@ -540,7 +540,7 @@ SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; | -4567890123456789 (5 rows) -SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; to_char_9 | to_char -----------+------------------- | 0000000000000456 @@ -550,7 +550,7 @@ SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; | -4567890123456789 (5 rows) -SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; to_char_10 | to_char ------------+------------------- | +0000000000000456 @@ -560,7 +560,7 @@ SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; | -4567890123456789 (5 rows) -SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; to_char_11 | to_char ------------+------------------- | 0000000000000456 @@ -580,7 +580,7 @@ SELECT '' AS to_char_12, to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL; | -4567890123456789.000 (5 rows) -SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; +SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; to_char_13 | to_char ------------+------------------------ | 456.000 diff --git a/src/test/regress/expected/int8.out b/src/test/regress/expected/int8.out index 9272dd645f..811d6a5520 100644 --- a/src/test/regress/expected/int8.out +++ b/src/test/regress/expected/int8.out @@ -456,7 +456,7 @@ SELECT max(q1), max(q2) FROM INT8_TBL; -- TO_CHAR() -- -SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') +SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') FROM INT8_TBL; to_char_1 | to_char | to_char -----------+------------------------+------------------------ @@ -467,8 +467,8 @@ SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999 | 4,567,890,123,456,789 | -4,567,890,123,456,789 (5 rows) -SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') - FROM INT8_TBL; +SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') + FROM INT8_TBL; to_char_2 | to_char | to_char -----------+--------------------------------+-------------------------------- | 123.000,000 | 456.000,000 @@ -478,7 +478,7 @@ SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2 | 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000 (5 rows) -SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') +SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') FROM INT8_TBL; to_char_3 | to_char | to_char -----------+--------------------+------------------------ @@ -489,7 +489,7 @@ SELECT '' AS to_char_3, to_char( (q1 * 
-1), '9999999999999999PR'), to_char( (q2 | <4567890123456789> | 4567890123456789.000 (5 rows) -SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') +SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') FROM INT8_TBL; to_char_4 | to_char | to_char -----------+-------------------+------------------- @@ -500,7 +500,7 @@ SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * | 4567890123456789- | +4567890123456789 (5 rows) -SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; to_char_5 | to_char -----------+------------------- | 456 @@ -530,7 +530,7 @@ SELECT '' AS to_char_7, to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL; | <4567890123456789> (5 rows) -SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; +SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; to_char_8 | to_char -----------+--------------------- | + 456th @@ -540,7 +540,7 @@ SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; | -4567890123456789 (5 rows) -SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; to_char_9 | to_char -----------+------------------- | 0000000000000456 @@ -550,7 +550,7 @@ SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; | -4567890123456789 (5 rows) -SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; to_char_10 | to_char ------------+------------------- | +0000000000000456 @@ -560,7 +560,7 @@ SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; | -4567890123456789 (5 rows) -SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; to_char_11 | to_char ------------+------------------- | 0000000000000456 @@ -580,7 +580,7 @@ SELECT '' AS to_char_12, to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL; | -4567890123456789.000 (5 rows) -SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; +SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; to_char_13 | to_char ------------+------------------------ | 456.000 diff --git a/src/test/regress/expected/interval.out b/src/test/regress/expected/interval.out index a6f50b4622..e7e2181333 100644 --- a/src/test/regress/expected/interval.out +++ b/src/test/regress/expected/interval.out @@ -128,7 +128,7 @@ SELECT '' AS one, * FROM INTERVAL_TBL | 34 years (1 row) -SELECT '' AS five, * FROM INTERVAL_TBL +SELECT '' AS five, * FROM INTERVAL_TBL WHERE INTERVAL_TBL.f1 >= interval '@ 1 month'; five | f1 ------+----------------- @@ -208,11 +208,11 @@ SELECT '' AS fortyfive, r1.*, r2.* (45 rows) -- Test multiplication and division with intervals. --- Floating point arithmetic rounding errors can lead to unexpected results, --- though the code attempts to do the right thing and round up to days and --- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'. 
--- Note that it is expected for some day components to be greater than 29 and --- some time components be greater than 23:59:59 due to how intervals are +-- Floating point arithmetic rounding errors can lead to unexpected results, +-- though the code attempts to do the right thing and round up to days and +-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'. +-- Note that it is expected for some day components to be greater than 29 and +-- some time components be greater than 23:59:59 due to how intervals are -- stored internally. CREATE TABLE INTERVAL_MULDIV_TBL (span interval); COPY INTERVAL_MULDIV_TBL FROM STDIN; @@ -753,7 +753,7 @@ select interval '1 year 2 mons 3 days 04:05:06.699999'; @ 1 year 2 mons 3 days 4 hours 5 mins 6.699999 secs (1 row) -select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds'; +select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds'; interval | interval | interval ------------+------------+------------ @ 0.7 secs | @ 0.7 secs | @ 0.7 secs diff --git a/src/test/regress/expected/limit.out b/src/test/regress/expected/limit.out index c33ebe0396..0c06ecba14 100644 --- a/src/test/regress/expected/limit.out +++ b/src/test/regress/expected/limit.out @@ -2,8 +2,8 @@ -- LIMIT -- Check the LIMIT/OFFSET feature of SELECT -- -SELECT ''::text AS two, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 50 +SELECT ''::text AS two, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 50 ORDER BY unique1 LIMIT 2; two | unique1 | unique2 | stringu1 -----+---------+---------+---------- @@ -11,8 +11,8 @@ SELECT ''::text AS two, unique1, unique2, stringu1 | 52 | 985 | ACAAAA (2 rows) -SELECT ''::text AS five, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 60 +SELECT ''::text AS five, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 60 ORDER BY unique1 LIMIT 5; five | unique1 | unique2 | stringu1 ------+---------+---------+---------- @@ -23,7 +23,7 @@ SELECT ''::text AS five, unique1, unique2, stringu1 | 65 | 64 | NCAAAA (5 rows) -SELECT ''::text AS two, unique1, unique2, stringu1 +SELECT ''::text AS two, unique1, unique2, stringu1 FROM onek WHERE unique1 > 60 AND unique1 < 63 ORDER BY unique1 LIMIT 5; two | unique1 | unique2 | stringu1 @@ -32,8 +32,8 @@ SELECT ''::text AS two, unique1, unique2, stringu1 | 62 | 633 | KCAAAA (2 rows) -SELECT ''::text AS three, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 100 +SELECT ''::text AS three, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 100 ORDER BY unique1 LIMIT 3 OFFSET 20; three | unique1 | unique2 | stringu1 -------+---------+---------+---------- @@ -42,15 +42,15 @@ SELECT ''::text AS three, unique1, unique2, stringu1 | 123 | 777 | TEAAAA (3 rows) -SELECT ''::text AS zero, unique1, unique2, stringu1 - FROM onek WHERE unique1 < 50 +SELECT ''::text AS zero, unique1, unique2, stringu1 + FROM onek WHERE unique1 < 50 ORDER BY unique1 DESC LIMIT 8 OFFSET 99; zero | unique1 | unique2 | stringu1 ------+---------+---------+---------- (0 rows) -SELECT ''::text AS eleven, unique1, unique2, stringu1 - FROM onek WHERE unique1 < 50 +SELECT ''::text AS eleven, unique1, unique2, stringu1 + FROM onek WHERE unique1 < 50 ORDER BY unique1 DESC LIMIT 20 OFFSET 39; eleven | unique1 | unique2 | stringu1 --------+---------+---------+---------- @@ -67,7 +67,7 @@ SELECT ''::text AS eleven, unique1, unique2, stringu1 | 0 | 998 | AAAAAA (11 rows) -SELECT ''::text AS ten, unique1, unique2, stringu1 +SELECT ''::text AS ten, unique1, unique2, stringu1 FROM onek ORDER BY 
unique1 OFFSET 990; ten | unique1 | unique2 | stringu1 @@ -84,7 +84,7 @@ SELECT ''::text AS ten, unique1, unique2, stringu1 | 999 | 152 | LMAAAA (10 rows) -SELECT ''::text AS five, unique1, unique2, stringu1 +SELECT ''::text AS five, unique1, unique2, stringu1 FROM onek ORDER BY unique1 OFFSET 990 LIMIT 5; five | unique1 | unique2 | stringu1 @@ -96,7 +96,7 @@ SELECT ''::text AS five, unique1, unique2, stringu1 | 994 | 695 | GMAAAA (5 rows) -SELECT ''::text AS five, unique1, unique2, stringu1 +SELECT ''::text AS five, unique1, unique2, stringu1 FROM onek ORDER BY unique1 LIMIT 5 OFFSET 900; five | unique1 | unique2 | stringu1 diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out index 857e1d8319..d9927b7915 100644 --- a/src/test/regress/expected/numeric.out +++ b/src/test/regress/expected/numeric.out @@ -805,7 +805,7 @@ SELECT width_bucket('Infinity'::float8, 1, 10, 10), DROP TABLE width_bucket_test; -- TO_CHAR() -- -SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999') +SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999') FROM num_data; to_char_1 | to_char -----------+------------------------ @@ -869,7 +869,7 @@ SELECT '' AS to_char_4, to_char(val, '9999999999999999.999999999999999S') | 24926804.045047420000000- (10 rows) -SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data; to_char_5 | to_char -----------+----------------------------------- | .000000000000000 @@ -914,7 +914,7 @@ SELECT '' AS to_char_7, to_char(val, 'FM9999999999999999.999999999999999THPR') | <24926804.04504742> (10 rows) -SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data; +SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data; to_char_8 | to_char -----------+----------------------------------- | + .000000000000000 @@ -929,7 +929,7 @@ SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') | - 24926804.045047420000000 (10 rows) -SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data; to_char_9 | to_char -----------+----------------------------------- | 0000000000000000.000000000000000 @@ -944,7 +944,7 @@ SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') | -0000000024926804.045047420000000 (10 rows) -SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data; to_char_10 | to_char ------------+----------------------------------- | +0000000000000000.000000000000000 @@ -959,7 +959,7 @@ SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') | -0000000024926804.045047420000000 (10 rows) -SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data; to_char_11 | to_char ------------+----------------------------- | 0000000000000000. 
@@ -1034,7 +1034,7 @@ SELECT '' AS to_char_15, to_char(val, 'FM9999999990999999.099999999999999') FRO | -24926804.04504742 (10 rows) -SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data; +SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data; to_char_16 | to_char ------------+------------------------------------ | .000000000000000 diff --git a/src/test/regress/expected/oid.out b/src/test/regress/expected/oid.out index 008b5a246b..1eab9cc935 100644 --- a/src/test/regress/expected/oid.out +++ b/src/test/regress/expected/oid.out @@ -11,7 +11,7 @@ INSERT INTO OID_TBL(f1) VALUES ('5 '); INSERT INTO OID_TBL(f1) VALUES (' 10 '); -- leading/trailing hard tab is also allowed INSERT INTO OID_TBL(f1) VALUES (' 15 '); --- bad inputs +-- bad inputs INSERT INTO OID_TBL(f1) VALUES (''); ERROR: invalid input syntax for type oid: "" LINE 1: INSERT INTO OID_TBL(f1) VALUES (''); diff --git a/src/test/regress/expected/oidjoins.out b/src/test/regress/expected/oidjoins.out index af1a801bc6..9fa0f845cf 100644 --- a/src/test/regress/expected/oidjoins.out +++ b/src/test/regress/expected/oidjoins.out @@ -1,953 +1,953 @@ -- -- This is created by pgsql/src/tools/findoidjoins/make_oidjoins_check -- -SELECT ctid, aggfnoid -FROM pg_catalog.pg_aggregate fk -WHERE aggfnoid != 0 AND +SELECT ctid, aggfnoid +FROM pg_catalog.pg_aggregate fk +WHERE aggfnoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfnoid); ctid | aggfnoid ------+---------- (0 rows) -SELECT ctid, aggtransfn -FROM pg_catalog.pg_aggregate fk -WHERE aggtransfn != 0 AND +SELECT ctid, aggtransfn +FROM pg_catalog.pg_aggregate fk +WHERE aggtransfn != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggtransfn); ctid | aggtransfn ------+------------ (0 rows) -SELECT ctid, aggfinalfn -FROM pg_catalog.pg_aggregate fk -WHERE aggfinalfn != 0 AND +SELECT ctid, aggfinalfn +FROM pg_catalog.pg_aggregate fk +WHERE aggfinalfn != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfinalfn); ctid | aggfinalfn ------+------------ (0 rows) -SELECT ctid, aggsortop -FROM pg_catalog.pg_aggregate fk -WHERE aggsortop != 0 AND +SELECT ctid, aggsortop +FROM pg_catalog.pg_aggregate fk +WHERE aggsortop != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.aggsortop); ctid | aggsortop ------+----------- (0 rows) -SELECT ctid, aggtranstype -FROM pg_catalog.pg_aggregate fk -WHERE aggtranstype != 0 AND +SELECT ctid, aggtranstype +FROM pg_catalog.pg_aggregate fk +WHERE aggtranstype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.aggtranstype); ctid | aggtranstype ------+-------------- (0 rows) -SELECT ctid, amkeytype -FROM pg_catalog.pg_am fk -WHERE amkeytype != 0 AND +SELECT ctid, amkeytype +FROM pg_catalog.pg_am fk +WHERE amkeytype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amkeytype); ctid | amkeytype ------+----------- (0 rows) -SELECT ctid, aminsert -FROM pg_catalog.pg_am fk -WHERE aminsert != 0 AND +SELECT ctid, aminsert +FROM pg_catalog.pg_am fk +WHERE aminsert != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aminsert); ctid | aminsert ------+---------- (0 rows) -SELECT ctid, ambeginscan -FROM pg_catalog.pg_am fk -WHERE ambeginscan != 0 AND +SELECT ctid, ambeginscan +FROM pg_catalog.pg_am fk +WHERE ambeginscan != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambeginscan); ctid | ambeginscan ------+------------- 
(0 rows) -SELECT ctid, amgettuple -FROM pg_catalog.pg_am fk -WHERE amgettuple != 0 AND +SELECT ctid, amgettuple +FROM pg_catalog.pg_am fk +WHERE amgettuple != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgettuple); ctid | amgettuple ------+------------ (0 rows) -SELECT ctid, amgetbitmap -FROM pg_catalog.pg_am fk -WHERE amgetbitmap != 0 AND +SELECT ctid, amgetbitmap +FROM pg_catalog.pg_am fk +WHERE amgetbitmap != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgetbitmap); ctid | amgetbitmap ------+------------- (0 rows) -SELECT ctid, amrescan -FROM pg_catalog.pg_am fk -WHERE amrescan != 0 AND +SELECT ctid, amrescan +FROM pg_catalog.pg_am fk +WHERE amrescan != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrescan); ctid | amrescan ------+---------- (0 rows) -SELECT ctid, amendscan -FROM pg_catalog.pg_am fk -WHERE amendscan != 0 AND +SELECT ctid, amendscan +FROM pg_catalog.pg_am fk +WHERE amendscan != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amendscan); ctid | amendscan ------+----------- (0 rows) -SELECT ctid, ammarkpos -FROM pg_catalog.pg_am fk -WHERE ammarkpos != 0 AND +SELECT ctid, ammarkpos +FROM pg_catalog.pg_am fk +WHERE ammarkpos != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ammarkpos); ctid | ammarkpos ------+----------- (0 rows) -SELECT ctid, amrestrpos -FROM pg_catalog.pg_am fk -WHERE amrestrpos != 0 AND +SELECT ctid, amrestrpos +FROM pg_catalog.pg_am fk +WHERE amrestrpos != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrestrpos); ctid | amrestrpos ------+------------ (0 rows) -SELECT ctid, ambuild -FROM pg_catalog.pg_am fk -WHERE ambuild != 0 AND +SELECT ctid, ambuild +FROM pg_catalog.pg_am fk +WHERE ambuild != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambuild); ctid | ambuild ------+--------- (0 rows) -SELECT ctid, ambulkdelete -FROM pg_catalog.pg_am fk -WHERE ambulkdelete != 0 AND +SELECT ctid, ambulkdelete +FROM pg_catalog.pg_am fk +WHERE ambulkdelete != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambulkdelete); ctid | ambulkdelete ------+-------------- (0 rows) -SELECT ctid, amvacuumcleanup -FROM pg_catalog.pg_am fk -WHERE amvacuumcleanup != 0 AND +SELECT ctid, amvacuumcleanup +FROM pg_catalog.pg_am fk +WHERE amvacuumcleanup != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amvacuumcleanup); ctid | amvacuumcleanup ------+----------------- (0 rows) -SELECT ctid, amcostestimate -FROM pg_catalog.pg_am fk -WHERE amcostestimate != 0 AND +SELECT ctid, amcostestimate +FROM pg_catalog.pg_am fk +WHERE amcostestimate != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amcostestimate); ctid | amcostestimate ------+---------------- (0 rows) -SELECT ctid, amoptions -FROM pg_catalog.pg_am fk -WHERE amoptions != 0 AND +SELECT ctid, amoptions +FROM pg_catalog.pg_am fk +WHERE amoptions != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amoptions); ctid | amoptions ------+----------- (0 rows) -SELECT ctid, amopfamily -FROM pg_catalog.pg_amop fk -WHERE amopfamily != 0 AND +SELECT ctid, amopfamily +FROM pg_catalog.pg_amop fk +WHERE amopfamily != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amopfamily); ctid | amopfamily ------+------------ (0 rows) -SELECT ctid, amoplefttype -FROM pg_catalog.pg_amop fk -WHERE amoplefttype != 0 AND +SELECT ctid, amoplefttype +FROM pg_catalog.pg_amop 
fk +WHERE amoplefttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoplefttype); ctid | amoplefttype ------+-------------- (0 rows) -SELECT ctid, amoprighttype -FROM pg_catalog.pg_amop fk -WHERE amoprighttype != 0 AND +SELECT ctid, amoprighttype +FROM pg_catalog.pg_amop fk +WHERE amoprighttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoprighttype); ctid | amoprighttype ------+--------------- (0 rows) -SELECT ctid, amopopr -FROM pg_catalog.pg_amop fk -WHERE amopopr != 0 AND +SELECT ctid, amopopr +FROM pg_catalog.pg_amop fk +WHERE amopopr != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.amopopr); ctid | amopopr ------+--------- (0 rows) -SELECT ctid, amopmethod -FROM pg_catalog.pg_amop fk -WHERE amopmethod != 0 AND +SELECT ctid, amopmethod +FROM pg_catalog.pg_amop fk +WHERE amopmethod != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.amopmethod); ctid | amopmethod ------+------------ (0 rows) -SELECT ctid, amprocfamily -FROM pg_catalog.pg_amproc fk -WHERE amprocfamily != 0 AND +SELECT ctid, amprocfamily +FROM pg_catalog.pg_amproc fk +WHERE amprocfamily != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amprocfamily); ctid | amprocfamily ------+-------------- (0 rows) -SELECT ctid, amproclefttype -FROM pg_catalog.pg_amproc fk -WHERE amproclefttype != 0 AND +SELECT ctid, amproclefttype +FROM pg_catalog.pg_amproc fk +WHERE amproclefttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amproclefttype); ctid | amproclefttype ------+---------------- (0 rows) -SELECT ctid, amprocrighttype -FROM pg_catalog.pg_amproc fk -WHERE amprocrighttype != 0 AND +SELECT ctid, amprocrighttype +FROM pg_catalog.pg_amproc fk +WHERE amprocrighttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amprocrighttype); ctid | amprocrighttype ------+----------------- (0 rows) -SELECT ctid, amproc -FROM pg_catalog.pg_amproc fk -WHERE amproc != 0 AND +SELECT ctid, amproc +FROM pg_catalog.pg_amproc fk +WHERE amproc != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amproc); ctid | amproc ------+-------- (0 rows) -SELECT ctid, attrelid -FROM pg_catalog.pg_attribute fk -WHERE attrelid != 0 AND +SELECT ctid, attrelid +FROM pg_catalog.pg_attribute fk +WHERE attrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.attrelid); ctid | attrelid ------+---------- (0 rows) -SELECT ctid, atttypid -FROM pg_catalog.pg_attribute fk -WHERE atttypid != 0 AND +SELECT ctid, atttypid +FROM pg_catalog.pg_attribute fk +WHERE atttypid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.atttypid); ctid | atttypid ------+---------- (0 rows) -SELECT ctid, castsource -FROM pg_catalog.pg_cast fk -WHERE castsource != 0 AND +SELECT ctid, castsource +FROM pg_catalog.pg_cast fk +WHERE castsource != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.castsource); ctid | castsource ------+------------ (0 rows) -SELECT ctid, casttarget -FROM pg_catalog.pg_cast fk -WHERE casttarget != 0 AND +SELECT ctid, casttarget +FROM pg_catalog.pg_cast fk +WHERE casttarget != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.casttarget); ctid | casttarget ------+------------ (0 rows) -SELECT ctid, castfunc -FROM pg_catalog.pg_cast fk -WHERE castfunc != 0 AND +SELECT ctid, castfunc +FROM pg_catalog.pg_cast fk +WHERE castfunc != 0 AND NOT EXISTS(SELECT 1 FROM 
pg_catalog.pg_proc pk WHERE pk.oid = fk.castfunc); ctid | castfunc ------+---------- (0 rows) -SELECT ctid, relnamespace -FROM pg_catalog.pg_class fk -WHERE relnamespace != 0 AND +SELECT ctid, relnamespace +FROM pg_catalog.pg_class fk +WHERE relnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.relnamespace); ctid | relnamespace ------+-------------- (0 rows) -SELECT ctid, reltype -FROM pg_catalog.pg_class fk -WHERE reltype != 0 AND +SELECT ctid, reltype +FROM pg_catalog.pg_class fk +WHERE reltype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.reltype); ctid | reltype ------+--------- (0 rows) -SELECT ctid, relowner -FROM pg_catalog.pg_class fk -WHERE relowner != 0 AND +SELECT ctid, relowner +FROM pg_catalog.pg_class fk +WHERE relowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.relowner); ctid | relowner ------+---------- (0 rows) -SELECT ctid, relam -FROM pg_catalog.pg_class fk -WHERE relam != 0 AND +SELECT ctid, relam +FROM pg_catalog.pg_class fk +WHERE relam != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.relam); ctid | relam ------+------- (0 rows) -SELECT ctid, reltablespace -FROM pg_catalog.pg_class fk -WHERE reltablespace != 0 AND +SELECT ctid, reltablespace +FROM pg_catalog.pg_class fk +WHERE reltablespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.reltablespace); ctid | reltablespace ------+--------------- (0 rows) -SELECT ctid, reltoastrelid -FROM pg_catalog.pg_class fk -WHERE reltoastrelid != 0 AND +SELECT ctid, reltoastrelid +FROM pg_catalog.pg_class fk +WHERE reltoastrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastrelid); ctid | reltoastrelid ------+--------------- (0 rows) -SELECT ctid, reltoastidxid -FROM pg_catalog.pg_class fk -WHERE reltoastidxid != 0 AND +SELECT ctid, reltoastidxid +FROM pg_catalog.pg_class fk +WHERE reltoastidxid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastidxid); ctid | reltoastidxid ------+--------------- (0 rows) -SELECT ctid, connamespace -FROM pg_catalog.pg_constraint fk -WHERE connamespace != 0 AND +SELECT ctid, connamespace +FROM pg_catalog.pg_constraint fk +WHERE connamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace); ctid | connamespace ------+-------------- (0 rows) -SELECT ctid, contypid -FROM pg_catalog.pg_constraint fk -WHERE contypid != 0 AND +SELECT ctid, contypid +FROM pg_catalog.pg_constraint fk +WHERE contypid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.contypid); ctid | contypid ------+---------- (0 rows) -SELECT ctid, connamespace -FROM pg_catalog.pg_conversion fk -WHERE connamespace != 0 AND +SELECT ctid, connamespace +FROM pg_catalog.pg_conversion fk +WHERE connamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace); ctid | connamespace ------+-------------- (0 rows) -SELECT ctid, conowner -FROM pg_catalog.pg_conversion fk -WHERE conowner != 0 AND +SELECT ctid, conowner +FROM pg_catalog.pg_conversion fk +WHERE conowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.conowner); ctid | conowner ------+---------- (0 rows) -SELECT ctid, conproc -FROM pg_catalog.pg_conversion fk -WHERE conproc != 0 AND +SELECT ctid, conproc +FROM pg_catalog.pg_conversion fk +WHERE conproc != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = 
fk.conproc); ctid | conproc ------+--------- (0 rows) -SELECT ctid, datdba -FROM pg_catalog.pg_database fk -WHERE datdba != 0 AND +SELECT ctid, datdba +FROM pg_catalog.pg_database fk +WHERE datdba != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.datdba); ctid | datdba ------+-------- (0 rows) -SELECT ctid, dattablespace -FROM pg_catalog.pg_database fk -WHERE dattablespace != 0 AND +SELECT ctid, dattablespace +FROM pg_catalog.pg_database fk +WHERE dattablespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.dattablespace); ctid | dattablespace ------+--------------- (0 rows) -SELECT ctid, setdatabase -FROM pg_catalog.pg_db_role_setting fk -WHERE setdatabase != 0 AND +SELECT ctid, setdatabase +FROM pg_catalog.pg_db_role_setting fk +WHERE setdatabase != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_database pk WHERE pk.oid = fk.setdatabase); ctid | setdatabase ------+------------- (0 rows) -SELECT ctid, classid -FROM pg_catalog.pg_depend fk -WHERE classid != 0 AND +SELECT ctid, classid +FROM pg_catalog.pg_depend fk +WHERE classid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classid); ctid | classid ------+--------- (0 rows) -SELECT ctid, refclassid -FROM pg_catalog.pg_depend fk -WHERE refclassid != 0 AND +SELECT ctid, refclassid +FROM pg_catalog.pg_depend fk +WHERE refclassid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid); ctid | refclassid ------+------------ (0 rows) -SELECT ctid, classoid -FROM pg_catalog.pg_description fk -WHERE classoid != 0 AND +SELECT ctid, classoid +FROM pg_catalog.pg_description fk +WHERE classoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid); ctid | classoid ------+---------- (0 rows) -SELECT ctid, indexrelid -FROM pg_catalog.pg_index fk -WHERE indexrelid != 0 AND +SELECT ctid, indexrelid +FROM pg_catalog.pg_index fk +WHERE indexrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indexrelid); ctid | indexrelid ------+------------ (0 rows) -SELECT ctid, indrelid -FROM pg_catalog.pg_index fk -WHERE indrelid != 0 AND +SELECT ctid, indrelid +FROM pg_catalog.pg_index fk +WHERE indrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indrelid); ctid | indrelid ------+---------- (0 rows) -SELECT ctid, lanowner -FROM pg_catalog.pg_language fk -WHERE lanowner != 0 AND +SELECT ctid, lanowner +FROM pg_catalog.pg_language fk +WHERE lanowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.lanowner); ctid | lanowner ------+---------- (0 rows) -SELECT ctid, lanplcallfoid -FROM pg_catalog.pg_language fk -WHERE lanplcallfoid != 0 AND +SELECT ctid, lanplcallfoid +FROM pg_catalog.pg_language fk +WHERE lanplcallfoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanplcallfoid); ctid | lanplcallfoid ------+--------------- (0 rows) -SELECT ctid, laninline -FROM pg_catalog.pg_language fk -WHERE laninline != 0 AND +SELECT ctid, laninline +FROM pg_catalog.pg_language fk +WHERE laninline != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.laninline); ctid | laninline ------+----------- (0 rows) -SELECT ctid, lanvalidator -FROM pg_catalog.pg_language fk -WHERE lanvalidator != 0 AND +SELECT ctid, lanvalidator +FROM pg_catalog.pg_language fk +WHERE lanvalidator != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanvalidator); ctid | lanvalidator ------+-------------- (0 rows) 
-SELECT ctid, nspowner -FROM pg_catalog.pg_namespace fk -WHERE nspowner != 0 AND +SELECT ctid, nspowner +FROM pg_catalog.pg_namespace fk +WHERE nspowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.nspowner); ctid | nspowner ------+---------- (0 rows) -SELECT ctid, opcmethod -FROM pg_catalog.pg_opclass fk -WHERE opcmethod != 0 AND +SELECT ctid, opcmethod +FROM pg_catalog.pg_opclass fk +WHERE opcmethod != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opcmethod); ctid | opcmethod ------+----------- (0 rows) -SELECT ctid, opcnamespace -FROM pg_catalog.pg_opclass fk -WHERE opcnamespace != 0 AND +SELECT ctid, opcnamespace +FROM pg_catalog.pg_opclass fk +WHERE opcnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opcnamespace); ctid | opcnamespace ------+-------------- (0 rows) -SELECT ctid, opcowner -FROM pg_catalog.pg_opclass fk -WHERE opcowner != 0 AND +SELECT ctid, opcowner +FROM pg_catalog.pg_opclass fk +WHERE opcowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opcowner); ctid | opcowner ------+---------- (0 rows) -SELECT ctid, opcfamily -FROM pg_catalog.pg_opclass fk -WHERE opcfamily != 0 AND +SELECT ctid, opcfamily +FROM pg_catalog.pg_opclass fk +WHERE opcfamily != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.opcfamily); ctid | opcfamily ------+----------- (0 rows) -SELECT ctid, opcintype -FROM pg_catalog.pg_opclass fk -WHERE opcintype != 0 AND +SELECT ctid, opcintype +FROM pg_catalog.pg_opclass fk +WHERE opcintype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opcintype); ctid | opcintype ------+----------- (0 rows) -SELECT ctid, opckeytype -FROM pg_catalog.pg_opclass fk -WHERE opckeytype != 0 AND +SELECT ctid, opckeytype +FROM pg_catalog.pg_opclass fk +WHERE opckeytype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opckeytype); ctid | opckeytype ------+------------ (0 rows) -SELECT ctid, oprnamespace -FROM pg_catalog.pg_operator fk -WHERE oprnamespace != 0 AND +SELECT ctid, oprnamespace +FROM pg_catalog.pg_operator fk +WHERE oprnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.oprnamespace); ctid | oprnamespace ------+-------------- (0 rows) -SELECT ctid, oprowner -FROM pg_catalog.pg_operator fk -WHERE oprowner != 0 AND +SELECT ctid, oprowner +FROM pg_catalog.pg_operator fk +WHERE oprowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.oprowner); ctid | oprowner ------+---------- (0 rows) -SELECT ctid, oprleft -FROM pg_catalog.pg_operator fk -WHERE oprleft != 0 AND +SELECT ctid, oprleft +FROM pg_catalog.pg_operator fk +WHERE oprleft != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprleft); ctid | oprleft ------+--------- (0 rows) -SELECT ctid, oprright -FROM pg_catalog.pg_operator fk -WHERE oprright != 0 AND +SELECT ctid, oprright +FROM pg_catalog.pg_operator fk +WHERE oprright != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprright); ctid | oprright ------+---------- (0 rows) -SELECT ctid, oprresult -FROM pg_catalog.pg_operator fk -WHERE oprresult != 0 AND +SELECT ctid, oprresult +FROM pg_catalog.pg_operator fk +WHERE oprresult != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprresult); ctid | oprresult ------+----------- (0 rows) -SELECT ctid, oprcom -FROM pg_catalog.pg_operator fk -WHERE oprcom != 0 AND +SELECT ctid, oprcom +FROM 
pg_catalog.pg_operator fk +WHERE oprcom != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprcom); ctid | oprcom ------+-------- (0 rows) -SELECT ctid, oprnegate -FROM pg_catalog.pg_operator fk -WHERE oprnegate != 0 AND +SELECT ctid, oprnegate +FROM pg_catalog.pg_operator fk +WHERE oprnegate != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprnegate); ctid | oprnegate ------+----------- (0 rows) -SELECT ctid, oprcode -FROM pg_catalog.pg_operator fk -WHERE oprcode != 0 AND +SELECT ctid, oprcode +FROM pg_catalog.pg_operator fk +WHERE oprcode != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprcode); ctid | oprcode ------+--------- (0 rows) -SELECT ctid, oprrest -FROM pg_catalog.pg_operator fk -WHERE oprrest != 0 AND +SELECT ctid, oprrest +FROM pg_catalog.pg_operator fk +WHERE oprrest != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprrest); ctid | oprrest ------+--------- (0 rows) -SELECT ctid, oprjoin -FROM pg_catalog.pg_operator fk -WHERE oprjoin != 0 AND +SELECT ctid, oprjoin +FROM pg_catalog.pg_operator fk +WHERE oprjoin != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprjoin); ctid | oprjoin ------+--------- (0 rows) -SELECT ctid, opfmethod -FROM pg_catalog.pg_opfamily fk -WHERE opfmethod != 0 AND +SELECT ctid, opfmethod +FROM pg_catalog.pg_opfamily fk +WHERE opfmethod != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opfmethod); ctid | opfmethod ------+----------- (0 rows) -SELECT ctid, opfnamespace -FROM pg_catalog.pg_opfamily fk -WHERE opfnamespace != 0 AND +SELECT ctid, opfnamespace +FROM pg_catalog.pg_opfamily fk +WHERE opfnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opfnamespace); ctid | opfnamespace ------+-------------- (0 rows) -SELECT ctid, opfowner -FROM pg_catalog.pg_opfamily fk -WHERE opfowner != 0 AND +SELECT ctid, opfowner +FROM pg_catalog.pg_opfamily fk +WHERE opfowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opfowner); ctid | opfowner ------+---------- (0 rows) -SELECT ctid, pronamespace -FROM pg_catalog.pg_proc fk -WHERE pronamespace != 0 AND +SELECT ctid, pronamespace +FROM pg_catalog.pg_proc fk +WHERE pronamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.pronamespace); ctid | pronamespace ------+-------------- (0 rows) -SELECT ctid, proowner -FROM pg_catalog.pg_proc fk -WHERE proowner != 0 AND +SELECT ctid, proowner +FROM pg_catalog.pg_proc fk +WHERE proowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.proowner); ctid | proowner ------+---------- (0 rows) -SELECT ctid, prolang -FROM pg_catalog.pg_proc fk -WHERE prolang != 0 AND +SELECT ctid, prolang +FROM pg_catalog.pg_proc fk +WHERE prolang != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_language pk WHERE pk.oid = fk.prolang); ctid | prolang ------+--------- (0 rows) -SELECT ctid, prorettype -FROM pg_catalog.pg_proc fk -WHERE prorettype != 0 AND +SELECT ctid, prorettype +FROM pg_catalog.pg_proc fk +WHERE prorettype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.prorettype); ctid | prorettype ------+------------ (0 rows) -SELECT ctid, ev_class -FROM pg_catalog.pg_rewrite fk -WHERE ev_class != 0 AND +SELECT ctid, ev_class +FROM pg_catalog.pg_rewrite fk +WHERE ev_class != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.ev_class); ctid | ev_class ------+---------- (0 
rows) -SELECT ctid, refclassid -FROM pg_catalog.pg_shdepend fk -WHERE refclassid != 0 AND +SELECT ctid, refclassid +FROM pg_catalog.pg_shdepend fk +WHERE refclassid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid); ctid | refclassid ------+------------ (0 rows) -SELECT ctid, classoid -FROM pg_catalog.pg_shdescription fk -WHERE classoid != 0 AND +SELECT ctid, classoid +FROM pg_catalog.pg_shdescription fk +WHERE classoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid); ctid | classoid ------+---------- (0 rows) -SELECT ctid, starelid -FROM pg_catalog.pg_statistic fk -WHERE starelid != 0 AND +SELECT ctid, starelid +FROM pg_catalog.pg_statistic fk +WHERE starelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.starelid); ctid | starelid ------+---------- (0 rows) -SELECT ctid, staop1 -FROM pg_catalog.pg_statistic fk -WHERE staop1 != 0 AND +SELECT ctid, staop1 +FROM pg_catalog.pg_statistic fk +WHERE staop1 != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop1); ctid | staop1 ------+-------- (0 rows) -SELECT ctid, staop2 -FROM pg_catalog.pg_statistic fk -WHERE staop2 != 0 AND +SELECT ctid, staop2 +FROM pg_catalog.pg_statistic fk +WHERE staop2 != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop2); ctid | staop2 ------+-------- (0 rows) -SELECT ctid, staop3 -FROM pg_catalog.pg_statistic fk -WHERE staop3 != 0 AND +SELECT ctid, staop3 +FROM pg_catalog.pg_statistic fk +WHERE staop3 != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop3); ctid | staop3 ------+-------- (0 rows) -SELECT ctid, spcowner -FROM pg_catalog.pg_tablespace fk -WHERE spcowner != 0 AND +SELECT ctid, spcowner +FROM pg_catalog.pg_tablespace fk +WHERE spcowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.spcowner); ctid | spcowner ------+---------- (0 rows) -SELECT ctid, cfgnamespace -FROM pg_catalog.pg_ts_config fk -WHERE cfgnamespace != 0 AND +SELECT ctid, cfgnamespace +FROM pg_catalog.pg_ts_config fk +WHERE cfgnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.cfgnamespace); ctid | cfgnamespace ------+-------------- (0 rows) -SELECT ctid, cfgowner -FROM pg_catalog.pg_ts_config fk -WHERE cfgowner != 0 AND +SELECT ctid, cfgowner +FROM pg_catalog.pg_ts_config fk +WHERE cfgowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.cfgowner); ctid | cfgowner ------+---------- (0 rows) -SELECT ctid, cfgparser -FROM pg_catalog.pg_ts_config fk -WHERE cfgparser != 0 AND +SELECT ctid, cfgparser +FROM pg_catalog.pg_ts_config fk +WHERE cfgparser != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_parser pk WHERE pk.oid = fk.cfgparser); ctid | cfgparser ------+----------- (0 rows) -SELECT ctid, mapcfg -FROM pg_catalog.pg_ts_config_map fk -WHERE mapcfg != 0 AND +SELECT ctid, mapcfg +FROM pg_catalog.pg_ts_config_map fk +WHERE mapcfg != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_config pk WHERE pk.oid = fk.mapcfg); ctid | mapcfg ------+-------- (0 rows) -SELECT ctid, mapdict -FROM pg_catalog.pg_ts_config_map fk -WHERE mapdict != 0 AND +SELECT ctid, mapdict +FROM pg_catalog.pg_ts_config_map fk +WHERE mapdict != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_dict pk WHERE pk.oid = fk.mapdict); ctid | mapdict ------+--------- (0 rows) -SELECT ctid, dictnamespace -FROM pg_catalog.pg_ts_dict fk -WHERE dictnamespace != 0 AND +SELECT ctid, dictnamespace +FROM 
pg_catalog.pg_ts_dict fk +WHERE dictnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.dictnamespace); ctid | dictnamespace ------+--------------- (0 rows) -SELECT ctid, dictowner -FROM pg_catalog.pg_ts_dict fk -WHERE dictowner != 0 AND +SELECT ctid, dictowner +FROM pg_catalog.pg_ts_dict fk +WHERE dictowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.dictowner); ctid | dictowner ------+----------- (0 rows) -SELECT ctid, dicttemplate -FROM pg_catalog.pg_ts_dict fk -WHERE dicttemplate != 0 AND +SELECT ctid, dicttemplate +FROM pg_catalog.pg_ts_dict fk +WHERE dicttemplate != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_template pk WHERE pk.oid = fk.dicttemplate); ctid | dicttemplate ------+-------------- (0 rows) -SELECT ctid, prsnamespace -FROM pg_catalog.pg_ts_parser fk -WHERE prsnamespace != 0 AND +SELECT ctid, prsnamespace +FROM pg_catalog.pg_ts_parser fk +WHERE prsnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.prsnamespace); ctid | prsnamespace ------+-------------- (0 rows) -SELECT ctid, prsstart -FROM pg_catalog.pg_ts_parser fk -WHERE prsstart != 0 AND +SELECT ctid, prsstart +FROM pg_catalog.pg_ts_parser fk +WHERE prsstart != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsstart); ctid | prsstart ------+---------- (0 rows) -SELECT ctid, prstoken -FROM pg_catalog.pg_ts_parser fk -WHERE prstoken != 0 AND +SELECT ctid, prstoken +FROM pg_catalog.pg_ts_parser fk +WHERE prstoken != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prstoken); ctid | prstoken ------+---------- (0 rows) -SELECT ctid, prsend -FROM pg_catalog.pg_ts_parser fk -WHERE prsend != 0 AND +SELECT ctid, prsend +FROM pg_catalog.pg_ts_parser fk +WHERE prsend != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsend); ctid | prsend ------+-------- (0 rows) -SELECT ctid, prsheadline -FROM pg_catalog.pg_ts_parser fk -WHERE prsheadline != 0 AND +SELECT ctid, prsheadline +FROM pg_catalog.pg_ts_parser fk +WHERE prsheadline != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsheadline); ctid | prsheadline ------+------------- (0 rows) -SELECT ctid, prslextype -FROM pg_catalog.pg_ts_parser fk -WHERE prslextype != 0 AND +SELECT ctid, prslextype +FROM pg_catalog.pg_ts_parser fk +WHERE prslextype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prslextype); ctid | prslextype ------+------------ (0 rows) -SELECT ctid, tmplnamespace -FROM pg_catalog.pg_ts_template fk -WHERE tmplnamespace != 0 AND +SELECT ctid, tmplnamespace +FROM pg_catalog.pg_ts_template fk +WHERE tmplnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.tmplnamespace); ctid | tmplnamespace ------+--------------- (0 rows) -SELECT ctid, tmplinit -FROM pg_catalog.pg_ts_template fk -WHERE tmplinit != 0 AND +SELECT ctid, tmplinit +FROM pg_catalog.pg_ts_template fk +WHERE tmplinit != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmplinit); ctid | tmplinit ------+---------- (0 rows) -SELECT ctid, tmpllexize -FROM pg_catalog.pg_ts_template fk -WHERE tmpllexize != 0 AND +SELECT ctid, tmpllexize +FROM pg_catalog.pg_ts_template fk +WHERE tmpllexize != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmpllexize); ctid | tmpllexize ------+------------ (0 rows) -SELECT ctid, typnamespace -FROM pg_catalog.pg_type fk -WHERE typnamespace != 0 AND +SELECT ctid, typnamespace 
+FROM pg_catalog.pg_type fk +WHERE typnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.typnamespace); ctid | typnamespace ------+-------------- (0 rows) -SELECT ctid, typowner -FROM pg_catalog.pg_type fk -WHERE typowner != 0 AND +SELECT ctid, typowner +FROM pg_catalog.pg_type fk +WHERE typowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.typowner); ctid | typowner ------+---------- (0 rows) -SELECT ctid, typrelid -FROM pg_catalog.pg_type fk -WHERE typrelid != 0 AND +SELECT ctid, typrelid +FROM pg_catalog.pg_type fk +WHERE typrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.typrelid); ctid | typrelid ------+---------- (0 rows) -SELECT ctid, typelem -FROM pg_catalog.pg_type fk -WHERE typelem != 0 AND +SELECT ctid, typelem +FROM pg_catalog.pg_type fk +WHERE typelem != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typelem); ctid | typelem ------+--------- (0 rows) -SELECT ctid, typarray -FROM pg_catalog.pg_type fk -WHERE typarray != 0 AND +SELECT ctid, typarray +FROM pg_catalog.pg_type fk +WHERE typarray != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typarray); ctid | typarray ------+---------- (0 rows) -SELECT ctid, typinput -FROM pg_catalog.pg_type fk -WHERE typinput != 0 AND +SELECT ctid, typinput +FROM pg_catalog.pg_type fk +WHERE typinput != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typinput); ctid | typinput ------+---------- (0 rows) -SELECT ctid, typoutput -FROM pg_catalog.pg_type fk -WHERE typoutput != 0 AND +SELECT ctid, typoutput +FROM pg_catalog.pg_type fk +WHERE typoutput != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typoutput); ctid | typoutput ------+----------- (0 rows) -SELECT ctid, typreceive -FROM pg_catalog.pg_type fk -WHERE typreceive != 0 AND +SELECT ctid, typreceive +FROM pg_catalog.pg_type fk +WHERE typreceive != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typreceive); ctid | typreceive ------+------------ (0 rows) -SELECT ctid, typsend -FROM pg_catalog.pg_type fk -WHERE typsend != 0 AND +SELECT ctid, typsend +FROM pg_catalog.pg_type fk +WHERE typsend != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typsend); ctid | typsend ------+--------- (0 rows) -SELECT ctid, typmodin -FROM pg_catalog.pg_type fk -WHERE typmodin != 0 AND +SELECT ctid, typmodin +FROM pg_catalog.pg_type fk +WHERE typmodin != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodin); ctid | typmodin ------+---------- (0 rows) -SELECT ctid, typmodout -FROM pg_catalog.pg_type fk -WHERE typmodout != 0 AND +SELECT ctid, typmodout +FROM pg_catalog.pg_type fk +WHERE typmodout != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodout); ctid | typmodout ------+----------- (0 rows) -SELECT ctid, typanalyze -FROM pg_catalog.pg_type fk -WHERE typanalyze != 0 AND +SELECT ctid, typanalyze +FROM pg_catalog.pg_type fk +WHERE typanalyze != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typanalyze); ctid | typanalyze ------+------------ (0 rows) -SELECT ctid, typbasetype -FROM pg_catalog.pg_type fk -WHERE typbasetype != 0 AND +SELECT ctid, typbasetype +FROM pg_catalog.pg_type fk +WHERE typbasetype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typbasetype); ctid | typbasetype ------+------------- diff --git a/src/test/regress/expected/plpgsql.out 
b/src/test/regress/expected/plpgsql.out index a02da98011..22ccce212c 100644 --- a/src/test/regress/expected/plpgsql.out +++ b/src/test/regress/expected/plpgsql.out @@ -3220,7 +3220,7 @@ NOTICE: 6 drop function exc_using(int, text); create or replace function exc_using(int) returns void as $$ -declare +declare c refcursor; i int; begin @@ -3231,7 +3231,7 @@ begin raise notice '%', i; end loop; close c; - return; + return; end; $$ language plpgsql; select exc_using(5); diff --git a/src/test/regress/expected/point.out b/src/test/regress/expected/point.out index 278837d091..7929229b16 100644 --- a/src/test/regress/expected/point.out +++ b/src/test/regress/expected/point.out @@ -7,7 +7,7 @@ INSERT INTO POINT_TBL(f1) VALUES ('(-10.0,0.0)'); INSERT INTO POINT_TBL(f1) VALUES ('(-3.0,4.0)'); INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)'); INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)'); --- bad format points +-- bad format points INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); ERROR: invalid input syntax for type point: "asdfasdf" LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); @@ -32,7 +32,7 @@ SELECT '' AS six, * FROM POINT_TBL; | (10,10) (6 rows) --- left of +-- left of SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)'; three | f1 -------+---------- @@ -41,7 +41,7 @@ SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)'; | (-5,-12) (3 rows) --- right of +-- right of SELECT '' AS three, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >> p.f1; three | f1 -------+---------- @@ -50,28 +50,28 @@ SELECT '' AS three, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >> p.f1; | (-5,-12) (3 rows) --- above +-- above SELECT '' AS one, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >^ p.f1; one | f1 -----+---------- | (-5,-12) (1 row) --- below +-- below SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 <^ '(0.0, 0.0)'; one | f1 -----+---------- | (-5,-12) (1 row) --- equal +-- equal SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 ~= '(5.1, 34.5)'; one | f1 -----+------------ | (5.1,34.5) (1 row) --- point in box +-- point in box SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 <@ box '(0,0,100,100)'; three | f1 @@ -235,7 +235,7 @@ SELECT '' AS fifteen, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS dis -- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10 SELECT '' AS three, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance - FROM POINT_TBL p1, POINT_TBL p2 + FROM POINT_TBL p1, POINT_TBL p2 WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 and p1.f1 >^ p2.f1 ORDER BY distance; three | point1 | point2 | distance diff --git a/src/test/regress/expected/polygon.out b/src/test/regress/expected/polygon.out index 7e0ae24266..b252902720 100644 --- a/src/test/regress/expected/polygon.out +++ b/src/test/regress/expected/polygon.out @@ -16,10 +16,10 @@ CREATE TABLE POLYGON_TBL(f1 polygon); INSERT INTO POLYGON_TBL(f1) VALUES ('(2.0,0.0),(2.0,4.0),(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)'); --- degenerate polygons +-- degenerate polygons INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,1.0),(0.0,1.0)'); --- bad polygon input strings +-- bad polygon input strings INSERT INTO POLYGON_TBL(f1) VALUES ('0.0'); ERROR: invalid input syntax for type polygon: "0.0" LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('0.0'); @@ -49,7 +49,7 @@ SELECT '' AS four, * FROM POLYGON_TBL; | ((0,1),(0,1)) (4 rows) --- overlap +-- overlap SELECT '' AS three, p.* FROM POLYGON_TBL p WHERE p.f1 && 
'(3.0,1.0),(3.0,3.0),(1.0,0.0)'; @@ -59,8 +59,8 @@ SELECT '' AS three, p.* | ((3,1),(3,3),(1,0)) (2 rows) --- left overlap -SELECT '' AS four, p.* +-- left overlap +SELECT '' AS four, p.* FROM POLYGON_TBL p WHERE p.f1 &< '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; four | f1 @@ -71,8 +71,8 @@ SELECT '' AS four, p.* | ((0,1),(0,1)) (4 rows) --- right overlap -SELECT '' AS two, p.* +-- right overlap +SELECT '' AS two, p.* FROM POLYGON_TBL p WHERE p.f1 &> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; two | f1 @@ -80,7 +80,7 @@ SELECT '' AS two, p.* | ((3,1),(3,3),(1,0)) (1 row) --- left of +-- left of SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 << '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; @@ -90,7 +90,7 @@ SELECT '' AS one, p.* | ((0,1),(0,1)) (2 rows) --- right of +-- right of SELECT '' AS zero, p.* FROM POLYGON_TBL p WHERE p.f1 >> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; @@ -98,8 +98,8 @@ SELECT '' AS zero, p.* ------+---- (0 rows) --- contained -SELECT '' AS one, p.* +-- contained +SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; one | f1 @@ -107,7 +107,7 @@ SELECT '' AS one, p.* | ((3,1),(3,3),(1,0)) (1 row) --- same +-- same SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; @@ -116,7 +116,7 @@ SELECT '' AS one, p.* | ((3,1),(3,3),(1,0)) (1 row) --- contains +-- contains SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; @@ -138,42 +138,42 @@ SELECT '' AS one, p.* -- -- 0 1 2 3 4 -- --- left of +-- left of SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; false ------- f (1 row) --- left overlap +-- left overlap SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; true ------ f (1 row) --- right overlap +-- right overlap SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' &> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; false ------- f (1 row) --- right of +-- right of SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' >> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; false ------- f (1 row) --- contained in +-- contained in SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; false ------- f (1 row) --- contains +-- contains SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; false ------- @@ -182,7 +182,7 @@ SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),( -- +------------------------+ -- | *---* 1 --- | + | | +-- | + | | -- | 2 *---* -- +------------------------+ -- 3 @@ -195,10 +195,10 @@ SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))' (1 row) -- +-----------+ --- | *---* / --- | | |/ --- | | + --- | | |\ +-- | *---* / +-- | | |/ +-- | | + +-- | | |\ -- | *---* \ -- +-----------+ SELECT '((0,4),(6,4),(3,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true"; @@ -233,14 +233,14 @@ SELECT '((0,0),(0,3),(3,3),(3,0))'::polygon @> '((2,1),(2,2),(3,2),(3,1))'::poly t (1 row) --- same +-- same SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; false ------- f (1 row) --- overlap +-- overlap SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; true ------ @@ -249,7 +249,7 @@ SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),( -- +--------------------+ -- | *---* 1 --- | + | | +-- | + | | -- | 2 
*---* -- +--------------------+ -- 3 diff --git a/src/test/regress/expected/portals.out b/src/test/regress/expected/portals.out index be7348d6b2..01152a939d 100644 --- a/src/test/regress/expected/portals.out +++ b/src/test/regress/expected/portals.out @@ -1244,16 +1244,16 @@ ERROR: WHERE CURRENT OF on a view is not implemented ROLLBACK; -- Make sure snapshot management works okay, per bug report in -- 235395b90909301035v7228ce63q392931f15aa74b31@mail.gmail.com -BEGIN; -SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; -CREATE TABLE cursor (a int); -INSERT INTO cursor VALUES (1); -DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE; -UPDATE cursor SET a = 2; -FETCH ALL FROM c1; +BEGIN; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE cursor (a int); +INSERT INTO cursor VALUES (1); +DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE; +UPDATE cursor SET a = 2; +FETCH ALL FROM c1; a --- (0 rows) -COMMIT; +COMMIT; DROP TABLE cursor; diff --git a/src/test/regress/expected/portals_p2.out b/src/test/regress/expected/portals_p2.out index 7a9cf69674..1e2365a2a6 100644 --- a/src/test/regress/expected/portals_p2.out +++ b/src/test/regress/expected/portals_p2.out @@ -2,31 +2,31 @@ -- PORTALS_P2 -- BEGIN; -DECLARE foo13 CURSOR FOR +DECLARE foo13 CURSOR FOR SELECT * FROM onek WHERE unique1 = 50; -DECLARE foo14 CURSOR FOR +DECLARE foo14 CURSOR FOR SELECT * FROM onek WHERE unique1 = 51; -DECLARE foo15 CURSOR FOR +DECLARE foo15 CURSOR FOR SELECT * FROM onek WHERE unique1 = 52; -DECLARE foo16 CURSOR FOR +DECLARE foo16 CURSOR FOR SELECT * FROM onek WHERE unique1 = 53; -DECLARE foo17 CURSOR FOR +DECLARE foo17 CURSOR FOR SELECT * FROM onek WHERE unique1 = 54; -DECLARE foo18 CURSOR FOR +DECLARE foo18 CURSOR FOR SELECT * FROM onek WHERE unique1 = 55; -DECLARE foo19 CURSOR FOR +DECLARE foo19 CURSOR FOR SELECT * FROM onek WHERE unique1 = 56; -DECLARE foo20 CURSOR FOR +DECLARE foo20 CURSOR FOR SELECT * FROM onek WHERE unique1 = 57; -DECLARE foo21 CURSOR FOR +DECLARE foo21 CURSOR FOR SELECT * FROM onek WHERE unique1 = 58; -DECLARE foo22 CURSOR FOR +DECLARE foo22 CURSOR FOR SELECT * FROM onek WHERE unique1 = 59; -DECLARE foo23 CURSOR FOR +DECLARE foo23 CURSOR FOR SELECT * FROM onek WHERE unique1 = 60; -DECLARE foo24 CURSOR FOR +DECLARE foo24 CURSOR FOR SELECT * FROM onek2 WHERE unique1 = 50; -DECLARE foo25 CURSOR FOR +DECLARE foo25 CURSOR FOR SELECT * FROM onek2 WHERE unique1 = 60; FETCH all in foo13; unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index be65be91dd..5673f72173 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -35,9 +35,9 @@ create table rtest_interface (sysname text, ifname text); create table rtest_person (pname text, pdesc text); create table rtest_admin (pname text, sysname text); create rule rtest_sys_upd as on update to rtest_system do also ( - update rtest_interface set sysname = new.sysname + update rtest_interface set sysname = new.sysname where sysname = old.sysname; - update rtest_admin set sysname = new.sysname + update rtest_admin set sysname = new.sysname where sysname = old.sysname ); create rule rtest_sys_del as on delete to rtest_system do also ( @@ -65,7 +65,7 @@ create rule rtest_emp_del as on delete to rtest_emp do 'fired', '0.00', old.salary); -- -- Tables and rules for the multiple cascaded qualified instead --- rule 
test +-- rule test -- create table rtest_t4 (a int4, b text); create table rtest_t5 (a int4, b text); @@ -753,7 +753,7 @@ create table rtest_view1 (a int4, b text, v bool); create table rtest_view2 (a int4); create table rtest_view3 (a int4, b text); create table rtest_view4 (a int4, b text, c int4); -create view rtest_vview1 as select a, b from rtest_view1 X +create view rtest_vview1 as select a, b from rtest_view1 X where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); create view rtest_vview2 as select a, b from rtest_view1 where v; create view rtest_vview3 as select a, b from rtest_vview2 X @@ -896,7 +896,7 @@ create table rtest_unitfact ( unit char(4), factor float ); -create view rtest_vcomp as +create view rtest_vcomp as select X.part, (X.size * Y.factor) as size_in_cm from rtest_comp X, rtest_unitfact Y where X.unit = Y.unit; @@ -1227,7 +1227,7 @@ create rule rrule as on update to vview do instead ( insert into cchild (pid, descrip) - select old.pid, new.descrip where old.descrip isnull; + select old.pid, new.descrip where old.descrip isnull; update cchild set descrip = new.descrip where cchild.pid = old.pid; ); select * from vview; @@ -1336,7 +1336,7 @@ SELECT viewname, definition FROM pg_views WHERE schemaname <> 'information_schem toyemp | SELECT emp.name, emp.age, emp.location, (12 * emp.salary) AS annualsal FROM emp; (56 rows) -SELECT tablename, rulename, definition FROM pg_rules +SELECT tablename, rulename, definition FROM pg_rules ORDER BY tablename, rulename; tablename | rulename | definition ---------------+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -1394,14 +1394,12 @@ SELECT * FROM ruletest_tbl2; create table rule_and_refint_t1 ( id1a integer, id1b integer, - primary key (id1a, id1b) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "rule_and_refint_t1_pkey" for table "rule_and_refint_t1" create table rule_and_refint_t2 ( id2a integer, id2c integer, - primary key (id2a, id2c) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "rule_and_refint_t2_pkey" for table "rule_and_refint_t2" @@ -1517,11 +1515,11 @@ create temp table t1 (a integer primary key); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1); create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); -create rule t1_ins_1 as on insert to t1 +create rule t1_ins_1 as on insert to t1 where new.a >= 0 and new.a < 10 do instead insert into t1_1 values (new.a); -create rule t1_ins_2 as on insert to t1 +create rule t1_ins_2 as on insert to t1 where new.a >= 10 and new.a < 20 do instead insert into t1_2 values (new.a); diff --git a/src/test/regress/expected/select.out b/src/test/regress/expected/select.out index 449341739c..c376523bbe 100644 --- a/src/test/regress/expected/select.out +++ b/src/test/regress/expected/select.out @@ -25,7 +25,7 @@ SELECT * FROM onek -- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1 -- SELECT onek.unique1, onek.stringu1 FROM onek - WHERE onek.unique1 < 20 + WHERE onek.unique1 < 20 ORDER BY unique1 using >; unique1 | stringu1 ---------+---------- @@ -55,7 +55,7 @@ SELECT onek.unique1, onek.stringu1 FROM onek -- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2 
-- SELECT onek.unique1, onek.stringu1 FROM onek - WHERE onek.unique1 > 980 + WHERE onek.unique1 > 980 ORDER BY stringu1 using <; unique1 | stringu1 ---------+---------- @@ -80,13 +80,12 @@ SELECT onek.unique1, onek.stringu1 FROM onek 987 | ZLAAAA (19 rows) - -- -- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data | -- sort +1d -2 +0nr -1 -- SELECT onek.unique1, onek.string4 FROM onek - WHERE onek.unique1 > 980 + WHERE onek.unique1 > 980 ORDER BY string4 using <, unique1 using >; unique1 | string4 ---------+--------- @@ -111,7 +110,6 @@ SELECT onek.unique1, onek.string4 FROM onek 984 | VVVVxx (19 rows) - -- -- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data | -- sort +1dr -2 +0n -1 @@ -142,7 +140,6 @@ SELECT onek.unique1, onek.string4 FROM onek 999 | AAAAxx (19 rows) - -- -- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data | -- sort +0nr -1 +1d -2 @@ -179,7 +176,7 @@ SELECT onek.unique1, onek.string4 FROM onek -- sort +0n -1 +1dr -2 -- SELECT onek.unique1, onek.string4 FROM onek - WHERE onek.unique1 < 20 + WHERE onek.unique1 < 20 ORDER BY unique1 using <, string4 using >; unique1 | string4 ---------+--------- @@ -238,7 +235,7 @@ SELECT onek2.* FROM onek2 WHERE onek2.unique1 < 10; -- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1 -- SELECT onek2.unique1, onek2.stringu1 FROM onek2 - WHERE onek2.unique1 < 20 + WHERE onek2.unique1 < 20 ORDER BY unique1 using >; unique1 | stringu1 ---------+---------- diff --git a/src/test/regress/expected/select_implicit.out b/src/test/regress/expected/select_implicit.out index 14fcd1c2b5..61b485fdaa 100644 --- a/src/test/regress/expected/select_implicit.out +++ b/src/test/regress/expected/select_implicit.out @@ -121,7 +121,7 @@ LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3; ^ -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b ORDER BY b; ERROR: column reference "b" is ambiguous @@ -177,7 +177,7 @@ SELECT a/2, a/2 FROM test_missing_target (5 rows) -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; b | count @@ -189,7 +189,7 @@ SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y (4 rows) -- group w/o existing GROUP BY target under ambiguous condition -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; count @@ -202,8 +202,8 @@ SELECT count(*) FROM test_missing_target x, test_missing_target y -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(*) INTO TABLE test_missing_target2 -FROM test_missing_target x, test_missing_target y +SELECT count(*) INTO TABLE test_missing_target2 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; SELECT * FROM test_missing_target2; @@ -291,14 +291,14 @@ SELECT count(b) FROM test_missing_target -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(x.a) FROM test_missing_target x, test_missing_target y +SELECT count(x.a) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b/2 
ORDER BY b/2; ERROR: column reference "b" is ambiguous LINE 3: GROUP BY b/2 ORDER BY b/2; ^ -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; ?column? | count @@ -310,7 +310,7 @@ SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y -- group w/o existing GROUP BY target under ambiguous condition -- failure expected due to ambiguous b in count(b) -SELECT count(b) FROM test_missing_target x, test_missing_target y +SELECT count(b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2; ERROR: column reference "b" is ambiguous @@ -318,8 +318,8 @@ LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar... ^ -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(x.b) INTO TABLE test_missing_target3 -FROM test_missing_target x, test_missing_target y +SELECT count(x.b) INTO TABLE test_missing_target3 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; SELECT * FROM test_missing_target3; diff --git a/src/test/regress/expected/select_implicit_1.out b/src/test/regress/expected/select_implicit_1.out index aee2da72b1..f277375ebf 100644 --- a/src/test/regress/expected/select_implicit_1.out +++ b/src/test/regress/expected/select_implicit_1.out @@ -121,7 +121,7 @@ LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3; ^ -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b ORDER BY b; ERROR: column reference "b" is ambiguous @@ -177,7 +177,7 @@ SELECT a/2, a/2 FROM test_missing_target (5 rows) -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; b | count @@ -189,7 +189,7 @@ SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y (4 rows) -- group w/o existing GROUP BY target under ambiguous condition -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; count @@ -202,8 +202,8 @@ SELECT count(*) FROM test_missing_target x, test_missing_target y -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(*) INTO TABLE test_missing_target2 -FROM test_missing_target x, test_missing_target y +SELECT count(*) INTO TABLE test_missing_target2 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; SELECT * FROM test_missing_target2; @@ -291,14 +291,14 @@ SELECT count(b) FROM test_missing_target -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(x.a) FROM test_missing_target x, test_missing_target y +SELECT count(x.a) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b/2 ORDER BY b/2; ERROR: column reference "b" is ambiguous LINE 3: GROUP BY b/2 ORDER BY b/2; ^ -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b/2, count(x.b) FROM 
test_missing_target x, test_missing_target y +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; ?column? | count @@ -310,7 +310,7 @@ SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y -- group w/o existing GROUP BY target under ambiguous condition -- failure expected due to ambiguous b in count(b) -SELECT count(b) FROM test_missing_target x, test_missing_target y +SELECT count(b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2; ERROR: column reference "b" is ambiguous @@ -318,8 +318,8 @@ LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar... ^ -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(x.b) INTO TABLE test_missing_target3 -FROM test_missing_target x, test_missing_target y +SELECT count(x.b) INTO TABLE test_missing_target3 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; SELECT * FROM test_missing_target3; diff --git a/src/test/regress/expected/select_implicit_2.out b/src/test/regress/expected/select_implicit_2.out index 250f0fedb0..91c3a24f92 100644 --- a/src/test/regress/expected/select_implicit_2.out +++ b/src/test/regress/expected/select_implicit_2.out @@ -121,7 +121,7 @@ LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3; ^ -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b ORDER BY b; ERROR: column reference "b" is ambiguous @@ -177,7 +177,7 @@ SELECT a/2, a/2 FROM test_missing_target (5 rows) -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; b | count @@ -189,7 +189,7 @@ SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y (4 rows) -- group w/o existing GROUP BY target under ambiguous condition -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; count @@ -202,8 +202,8 @@ SELECT count(*) FROM test_missing_target x, test_missing_target y -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(*) INTO TABLE test_missing_target2 -FROM test_missing_target x, test_missing_target y +SELECT count(*) INTO TABLE test_missing_target2 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; SELECT * FROM test_missing_target2; @@ -291,14 +291,14 @@ SELECT count(b) FROM test_missing_target -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(x.a) FROM test_missing_target x, test_missing_target y +SELECT count(x.a) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b/2 ORDER BY b/2; ERROR: column reference "b" is ambiguous LINE 3: GROUP BY b/2 ORDER BY b/2; ^ -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; ?column? 
| count @@ -310,7 +310,7 @@ SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y -- group w/o existing GROUP BY target under ambiguous condition -- failure expected due to ambiguous b in count(b) -SELECT count(b) FROM test_missing_target x, test_missing_target y +SELECT count(b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2; ERROR: column reference "b" is ambiguous @@ -318,8 +318,8 @@ LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar... ^ -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(x.b) INTO TABLE test_missing_target3 -FROM test_missing_target x, test_missing_target y +SELECT count(x.b) INTO TABLE test_missing_target3 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; SELECT * FROM test_missing_target3; diff --git a/src/test/regress/expected/sequence.out b/src/test/regress/expected/sequence.out index 823039ae95..19f8f1308d 100644 --- a/src/test/regress/expected/sequence.out +++ b/src/test/regress/expected/sequence.out @@ -1,16 +1,13 @@ --- --- test creation of SERIAL column --- - CREATE TABLE serialTest (f1 text, f2 serial); NOTICE: CREATE TABLE will create implicit sequence "serialtest_f2_seq" for serial column "serialtest.f2" - INSERT INTO serialTest VALUES ('foo'); INSERT INTO serialTest VALUES ('bar'); INSERT INTO serialTest VALUES ('force', 100); INSERT INTO serialTest VALUES ('wrong', NULL); ERROR: null value in column "f2" violates not-null constraint - SELECT * FROM serialTest; f1 | f2 -------+----- @@ -21,7 +18,6 @@ SELECT * FROM serialTest; -- basic sequence operations using both text and oid references CREATE SEQUENCE sequence_test; - SELECT nextval('sequence_test'::text); nextval --------- diff --git a/src/test/regress/expected/sequence_1.out b/src/test/regress/expected/sequence_1.out index a97f499e75..ae928e9071 100644 --- a/src/test/regress/expected/sequence_1.out +++ b/src/test/regress/expected/sequence_1.out @@ -1,16 +1,13 @@ --- --- test creation of SERIAL column --- - CREATE TABLE serialTest (f1 text, f2 serial); NOTICE: CREATE TABLE will create implicit sequence "serialtest_f2_seq" for serial column "serialtest.f2" - INSERT INTO serialTest VALUES ('foo'); INSERT INTO serialTest VALUES ('bar'); INSERT INTO serialTest VALUES ('force', 100); INSERT INTO serialTest VALUES ('wrong', NULL); ERROR: null value in column "f2" violates not-null constraint - SELECT * FROM serialTest; f1 | f2 -------+----- @@ -21,7 +18,6 @@ SELECT * FROM serialTest; -- basic sequence operations using both text and oid references CREATE SEQUENCE sequence_test; - SELECT nextval('sequence_test'::text); nextval --------- diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out index 7f8c05bc80..2440dcd822 100644 --- a/src/test/regress/expected/subselect.out +++ b/src/test/regress/expected/subselect.out @@ -297,7 +297,7 @@ SELECT *, ELSE 'Approved' END) ELSE 'PO' - END) + END) END) AS "Status", (CASE WHEN ord.ordercancelled @@ -312,7 +312,7 @@ END) AS "Status", ELSE 'Approved' END) ELSE 'PO' - END) + END) END) AS "Status_OK" FROM orderstest ord; SELECT * FROM orders_view; diff --git a/src/test/regress/expected/timestamp.out b/src/test/regress/expected/timestamp.out index 6def970d80..ab8faab52e 100644 --- a/src/test/regress/expected/timestamp.out +++ b/src/test/regress/expected/timestamp.out @@ -180,7 +180,7 @@ INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC'); ERROR: timestamp out of 
range: "Feb 16 17:32:01 5097 BC" LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC')... ^ -SELECT '' AS "64", d1 FROM TIMESTAMP_TBL; +SELECT '' AS "64", d1 FROM TIMESTAMP_TBL; 64 | d1 ----+----------------------------- | -infinity @@ -804,7 +804,7 @@ SELECT '' AS "54", d1 as "timestamp", (55 rows) -- TO_CHAR() -SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') +SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') FROM TIMESTAMP_TBL; to_char_1 | to_char -----------+------------------------------------------------------------------------------------------ @@ -1017,7 +1017,7 @@ SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') | 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911 (65 rows) -SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') +SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') FROM TIMESTAMP_TBL; to_char_4 | to_char -----------+------------------------------------------------- @@ -1088,7 +1088,7 @@ SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM | 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911 (65 rows) -SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') +SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') FROM TIMESTAMP_TBL; to_char_5 | to_char -----------+---------------------- @@ -1159,7 +1159,7 @@ SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') | 05 05 17 32 01 63121 (65 rows) -SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') +SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') FROM TIMESTAMP_TBL; to_char_6 | to_char -----------+------------------------------------------------- @@ -1301,7 +1301,7 @@ SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS') | 17--text--32--text--01 (65 rows) -SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') +SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') FROM TIMESTAMP_TBL; to_char_8 | to_char -----------+------------------------- @@ -1372,9 +1372,8 @@ SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') | 2001ST 2001st 2451911th (65 rows) - -SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') - FROM TIMESTAMP_TBL; +SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. 
HH:MI:SS pm') + FROM TIMESTAMP_TBL; to_char_9 | to_char -----------+--------------------------------------------------------------------- | diff --git a/src/test/regress/expected/timestamptz.out b/src/test/regress/expected/timestamptz.out index 47e3394792..9a4ce3e336 100644 --- a/src/test/regress/expected/timestamptz.out +++ b/src/test/regress/expected/timestamptz.out @@ -251,7 +251,7 @@ SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz; Wed Jul 11 06:51:14 2001 PDT (1 row) -SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL; +SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL; 64 | d1 ----+--------------------------------- | -infinity @@ -883,7 +883,7 @@ SELECT '' AS "54", d1 as timestamptz, (56 rows) -- TO_CHAR() -SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') +SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') FROM TIMESTAMPTZ_TBL; to_char_1 | to_char -----------+------------------------------------------------------------------------------------------ @@ -955,9 +955,8 @@ SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM M | MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan (66 rows) - SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') - FROM TIMESTAMPTZ_TBL; + FROM TIMESTAMPTZ_TBL; to_char_2 | to_char -----------+-------------------------------------------------------------- | @@ -1100,9 +1099,8 @@ SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') | 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911 (66 rows) - -SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') - FROM TIMESTAMPTZ_TBL; +SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') + FROM TIMESTAMPTZ_TBL; to_char_4 | to_char -----------+------------------------------------------------- | @@ -1173,8 +1171,7 @@ SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM | 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911 (66 rows) - -SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') +SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') FROM TIMESTAMPTZ_TBL; to_char_5 | to_char -----------+---------------------- @@ -1246,8 +1243,8 @@ SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') | 05 05 17 32 01 63121 (66 rows) -SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') - FROM TIMESTAMPTZ_TBL; +SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') + FROM TIMESTAMPTZ_TBL; to_char_6 | to_char -----------+------------------------------------------------- | @@ -1318,9 +1315,8 @@ SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between qu | HH:MI:SS is 05:32:01 "text between quote marks" (66 rows) - SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS') - FROM TIMESTAMPTZ_TBL; + FROM TIMESTAMPTZ_TBL; to_char_7 | to_char -----------+------------------------ | @@ -1391,7 +1387,7 @@ SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS') | 17--text--32--text--01 (66 rows) -SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') +SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') FROM TIMESTAMPTZ_TBL; to_char_8 | to_char -----------+------------------------- @@ -1463,9 +1459,8 @@ SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') | 2001ST 2001st 2451911th (66 rows) - 
-SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') - FROM TIMESTAMPTZ_TBL; +SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') + FROM TIMESTAMPTZ_TBL; to_char_9 | to_char -----------+--------------------------------------------------------------------- | diff --git a/src/test/regress/expected/tinterval.out b/src/test/regress/expected/tinterval.out index 89e850cf61..a0189729fc 100644 --- a/src/test/regress/expected/tinterval.out +++ b/src/test/regress/expected/tinterval.out @@ -14,7 +14,7 @@ INSERT INTO TINTERVAL_TBL (f1) VALUES ('["epoch" "Mon May 1 00:30:30 1995"]'); INSERT INTO TINTERVAL_TBL (f1) VALUES ('["Feb 15 1990 12:15:03" "2001-09-23 11:12:13"]'); --- badly formatted tintervals +-- badly formatted tintervals INSERT INTO TINTERVAL_TBL (f1) VALUES ('["bad time specifications" ""]'); ERROR: invalid input syntax for type abstime: "bad time specifications" @@ -146,7 +146,7 @@ SELECT '' AS fourteen, t1.f1 AS interval1, t2.f1 AS interval2 -- contains SELECT '' AS five, t1.f1 FROM TINTERVAL_TBL t1 - WHERE not t1.f1 << + WHERE not t1.f1 << tinterval '["Aug 15 14:23:19 1980" "Sep 16 14:23:19 1990"]' ORDER BY t1.f1; five | f1 diff --git a/src/test/regress/expected/transactions.out b/src/test/regress/expected/transactions.out index c4f8965fd1..84d14537f1 100644 --- a/src/test/regress/expected/transactions.out +++ b/src/test/regress/expected/transactions.out @@ -2,7 +2,7 @@ -- TRANSACTIONS -- BEGIN; -SELECT * +SELECT * INTO TABLE xacttest FROM aggtest; INSERT INTO xacttest (a, b) VALUES (777, 777.777); @@ -24,13 +24,13 @@ SELECT * FROM aggtest; (0 rows) ABORT; --- should not exist +-- should not exist SELECT oid FROM pg_class WHERE relname = 'disappear'; oid ----- (0 rows) --- should have members again +-- should have members again SELECT * FROM aggtest; a | b -----+--------- @@ -179,7 +179,6 @@ BEGIN; ROLLBACK; COMMIT; -- should not be in a transaction block WARNING: there is no transaction in progress - SELECT * FROM savepoints; a --- diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out index 1b9cdd4375..c9e8c1a141 100644 --- a/src/test/regress/expected/triggers.out +++ b/src/test/regress/expected/triggers.out @@ -19,23 +19,23 @@ create unique index pkeys_i on pkeys (pkey1, pkey2); -- (fkey1, fkey2) --> pkeys (pkey1, pkey2) -- (fkey3) --> fkeys2 (pkey23) -- -create trigger check_fkeys_pkey_exist - before insert or update on fkeys - for each row - execute procedure +create trigger check_fkeys_pkey_exist + before insert or update on fkeys + for each row + execute procedure check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2'); -create trigger check_fkeys_pkey2_exist - before insert or update on fkeys - for each row +create trigger check_fkeys_pkey2_exist + before insert or update on fkeys + for each row execute procedure check_primary_key ('fkey3', 'fkeys2', 'pkey23'); -- -- For fkeys2: -- (fkey21, fkey22) --> pkeys (pkey1, pkey2) -- -create trigger check_fkeys2_pkey_exist - before insert or update on fkeys2 - for each row - execute procedure +create trigger check_fkeys2_pkey_exist + before insert or update on fkeys2 + for each row + execute procedure check_primary_key ('fkey21', 'fkey22', 'pkeys', 'pkey1', 'pkey2'); -- Test comments COMMENT ON TRIGGER check_fkeys2_pkey_bad ON fkeys2 IS 'wrong'; @@ -48,19 +48,19 @@ COMMENT ON TRIGGER check_fkeys2_pkey_exist ON fkeys2 IS NULL; -- fkeys (fkey1, fkey2) and fkeys2 (fkey21, fkey22) -- 
create trigger check_pkeys_fkey_cascade - before delete or update on pkeys - for each row - execute procedure - check_foreign_key (2, 'cascade', 'pkey1', 'pkey2', + before delete or update on pkeys + for each row + execute procedure + check_foreign_key (2, 'cascade', 'pkey1', 'pkey2', 'fkeys', 'fkey1', 'fkey2', 'fkeys2', 'fkey21', 'fkey22'); -- -- For fkeys2: -- ON DELETE/UPDATE (pkey23) RESTRICT: -- fkeys (fkey3) -- -create trigger check_fkeys2_fkey_restrict +create trigger check_fkeys2_fkey_restrict before delete or update on fkeys2 - for each row + for each row execute procedure check_foreign_key (1, 'restrict', 'pkey23', 'fkeys', 'fkey3'); insert into fkeys2 values (10, '1', 1); insert into fkeys2 values (30, '3', 2); @@ -106,49 +106,49 @@ DROP TABLE fkeys2; -- -- Jan -- -- create table dup17 (x int4); --- --- create trigger dup17_before +-- +-- create trigger dup17_before -- before insert on dup17 --- for each row --- execute procedure +-- for each row +-- execute procedure -- funny_dup17 () -- ; --- +-- -- insert into dup17 values (17); -- select count(*) from dup17; -- insert into dup17 values (17); -- select count(*) from dup17; --- +-- -- drop trigger dup17_before on dup17; --- +-- -- create trigger dup17_after -- after insert on dup17 --- for each row --- execute procedure +-- for each row +-- execute procedure -- funny_dup17 () -- ; -- insert into dup17 values (13); -- select count(*) from dup17 where x = 13; -- insert into dup17 values (13); -- select count(*) from dup17 where x = 13; --- +-- -- DROP TABLE dup17; create sequence ttdummy_seq increment 10 start 0 minvalue 0; create table tttest ( - price_id int4, - price_val int4, + price_id int4, + price_val int4, price_on int4, price_off int4 default 999999 ); -create trigger ttdummy +create trigger ttdummy before delete or update on tttest - for each row - execute procedure + for each row + execute procedure ttdummy (price_on, price_off); -create trigger ttserial +create trigger ttserial before insert or update on tttest - for each row - execute procedure + for each row + execute procedure autoinc (price_on, ttdummy_seq); insert into tttest values (1, 1, null); insert into tttest values (2, 2, null); @@ -567,7 +567,7 @@ CREATE TABLE trigger_test ( i int, v varchar ); -CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger +CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger LANGUAGE plpgsql AS $$ declare @@ -580,7 +580,7 @@ begin relid := TG_relid::regclass; -- plpgsql can't discover its trigger data in a hash like perl and python - -- can, or by a sort of reflection like tcl can, + -- can, or by a sort of reflection like tcl can, -- so we have to hard code the names. 
raise NOTICE 'TG_NAME: %', TG_name; raise NOTICE 'TG_WHEN: %', TG_when; @@ -618,7 +618,7 @@ begin end; $$; -CREATE TRIGGER show_trigger_data_trig +CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); insert into trigger_test values(1,'insert'); @@ -658,9 +658,7 @@ NOTICE: TG_TABLE_SCHEMA: public NOTICE: TG_NARGS: 2 NOTICE: TG_ARGV: [23, skidoo] NOTICE: OLD: (1,update) - DROP TRIGGER show_trigger_data_trig on trigger_test; - DROP FUNCTION trigger_data(); DROP TABLE trigger_test; -- @@ -755,10 +753,10 @@ CREATE TABLE min_updates_test_oids ( f3 int) WITH OIDS; INSERT INTO min_updates_test VALUES ('a',1,2),('b','2',null); INSERT INTO min_updates_test_oids VALUES ('a',1,2),('b','2',null); -CREATE TRIGGER z_min_update +CREATE TRIGGER z_min_update BEFORE UPDATE ON min_updates_test FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); -CREATE TRIGGER z_min_update +CREATE TRIGGER z_min_update BEFORE UPDATE ON min_updates_test_oids FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); \set QUIET false diff --git a/src/test/regress/expected/truncate.out b/src/test/regress/expected/truncate.out index 6e190fd5f6..1817bac8e3 100644 --- a/src/test/regress/expected/truncate.out +++ b/src/test/regress/expected/truncate.out @@ -302,7 +302,7 @@ $$ LANGUAGE plpgsql; INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); CREATE TRIGGER t BEFORE TRUNCATE ON trunc_trigger_test -FOR EACH STATEMENT +FOR EACH STATEMENT EXECUTE PROCEDURE trunctrigger('before trigger truncate'); SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; Row count in test table @@ -334,7 +334,7 @@ truncate trunc_trigger_log; INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); CREATE TRIGGER tt AFTER TRUNCATE ON trunc_trigger_test -FOR EACH STATEMENT +FOR EACH STATEMENT EXECUTE PROCEDURE trunctrigger('after trigger truncate'); SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; Row count in test table diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out index aba67fcab7..9df1434a14 100644 --- a/src/test/regress/expected/tsdicts.out +++ b/src/test/regress/expected/tsdicts.out @@ -193,7 +193,7 @@ SELECT ts_lexize('hunspell', 'footballyklubber'); -- Synonim dictionary CREATE TEXT SEARCH DICTIONARY synonym ( - Template=synonym, + Template=synonym, Synonyms=synonym_sample ); SELECT ts_lexize('synonym', 'PoStGrEs'); @@ -219,7 +219,7 @@ SELECT ts_lexize('synonym', 'indices'); -- cannot pass more than one word to thesaurus. 
CREATE TEXT SEARCH DICTIONARY thesaurus ( Template=thesaurus, - DictFile=thesaurus_sample, + DictFile=thesaurus_sample, Dictionary=english_stem ); SELECT ts_lexize('thesaurus', 'one'); @@ -281,8 +281,8 @@ SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); CREATE TEXT SEARCH CONFIGURATION synonym_tst ( COPY=english ); -ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword +ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR + asciiword, hword_asciipart, asciihword WITH synonym, english_stem; SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre'); to_tsvector @@ -313,8 +313,8 @@ SELECT to_tsquery('synonym_tst', 'Index & indices'); CREATE TEXT SEARCH CONFIGURATION thesaurus_tst ( COPY=synonym_tst ); -ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword +ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR + asciiword, hword_asciipart, asciihword WITH synonym, thesaurus, english_stem; SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one'); to_tsvector diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out index 86ea5efc7b..e1d7646c0d 100644 --- a/src/test/regress/expected/tsearch.out +++ b/src/test/regress/expected/tsearch.out @@ -46,7 +46,7 @@ WHERE mapcfg = 0 OR mapdict = 0; -- Look for pg_ts_config_map entries that aren't one of parser's token types SELECT * FROM ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt + FROM pg_ts_config ) AS tt RIGHT JOIN pg_ts_config_map AS m ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) WHERE @@ -188,7 +188,6 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; 494 (1 row) - RESET enable_seqscan; INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; @@ -672,7 +671,7 @@ to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); (1 row) ---Check if headline fragments work +--Check if headline fragments work SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out index b7433653d1..556672d408 100644 --- a/src/test/regress/expected/type_sanity.out +++ b/src/test/regress/expected/type_sanity.out @@ -72,7 +72,7 @@ WHERE p1.typtype in ('b','e') AND p1.typname NOT LIKE E'\\_%' AND NOT EXISTS (3 rows) -- Make sure typarray points to a varlena array type of our own base -SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype, +SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype, p2.typelem, p2.typlen FROM pg_type p1 LEFT JOIN pg_type p2 ON (p1.typarray = p2.oid) WHERE p1.typarray <> 0 AND diff --git a/src/test/regress/expected/varchar.out b/src/test/regress/expected/varchar.out index 48a77f5e13..e1120234ac 100644 --- a/src/test/regress/expected/varchar.out +++ b/src/test/regress/expected/varchar.out @@ -4,13 +4,13 @@ CREATE TABLE VARCHAR_TBL(f1 varchar(1)); INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); INSERT INTO VARCHAR_TBL (f1) VALUES (2); INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); --- 
zero-length char +-- zero-length char INSERT INTO VARCHAR_TBL (f1) VALUES (''); --- try varchar's of greater than 1 length +-- try varchar's of greater than 1 length INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); ERROR: value too long for type character varying(1) INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/expected/varchar_1.out b/src/test/regress/expected/varchar_1.out index d726e4cc43..35f6180d48 100644 --- a/src/test/regress/expected/varchar_1.out +++ b/src/test/regress/expected/varchar_1.out @@ -4,13 +4,13 @@ CREATE TABLE VARCHAR_TBL(f1 varchar(1)); INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); INSERT INTO VARCHAR_TBL (f1) VALUES (2); INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO VARCHAR_TBL (f1) VALUES (''); --- try varchar's of greater than 1 length +-- try varchar's of greater than 1 length INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); ERROR: value too long for type character varying(1) INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/expected/varchar_2.out b/src/test/regress/expected/varchar_2.out index 79c4782462..49add1f621 100644 --- a/src/test/regress/expected/varchar_2.out +++ b/src/test/regress/expected/varchar_2.out @@ -4,13 +4,13 @@ CREATE TABLE VARCHAR_TBL(f1 varchar(1)); INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); INSERT INTO VARCHAR_TBL (f1) VALUES (2); INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO VARCHAR_TBL (f1) VALUES (''); --- try varchar's of greater than 1 length +-- try varchar's of greater than 1 length INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); ERROR: value too long for type character varying(1) INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/expected/window.out b/src/test/regress/expected/window.out index 0481cc6dd8..aa0a0c2067 100644 --- a/src/test/regress/expected/window.out +++ b/src/test/regress/expected/window.out @@ -361,7 +361,7 @@ SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM te (10 rows) -- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window. 
-SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; +SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; last_value | ten | four ------------+-----+------ 0 | 0 | 0 @@ -409,7 +409,7 @@ SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four | 3 | 3 (10 rows) -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum +SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum FROM tenk1 GROUP BY ten, two; ten | two | gsum | wsum -----+-----+-------+-------- @@ -436,8 +436,8 @@ SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE t 2 | 3 (6 rows) -SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum +SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + + sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum FROM tenk1 WHERE unique2 < 10; cntsum -------- @@ -455,8 +455,8 @@ SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + -- opexpr with different windows evaluation. SELECT * FROM( - SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, + SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + + sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount, sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum FROM tenk1 @@ -481,7 +481,7 @@ SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHE 3.0000000000000000 (10 rows) -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum +SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten); ten | two | gsum | wsum -----+-----+-------+-------- diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source index 2d34c72b6c..ab3f5083e8 100644 --- a/src/test/regress/input/copy.source +++ b/src/test/regress/input/copy.source @@ -95,8 +95,8 @@ select * from copytest except select * from copytest2; -- test header line feature create temp table copytest3 ( - c1 int, - "col with , comma" text, + c1 int, + "col with , comma" text, "col with "" quote" int); copy copytest3 from stdin csv header; diff --git a/src/test/regress/input/create_function_2.source b/src/test/regress/input/create_function_2.source index 459ca3c0a3..6aed5f008b 100644 --- a/src/test/regress/input/create_function_2.source +++ b/src/test/regress/input/create_function_2.source @@ -2,7 +2,7 @@ -- CREATE_FUNCTION_2 -- CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r + RETURNS setof hobbies_r AS 'select * from hobbies_r where person = $1.name' LANGUAGE SQL; @@ -27,7 +27,7 @@ CREATE FUNCTION equipment(hobbies_r) CREATE FUNCTION user_relns() RETURNS setof name - AS 'select relname + AS 'select relname from pg_class c, pg_namespace n where relnamespace = n.oid and (nspname !~ ''pg_.*'' and nspname <> ''information_schema'') and diff --git a/src/test/regress/input/misc.source b/src/test/regress/input/misc.source index fa58b54564..0930a6a4bb 100644 --- a/src/test/regress/input/misc.source +++ b/src/test/regress/input/misc.source @@ -17,16 +17,16 @@ UPDATE onek -- UPDATE onek2 -- SET unique1 = onek2.unique1 + 1; ---UPDATE onek2 +--UPDATE onek2 -- SET unique1 = onek2.unique1 - 1; -- -- BTREE shutting out 
non-functional updates -- --- the following two tests seem to take a long time on some +-- the following two tests seem to take a long time on some -- systems. This non-func update stuff needs to be examined -- more closely. - jolly (2/22/96) --- +-- UPDATE tmp SET stringu1 = reverse_name(onek.stringu1) FROM onek @@ -87,12 +87,12 @@ SELECT * FROM stud_emp; -- SELECT * FROM a_star*; -SELECT * +SELECT * FROM b_star* x WHERE x.b = text 'bumble' or x.a < 3; -SELECT class, a - FROM c_star* x +SELECT class, a + FROM c_star* x WHERE x.c ~ text 'hi'; SELECT class, b, c @@ -137,7 +137,7 @@ SELECT class, foo ALTER TABLE a_star RENAME COLUMN foo TO aa; -SELECT * +SELECT * from a_star* WHERE aa < 1000; diff --git a/src/test/regress/output/copy.source b/src/test/regress/output/copy.source index 5a88d6ef20..febca712bb 100644 --- a/src/test/regress/output/copy.source +++ b/src/test/regress/output/copy.source @@ -63,8 +63,8 @@ select * from copytest except select * from copytest2; -- test header line feature create temp table copytest3 ( - c1 int, - "col with , comma" text, + c1 int, + "col with , comma" text, "col with "" quote" int); copy copytest3 from stdin csv header; copy copytest3 to stdout csv header; diff --git a/src/test/regress/output/create_function_2.source b/src/test/regress/output/create_function_2.source index 0feb975355..94ab7eba56 100644 --- a/src/test/regress/output/create_function_2.source +++ b/src/test/regress/output/create_function_2.source @@ -2,7 +2,7 @@ -- CREATE_FUNCTION_2 -- CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r + RETURNS setof hobbies_r AS 'select * from hobbies_r where person = $1.name' LANGUAGE SQL; CREATE FUNCTION hobby_construct(text, text) @@ -21,7 +21,7 @@ CREATE FUNCTION equipment(hobbies_r) LANGUAGE SQL; CREATE FUNCTION user_relns() RETURNS setof name - AS 'select relname + AS 'select relname from pg_class c, pg_namespace n where relnamespace = n.oid and (nspname !~ ''pg_.*'' and nspname <> ''information_schema'') and diff --git a/src/test/regress/output/misc.source b/src/test/regress/output/misc.source index 0effa4b853..c225d0f37f 100644 --- a/src/test/regress/output/misc.source +++ b/src/test/regress/output/misc.source @@ -13,15 +13,15 @@ UPDATE onek -- -- UPDATE onek2 -- SET unique1 = onek2.unique1 + 1; ---UPDATE onek2 +--UPDATE onek2 -- SET unique1 = onek2.unique1 - 1; -- -- BTREE shutting out non-functional updates -- --- the following two tests seem to take a long time on some +-- the following two tests seem to take a long time on some -- systems. This non-func update stuff needs to be examined -- more closely. - jolly (2/22/96) --- +-- UPDATE tmp SET stringu1 = reverse_name(onek.stringu1) FROM onek @@ -136,7 +136,7 @@ SELECT * FROM a_star*; f | (50 rows) -SELECT * +SELECT * FROM b_star* x WHERE x.b = text 'bumble' or x.a < 3; class | a | b @@ -144,8 +144,8 @@ SELECT * b | | bumble (1 row) -SELECT class, a - FROM c_star* x +SELECT class, a + FROM c_star* x WHERE x.c ~ text 'hi'; class | a -------+---- @@ -309,7 +309,7 @@ SELECT class, foo (25 rows) ALTER TABLE a_star RENAME COLUMN foo TO aa; -SELECT * +SELECT * from a_star* WHERE aa < 1000; class | aa diff --git a/src/test/regress/sql/abstime.sql b/src/test/regress/sql/abstime.sql index cbaeb62957..4ab821b1b8 100644 --- a/src/test/regress/sql/abstime.sql +++ b/src/test/regress/sql/abstime.sql @@ -6,7 +6,7 @@ -- -- timezones may vary based not only on location but the operating --- system. the main correctness issue is that the OS may not get +-- system. 
the main correctness issue is that the OS may not get -- daylight savings time right for times prior to Unix epoch (jan 1 1970). -- @@ -27,11 +27,11 @@ INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'infinity'); INSERT INTO ABSTIME_TBL (f1) VALUES (abstime '-infinity'); INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'May 10, 1947 23:59:12'); --- what happens if we specify slightly misformatted abstime? +-- what happens if we specify slightly misformatted abstime? INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'); INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'); --- badly formatted abstimes: these should result in invalid abstimes +-- badly formatted abstimes: these should result in invalid abstimes INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format'); INSERT INTO ABSTIME_TBL (f1) VALUES ('Jun 10, 1843'); diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql index 3825d7b302..232649937a 100644 --- a/src/test/regress/sql/aggregates.sql +++ b/src/test/regress/sql/aggregates.sql @@ -99,7 +99,7 @@ CREATE TEMPORARY TABLE bitwise_test( ); -- empty case -SELECT +SELECT BIT_AND(i2) AS "?", BIT_OR(i4) AS "?" FROM bitwise_test; @@ -159,7 +159,7 @@ SELECT boolor_statefunc(FALSE, TRUE) AS "t", NOT boolor_statefunc(FALSE, FALSE) AS "t"; -CREATE TEMPORARY TABLE bool_test( +CREATE TEMPORARY TABLE bool_test( b1 BOOL, b2 BOOL, b3 BOOL, diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index 3e1646a96d..c6015cbb40 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -62,8 +62,8 @@ ALTER TABLE tmp ADD COLUMN z int2[]; INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, v, w, x, y, z) - VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', @@ -73,7 +73,7 @@ SELECT * FROM tmp; DROP TABLE tmp; --- the wolf bug - schema mods caused inconsistent row descriptors +-- the wolf bug - schema mods caused inconsistent row descriptors CREATE TABLE tmp ( initial int4 ); @@ -131,8 +131,8 @@ ALTER TABLE tmp ADD COLUMN z int2[]; INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, v, w, x, y, z) - VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', @@ -176,7 +176,7 @@ ALTER TABLE tmp_view RENAME TO tmp_view_new; ANALYZE tenk1; set enable_seqscan to off; set enable_bitmapscan to off; --- 5 values, sorted +-- 5 values, sorted SELECT unique1 FROM tenk1 WHERE unique1 < 5; reset enable_seqscan; reset enable_bitmapscan; @@ -1053,7 +1053,7 @@ insert into anothertab (atcol1, atcol2) values (default, null); select * from anothertab; alter table anothertab alter column atcol2 type text - using case 
when atcol2 is true then 'IT WAS TRUE' + using case when atcol2 is true then 'IT WAS TRUE' when atcol2 is false then 'IT WAS FALSE' else 'IT WAS NULL!' end; diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql index a75b8c4d2d..b0c096d9e5 100644 --- a/src/test/regress/sql/arrays.sql +++ b/src/test/regress/sql/arrays.sql @@ -6,7 +6,7 @@ CREATE TABLE arrtest ( a int2[], b int4[][][], c name[], - d text[][], + d text[][], e float8[], f char(5)[], g varchar(5)[] @@ -27,7 +27,7 @@ INSERT INTO arrtest (f) VALUES ('{"too long"}'); INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g) - VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}', + VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}', '{{"elt1", "elt2"}}', '{"3.4", "6.7"}', '{"abc","abcde"}', '{"abc","abcde"}'); @@ -40,7 +40,7 @@ SELECT * FROM arrtest; SELECT arrtest.a[1], arrtest.b[1][1][1], arrtest.c[1], - arrtest.d[1][1], + arrtest.d[1][1], arrtest.e[0] FROM arrtest; @@ -49,7 +49,7 @@ SELECT a[1], b[1][1][1], c[1], d[1][1], e[0] SELECT a[1:3], b[1:1][1:2][1:2], - c[1:2], + c[1:2], d[1:1][1:2] FROM arrtest; @@ -59,10 +59,10 @@ SELECT array_ndims(a) AS a,array_ndims(b) AS b,array_ndims(c) AS c SELECT array_dims(a) AS a,array_dims(b) AS b,array_dims(c) AS c FROM arrtest; --- returns nothing +-- returns nothing SELECT * FROM arrtest - WHERE a[1] < 5 and + WHERE a[1] < 5 and c = '{"foobar"}'::_name; UPDATE arrtest @@ -82,7 +82,7 @@ SELECT a,b,c FROM arrtest; SELECT a[1:3], b[1:1][1:2][1:2], - c[1:2], + c[1:2], d[1:1][2:2] FROM arrtest; @@ -346,12 +346,12 @@ drop type _comptype; drop table comptable; drop type comptype; -create or replace function unnest1(anyarray) +create or replace function unnest1(anyarray) returns setof anyelement as $$ select $1[s] from generate_subscripts($1,1) g(s); $$ language sql immutable; -create or replace function unnest2(anyarray) +create or replace function unnest2(anyarray) returns setof anyelement as $$ select $1[s1][s2] from generate_subscripts($1,1) g1(s1), generate_subscripts($1,2) g2(s2); diff --git a/src/test/regress/sql/bit.sql b/src/test/regress/sql/bit.sql index 73ddd379c0..419d47c8b7 100644 --- a/src/test/regress/sql/bit.sql +++ b/src/test/regress/sql/bit.sql @@ -16,7 +16,7 @@ INSERT INTO BIT_TABLE VALUES (B'101011111010'); -- too long --INSERT INTO BIT_TABLE VALUES ('X554'); --INSERT INTO BIT_TABLE VALUES ('X555'); -SELECT * FROM BIT_TABLE; +SELECT * FROM BIT_TABLE; CREATE TABLE VARBIT_TABLE(v BIT VARYING(11)); @@ -27,12 +27,12 @@ INSERT INTO VARBIT_TABLE VALUES (B'01010101010'); INSERT INTO VARBIT_TABLE VALUES (B'101011111010'); -- too long --INSERT INTO VARBIT_TABLE VALUES ('X554'); --INSERT INTO VARBIT_TABLE VALUES ('X555'); -SELECT * FROM VARBIT_TABLE; +SELECT * FROM VARBIT_TABLE; -- Concatenation SELECT v, b, (v || b) AS concat - FROM BIT_TABLE, VARBIT_TABLE + FROM BIT_TABLE, VARBIT_TABLE ORDER BY 3; -- Length @@ -69,7 +69,7 @@ XFA50 X05AF X1234 XFFF5 \. -SELECT a, b, ~a AS "~ a", a & b AS "a & b", +SELECT a, b, ~a AS "~ a", a & b AS "a & b", a | b AS "a | b", a # b AS "a # b" FROM varbit_table; SELECT a,b,a=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM varbit_table; @@ -93,7 +93,7 @@ XFA50 X05AF X1234 XFFF5 \. 
-SELECT a,b,~a AS "~ a",a & b AS "a & b", +SELECT a,b,~a AS "~ a",a & b AS "a & b", a|b AS "a | b", a # b AS "a # b" FROM bit_table; SELECT a,b,a=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM bit_table; @@ -166,7 +166,7 @@ INSERT INTO BIT_SHIFT_TABLE SELECT b>>4 FROM BIT_SHIFT_TABLE; INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE; SELECT POSITION(B'1101' IN b), POSITION(B'11011' IN b), - b + b FROM BIT_SHIFT_TABLE ; @@ -178,7 +178,7 @@ INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0000' AS BIT VARYING(12)) >>4 INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE; SELECT POSITION(B'1101' IN v), POSITION(B'11011' IN v), - v + v FROM VARBIT_SHIFT_TABLE ; diff --git a/src/test/regress/sql/bitmapops.sql b/src/test/regress/sql/bitmapops.sql index 0b5477e8e1..498f4721b5 100644 --- a/src/test/regress/sql/bitmapops.sql +++ b/src/test/regress/sql/bitmapops.sql @@ -14,7 +14,7 @@ CREATE TABLE bmscantest (a int, b int, t text); -INSERT INTO bmscantest +INSERT INTO bmscantest SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,70000) r; diff --git a/src/test/regress/sql/boolean.sql b/src/test/regress/sql/boolean.sql index a605302e12..d92a41ffe1 100644 --- a/src/test/regress/sql/boolean.sql +++ b/src/test/regress/sql/boolean.sql @@ -100,7 +100,7 @@ INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True'); INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true'); --- BOOLTBL1 should be full of true's at this point +-- BOOLTBL1 should be full of true's at this point SELECT '' AS t_3, BOOLTBL1.* FROM BOOLTBL1; @@ -109,7 +109,7 @@ SELECT '' AS t_3, BOOLTBL1.* WHERE f1 = bool 'true'; -SELECT '' AS t_3, BOOLTBL1.* +SELECT '' AS t_3, BOOLTBL1.* FROM BOOLTBL1 WHERE f1 <> bool 'false'; @@ -119,7 +119,7 @@ SELECT '' AS zero, BOOLTBL1.* INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f'); -SELECT '' AS f_1, BOOLTBL1.* +SELECT '' AS f_1, BOOLTBL1.* FROM BOOLTBL1 WHERE f1 = bool 'false'; @@ -136,10 +136,10 @@ INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE'); -- This is now an invalid expression -- For pre-v6.3 this evaluated to false - thomas 1997-10-23 -INSERT INTO BOOLTBL2 (f1) - VALUES (bool 'XXX'); +INSERT INTO BOOLTBL2 (f1) + VALUES (bool 'XXX'); --- BOOLTBL2 should be full of false's at this point +-- BOOLTBL2 should be full of false's at this point SELECT '' AS f_4, BOOLTBL2.* FROM BOOLTBL2; diff --git a/src/test/regress/sql/box.sql b/src/test/regress/sql/box.sql index f1ff8158b0..234c2f28a3 100644 --- a/src/test/regress/sql/box.sql +++ b/src/test/regress/sql/box.sql @@ -25,13 +25,13 @@ INSERT INTO BOX_TBL (f1) VALUES ('(2.0,2.0,0.0,0.0)'); INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)'); --- degenerate cases where the box is a line or a point --- note that lines and points boxes all have zero area +-- degenerate cases where the box is a line or a point +-- note that lines and points boxes all have zero area INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)'); INSERT INTO BOX_TBL (f1) VALUES ('(3.0, 3.0,3.0,3.0)'); --- badly formatted box inputs +-- badly formatted box inputs INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad'); @@ -42,78 +42,78 @@ SELECT '' AS four, * FROM BOX_TBL; SELECT '' AS four, b.*, area(b.f1) as barea FROM BOX_TBL b; --- overlap +-- overlap SELECT '' AS three, b.f1 - FROM BOX_TBL b + FROM BOX_TBL b WHERE b.f1 && box '(2.5,2.5,1.0,1.0)'; --- left-or-overlap (x only) +-- left-or-overlap (x 
only) SELECT '' AS two, b1.* FROM BOX_TBL b1 WHERE b1.f1 &< box '(2.0,2.0,2.5,2.5)'; --- right-or-overlap (x only) +-- right-or-overlap (x only) SELECT '' AS two, b1.* FROM BOX_TBL b1 WHERE b1.f1 &> box '(2.0,2.0,2.5,2.5)'; --- left of +-- left of SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE b.f1 << box '(3.0,3.0,5.0,5.0)'; --- area <= +-- area <= SELECT '' AS four, b.f1 FROM BOX_TBL b WHERE b.f1 <= box '(3.0,3.0,5.0,5.0)'; --- area < +-- area < SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE b.f1 < box '(3.0,3.0,5.0,5.0)'; --- area = +-- area = SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE b.f1 = box '(3.0,3.0,5.0,5.0)'; --- area > +-- area > SELECT '' AS two, b.f1 - FROM BOX_TBL b -- zero area - WHERE b.f1 > box '(3.5,3.0,4.5,3.0)'; + FROM BOX_TBL b -- zero area + WHERE b.f1 > box '(3.5,3.0,4.5,3.0)'; --- area >= +-- area >= SELECT '' AS four, b.f1 - FROM BOX_TBL b -- zero area + FROM BOX_TBL b -- zero area WHERE b.f1 >= box '(3.5,3.0,4.5,3.0)'; --- right of +-- right of SELECT '' AS two, b.f1 FROM BOX_TBL b WHERE box '(3.0,3.0,5.0,5.0)' >> b.f1; --- contained in +-- contained in SELECT '' AS three, b.f1 FROM BOX_TBL b WHERE b.f1 <@ box '(0,0,3,3)'; --- contains +-- contains SELECT '' AS three, b.f1 FROM BOX_TBL b WHERE box '(0,0,3,3)' @> b.f1; --- box equality +-- box equality SELECT '' AS one, b.f1 FROM BOX_TBL b WHERE box '(1,1,3,3)' ~= b.f1; --- center of box, left unary operator +-- center of box, left unary operator SELECT '' AS four, @@(b1.f1) AS p FROM BOX_TBL b1; --- wholly-contained +-- wholly-contained SELECT '' AS one, b1.*, b2.* - FROM BOX_TBL b1, BOX_TBL b2 + FROM BOX_TBL b1, BOX_TBL b2 WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1; SELECT '' AS four, height(f1), width(f1) FROM BOX_TBL; diff --git a/src/test/regress/sql/char.sql b/src/test/regress/sql/char.sql index fcaef7e086..235ec62823 100644 --- a/src/test/regress/sql/char.sql +++ b/src/test/regress/sql/char.sql @@ -17,17 +17,17 @@ INSERT INTO CHAR_TBL (f1) VALUES ('a'); INSERT INTO CHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO CHAR_TBL (f1) VALUES ('1'); INSERT INTO CHAR_TBL (f1) VALUES (2); INSERT INTO CHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO CHAR_TBL (f1) VALUES (''); --- try char's of greater than 1 length +-- try char's of greater than 1 length INSERT INTO CHAR_TBL (f1) VALUES ('cd'); INSERT INTO CHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/sql/cluster.sql b/src/test/regress/sql/cluster.sql index 3dea2e4b29..8d536c8c36 100644 --- a/src/test/regress/sql/cluster.sql +++ b/src/test/regress/sql/cluster.sql @@ -174,7 +174,7 @@ UPDATE clustertest SET key = 100 WHERE key = 10; -- Test update where the new row version is found first in the scan UPDATE clustertest SET key = 35 WHERE key = 40; --- Test longer update chain +-- Test longer update chain UPDATE clustertest SET key = 60 WHERE key = 50; UPDATE clustertest SET key = 70 WHERE key = 60; UPDATE clustertest SET key = 80 WHERE key = 70; diff --git a/src/test/regress/sql/copyselect.sql b/src/test/regress/sql/copyselect.sql index beca507ae7..621d49444d 100644 --- a/src/test/regress/sql/copyselect.sql +++ b/src/test/regress/sql/copyselect.sql @@ -70,7 +70,7 @@ copy (select t from test1 where id = 1) to stdout csv header force quote t; -- This should fail -- \copy v_test1 to stdout --- +-- -- Test \copy (select ...) 
-- \copy (select "id",'id','id""'||t,(id + 1)*id,t,"test1"."t" from test1 where id=3) to stdout diff --git a/src/test/regress/sql/create_aggregate.sql b/src/test/regress/sql/create_aggregate.sql index 6174248252..84f9a4f1e0 100644 --- a/src/test/regress/sql/create_aggregate.sql +++ b/src/test/regress/sql/create_aggregate.sql @@ -4,7 +4,7 @@ -- all functions CREATEd CREATE AGGREGATE newavg ( - sfunc = int4_avg_accum, basetype = int4, stype = _int8, + sfunc = int4_avg_accum, basetype = int4, stype = _int8, finalfunc = int8_avg, initcond1 = '{0,0}' ); @@ -16,7 +16,7 @@ COMMENT ON AGGREGATE newavg (int4) IS NULL; -- without finalfunc; test obsolete spellings 'sfunc1' etc CREATE AGGREGATE newsum ( - sfunc1 = int4pl, basetype = int4, stype1 = int4, + sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond1 = '0' ); diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql index a4261c0f5e..abf222de8e 100644 --- a/src/test/regress/sql/create_index.sql +++ b/src/test/regress/sql/create_index.sql @@ -80,8 +80,8 @@ CREATE INDEX gpointind ON point_tbl USING gist (f1); CREATE TEMP TABLE gpolygon_tbl AS SELECT polygon(home_base) AS f1 FROM slow_emp4000; -INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); -INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); +INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); +INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); CREATE TEMP TABLE gcircle_tbl AS SELECT circle(home_base) AS f1 FROM slow_emp4000; @@ -424,5 +424,5 @@ SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NUL RESET enable_seqscan; RESET enable_indexscan; RESET enable_bitmapscan; - + DROP TABLE onek_with_null; diff --git a/src/test/regress/sql/create_misc.sql b/src/test/regress/sql/create_misc.sql index 40c9b417d9..705a7e55b1 100644 --- a/src/test/regress/sql/create_misc.sql +++ b/src/test/regress/sql/create_misc.sql @@ -42,14 +42,14 @@ SELECT * FROM road WHERE name ~ '.*Ramp'; -INSERT INTO ihighway - SELECT * - FROM road +INSERT INTO ihighway + SELECT * + FROM road WHERE name ~ 'I- .*'; -INSERT INTO shighway - SELECT * - FROM road +INSERT INTO shighway + SELECT * + FROM road WHERE name ~ 'State Hwy.*'; UPDATE shighway @@ -154,7 +154,7 @@ INSERT INTO f_star (class, a, e, f) VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon); INSERT INTO f_star (class, c, e, f) - VALUES ('f', 'hi keith'::name, '-8'::int2, + VALUES ('f', 'hi keith'::name, '-8'::int2, '(1111,3333),(2222,4444)'::polygon); INSERT INTO f_star (class, a, c) @@ -164,7 +164,7 @@ INSERT INTO f_star (class, a, e) VALUES ('f', 25, '-9'::int2); INSERT INTO f_star (class, a, f) - VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon); + VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon); INSERT INTO f_star (class, c, e) VALUES ('f', 'hi allison'::name, '-10'::int2); @@ -182,7 +182,7 @@ INSERT INTO f_star (class, c) VALUES ('f', 'hi carl'::name); INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2); -INSERT INTO f_star (class, f) +INSERT INTO f_star (class, f) VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon); INSERT INTO f_star (class) VALUES ('f'); @@ -192,8 +192,8 @@ INSERT INTO f_star (class) VALUES ('f'); -- for internal portal (cursor) tests -- CREATE TABLE iportaltest ( - i int4, - d float4, + i int4, + d float4, p polygon ); diff --git a/src/test/regress/sql/create_operator.sql b/src/test/regress/sql/create_operator.sql index 5ce8128a24..dcad804eec 100644 --- a/src/test/regress/sql/create_operator.sql 
+++ b/src/test/regress/sql/create_operator.sql @@ -2,11 +2,11 @@ -- CREATE_OPERATOR -- -CREATE OPERATOR ## ( +CREATE OPERATOR ## ( leftarg = path, rightarg = path, procedure = path_inter, - commutator = ## + commutator = ## ); CREATE OPERATOR <% ( @@ -14,12 +14,12 @@ CREATE OPERATOR <% ( rightarg = widget, procedure = pt_in_widget, commutator = >% , - negator = >=% + negator = >=% ); CREATE OPERATOR @#@ ( - rightarg = int8, -- left unary - procedure = numeric_fac + rightarg = int8, -- left unary + procedure = numeric_fac ); CREATE OPERATOR #@# ( @@ -27,9 +27,9 @@ CREATE OPERATOR #@# ( procedure = numeric_fac ); -CREATE OPERATOR #%# ( - leftarg = int8, -- right unary - procedure = numeric_fac +CREATE OPERATOR #%# ( + leftarg = int8, -- right unary + procedure = numeric_fac ); -- Test comments diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql index f491e8c142..e622b1f0f5 100644 --- a/src/test/regress/sql/create_table.sql +++ b/src/test/regress/sql/create_table.sql @@ -6,7 +6,7 @@ -- CLASS DEFINITIONS -- CREATE TABLE hobbies_r ( - name text, + name text, person text ); @@ -143,7 +143,7 @@ CREATE TABLE real_city ( -- f inherits from e (three-level single inheritance) -- CREATE TABLE a_star ( - class char, + class char, a int4 ); @@ -194,7 +194,7 @@ CREATE TABLE hash_f8_heap ( -- don't include the hash_ovfl_heap stuff in the distribution -- the data set is too large for what it's worth --- +-- -- CREATE TABLE hash_ovfl_heap ( -- x int4, -- y int4 @@ -216,7 +216,7 @@ CREATE TABLE bt_txt_heap ( ); CREATE TABLE bt_f8_heap ( - seqno float8, + seqno float8, random int4 ); @@ -232,11 +232,11 @@ CREATE TABLE array_index_op_test ( t text[] ); -CREATE TABLE IF NOT EXISTS test_tsvector( - t text, - a tsvector +CREATE TABLE IF NOT EXISTS test_tsvector( + t text, + a tsvector ); -CREATE TABLE IF NOT EXISTS test_tsvector( +CREATE TABLE IF NOT EXISTS test_tsvector( t text ); diff --git a/src/test/regress/sql/create_type.sql b/src/test/regress/sql/create_type.sql index c667313c71..a4906b64e1 100644 --- a/src/test/regress/sql/create_type.sql +++ b/src/test/regress/sql/create_type.sql @@ -8,7 +8,7 @@ -- of the "old style" approach of making the functions first. 
-- CREATE TYPE widget ( - internallength = 24, + internallength = 24, input = widget_in, output = widget_out, typmod_in = numerictypmodin, @@ -16,10 +16,10 @@ CREATE TYPE widget ( alignment = double ); -CREATE TYPE city_budget ( - internallength = 16, - input = int44in, - output = int44out, +CREATE TYPE city_budget ( + internallength = 16, + input = int44in, + output = int44out, element = int4, category = 'x', -- just to verify the system will take it preferred = true -- ditto diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql index f8942c93f5..86cfc5162c 100644 --- a/src/test/regress/sql/create_view.sql +++ b/src/test/regress/sql/create_view.sql @@ -5,12 +5,12 @@ -- CREATE VIEW street AS - SELECT r.name, r.thepath, c.cname AS cname + SELECT r.name, r.thepath, c.cname AS cname FROM ONLY road r, real_city c WHERE c.outline ## r.thepath; CREATE VIEW iexit AS - SELECT ih.name, ih.thepath, + SELECT ih.name, ih.thepath, interpt_pp(ih.thepath, r.thepath) AS exit FROM ihighway ih, ramp r WHERE ih.thepath ## r.thepath; @@ -61,7 +61,7 @@ CREATE OR REPLACE VIEW viewtest AS CREATE OR REPLACE VIEW viewtest AS SELECT a, b::numeric FROM viewtest_tbl; --- should work +-- should work CREATE OR REPLACE VIEW viewtest AS SELECT a, b, 0 AS c FROM viewtest_tbl; @@ -135,11 +135,11 @@ CREATE VIEW v9 AS SELECT seq1.is_called FROM seq1; CREATE VIEW v13_temp AS SELECT seq1_temp.is_called FROM seq1_temp; SELECT relname FROM pg_class - WHERE relname LIKE 'v_' + WHERE relname LIKE 'v_' AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'temp_view_test') ORDER BY relname; SELECT relname FROM pg_class - WHERE relname LIKE 'v%' + WHERE relname LIKE 'v%' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%') ORDER BY relname; @@ -164,7 +164,7 @@ SELECT relname FROM pg_class AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'testviewschm2') ORDER BY relname; SELECT relname FROM pg_class - WHERE relname LIKE 'temporal%' + WHERE relname LIKE 'temporal%' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%') ORDER BY relname; diff --git a/src/test/regress/sql/drop.sql b/src/test/regress/sql/drop.sql index 5df1b6adce..da9297d8b6 100644 --- a/src/test/regress/sql/drop.sql +++ b/src/test/regress/sql/drop.sql @@ -5,7 +5,7 @@ -- -- this will fail if the user is not the postgres superuser. -- if it does, don't worry about it (you can turn usersuper --- back on as "postgres"). too many people don't follow +-- back on as "postgres"). too many people don't follow -- directions and run this as "postgres", though... 
-- UPDATE pg_user @@ -47,14 +47,14 @@ DROP OPERATOR ## (path, path); DROP OPERATOR <% (point, widget); --- left unary +-- left unary DROP OPERATOR @#@ (none, int4); --- right unary -DROP OPERATOR #@# (int4, none); +-- right unary +DROP OPERATOR #@# (int4, none); --- right unary -DROP OPERATOR #%# (int4, none); +-- right unary +DROP OPERATOR #%# (int4, none); -- diff --git a/src/test/regress/sql/drop_if_exists.sql b/src/test/regress/sql/drop_if_exists.sql index 62b3f579a5..3d7e46cf2e 100644 --- a/src/test/regress/sql/drop_if_exists.sql +++ b/src/test/regress/sql/drop_if_exists.sql @@ -1,6 +1,6 @@ --- +-- -- IF EXISTS tests --- +-- -- table (will be really dropped at the end) diff --git a/src/test/regress/sql/errors.sql b/src/test/regress/sql/errors.sql index cf63474160..2ee707c5c7 100644 --- a/src/test/regress/sql/errors.sql +++ b/src/test/regress/sql/errors.sql @@ -8,18 +8,18 @@ select 1; -- -- UNSUPPORTED STUFF - --- doesn't work + +-- doesn't work -- notify pg_class -- -- -- SELECT - --- missing relation name + +-- missing relation name select; --- no such relation +-- no such relation select * from nonesuch; -- missing target list @@ -43,74 +43,74 @@ select distinct on (foobar) * from pg_database; -- -- DELETE - --- missing relation name (this had better not wildcard!) + +-- missing relation name (this had better not wildcard!) delete from; --- no such relation +-- no such relation delete from nonesuch; -- -- DROP - --- missing relation name (this had better not wildcard!) + +-- missing relation name (this had better not wildcard!) drop table; --- no such relation +-- no such relation drop table nonesuch; -- -- ALTER TABLE - --- relation renaming --- missing relation name +-- relation renaming + +-- missing relation name alter table rename; --- no such relation +-- no such relation alter table nonesuch rename to newnonesuch; --- no such relation +-- no such relation alter table nonesuch rename to stud_emp; --- conflict +-- conflict alter table stud_emp rename to aggtest; --- self-conflict +-- self-conflict alter table stud_emp rename to stud_emp; --- attribute renaming +-- attribute renaming --- no such relation +-- no such relation alter table nonesuchrel rename column nonesuchatt to newnonesuchatt; --- no such attribute +-- no such attribute alter table emp rename column nonesuchatt to newnonesuchatt; --- conflict +-- conflict alter table emp rename column salary to manager; --- conflict +-- conflict alter table emp rename column salary to oid; -- -- TRANSACTION STUFF - --- not in a xact + +-- not in a xact abort; --- not in a xact +-- not in a xact end; -- -- CREATE AGGREGATE --- sfunc/finalfunc type disagreement +-- sfunc/finalfunc type disagreement create aggregate newavg2 (sfunc = int4pl, basetype = int4, stype = int4, @@ -125,33 +125,33 @@ create aggregate newcnt1 (sfunc = int4inc, -- -- DROP INDEX - --- missing index name + +-- missing index name drop index; --- bad index name +-- bad index name drop index 314159; --- no such index +-- no such index drop index nonesuch; -- -- DROP AGGREGATE - --- missing aggregate name + +-- missing aggregate name drop aggregate; -- missing aggregate type drop aggregate newcnt1; --- bad aggregate name +-- bad aggregate name drop aggregate 314159 (int); -- bad aggregate type drop aggregate newcnt (nonesuch); --- no such aggregate +-- no such aggregate drop aggregate nonesuch (int4); -- no such aggregate for type @@ -160,83 +160,83 @@ drop aggregate newcnt (float4); -- -- DROP FUNCTION - --- missing function name + +-- missing function name drop 
function (); --- bad function name +-- bad function name drop function 314159(); --- no such function +-- no such function drop function nonesuch(); -- -- DROP TYPE - --- missing type name + +-- missing type name drop type; --- bad type name +-- bad type name drop type 314159; --- no such type +-- no such type drop type nonesuch; -- -- DROP OPERATOR - --- missing everything + +-- missing everything drop operator; --- bad operator name +-- bad operator name drop operator equals; --- missing type list +-- missing type list drop operator ===; --- missing parentheses +-- missing parentheses drop operator int4, int4; --- missing operator name +-- missing operator name drop operator (int4, int4); --- missing type list contents +-- missing type list contents drop operator === (); --- no such operator +-- no such operator drop operator === (int4); --- no such operator by that name +-- no such operator by that name drop operator === (int4, int4); --- no such type1 +-- no such type1 drop operator = (nonesuch); --- no such type1 +-- no such type1 drop operator = ( , int4); --- no such type1 +-- no such type1 drop operator = (nonesuch, int4); --- no such type2 +-- no such type2 drop operator = (int4, nonesuch); --- no such type2 +-- no such type2 drop operator = (int4, ); -- -- DROP RULE - --- missing rule name + +-- missing rule name drop rule; --- bad rule name +-- bad rule name drop rule 314159; --- no such rule +-- no such rule drop rule nonesuch on noplace; -- these postquel variants are no longer supported @@ -289,7 +289,7 @@ INSERT INTO foo VALUES(123) foo; INSERT INTO 123 VALUES(123); -INSERT INTO foo +INSERT INTO foo VALUES(123) 123 ; @@ -300,7 +300,7 @@ CREATE TABLE foo id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); -- long line to be truncated on the left -CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); -- long line to be truncated on the right @@ -313,59 +313,59 @@ CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INT -- long line to be truncated on the left, many lines CREATE TEMPORARY -TABLE -foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, -id4 INT4 -UNIQUE -NOT -NULL, -id5 TEXT -UNIQUE -NOT +TABLE +foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +id4 INT4 +UNIQUE +NOT +NULL, +id5 TEXT +UNIQUE +NOT NULL) ; -- long line to be truncated on the right, many lines -CREATE +CREATE TEMPORARY -TABLE +TABLE foo( id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) ; -- long line to be truncated both ways, many lines -CREATE +CREATE TEMPORARY -TABLE +TABLE foo -(id -INT4 -UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, -idz INT4 UNIQUE NOT NULL, +(id +INT4 +UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, idv INT4 UNIQUE NOT NULL); -- more than 10 lines... 
-CREATE +CREATE TEMPORARY -TABLE +TABLE foo -(id -INT4 -UNIQUE -NOT +(id +INT4 +UNIQUE +NOT NULL -, +, idm -INT4 -UNIQUE -NOT +INT4 +UNIQUE +NOT NULL, -idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, -idz INT4 UNIQUE NOT NULL, -idv -INT4 -UNIQUE -NOT +idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, +idv +INT4 +UNIQUE +NOT NULL); -- Check that stack depth detection mechanism works and diff --git a/src/test/regress/sql/float4.sql b/src/test/regress/sql/float4.sql index 4ce6d9ea6e..3b363f9463 100644 --- a/src/test/regress/sql/float4.sql +++ b/src/test/regress/sql/float4.sql @@ -10,7 +10,7 @@ INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); --- test for over and under flow +-- test for over and under flow INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); @@ -73,7 +73,7 @@ SELECT '' AS bad, f.f1 / '0.0' from FLOAT4_TBL f; SELECT '' AS five, * FROM FLOAT4_TBL; --- test the unary float4abs operator +-- test the unary float4abs operator SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f; UPDATE FLOAT4_TBL diff --git a/src/test/regress/sql/float8.sql b/src/test/regress/sql/float8.sql index 40f488d959..92a574ab7b 100644 --- a/src/test/regress/sql/float8.sql +++ b/src/test/regress/sql/float8.sql @@ -56,7 +56,7 @@ SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1; SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; -SELECT '' AS three, f.f1, f.f1 * '-10' AS x +SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; @@ -75,15 +75,15 @@ SELECT '' AS three, f.f1, f.f1 - '-10' AS x SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 FROM FLOAT8_TBL f where f.f1 = '1004.3'; --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 +-- absolute value +SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT8_TBL f; --- truncate +-- truncate SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 FROM FLOAT8_TBL f; --- round +-- round SELECT '' AS five, f.f1, round(f.f1) AS round_f1 FROM FLOAT8_TBL f; @@ -97,7 +97,7 @@ select floor(f1) as floor_f1 from float8_tbl f; -- sign select sign(f1) as sign_f1 from float8_tbl f; --- square root +-- square root SELECT sqrt(float8 '64') AS eight; SELECT |/ float8 '64' AS eight; @@ -109,12 +109,12 @@ SELECT '' AS three, f.f1, |/f.f1 AS sqrt_f1 -- power SELECT power(float8 '144', float8 '0.5'); --- take exp of ln(f.f1) +-- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 FROM FLOAT8_TBL f WHERE f.f1 > '0.0'; --- cube root +-- cube root SELECT ||/ float8 '27' AS three; SELECT '' AS five, f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f; @@ -142,7 +142,7 @@ SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; SELECT '' AS five, * FROM FLOAT8_TBL; --- test for over- and underflow +-- test for over- and underflow INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql index cc7b23f113..6d7bdbe77a 100644 --- a/src/test/regress/sql/foreign_key.sql +++ b/src/test/regress/sql/foreign_key.sql @@ -47,7 +47,7 @@ DROP TABLE PKTABLE; -- check set NULL and table 
constraint on multiple columns -- CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); -CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL); -- Test comments @@ -110,7 +110,7 @@ DROP TABLE FKTABLE; -- check set default and table constraint on multiple columns -- CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); -CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2) +CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT); -- Insert a value in PKTABLE for default @@ -228,7 +228,7 @@ INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); @@ -273,7 +273,7 @@ INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); @@ -325,8 +325,8 @@ INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); -INSERT INTO FKTABLE VALUES (2, 3, 4, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (2, 3, 4, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); @@ -379,8 +379,8 @@ INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5'); -- Insert Foreign Key values -INSERT INTO FKTABLE VALUES (1, 2, 3, 1); -INSERT INTO FKTABLE VALUES (2, 3, 4, 1); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (2, 3, 4, 1); INSERT INTO FKTABLE VALUES (2, 4, 5, 1); INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); @@ -442,7 +442,7 @@ DROP TABLE PKTABLE; -- -- Tests for mismatched types -- --- Basic one column, two table setup +-- Basic one column, two table setup CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY); INSERT INTO PKTABLE VALUES(42); -- This next should fail, because int=inet does not exist @@ -502,7 +502,7 @@ DROP TABLE PKTABLE; CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3, ptest4) REFERENCES pktable(ptest1, ptest2)); DROP TABLE PKTABLE; --- And this, +-- And this, CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3, ptest4) REFERENCES pktable); DROP TABLE PKTABLE; diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql index 13ef74a1cb..411e8aed39 100644 --- a/src/test/regress/sql/hash_index.sql +++ b/src/test/regress/sql/hash_index.sql @@ -80,15 +80,15 @@ 
SELECT h.seqno AS i1492, h.random AS i1 FROM hash_i4_heap h WHERE h.random = 1; -UPDATE hash_i4_heap - SET seqno = 20000 +UPDATE hash_i4_heap + SET seqno = 20000 WHERE hash_i4_heap.random = 1492795354; -SELECT h.seqno AS i20000 +SELECT h.seqno AS i20000 FROM hash_i4_heap h WHERE h.random = 1492795354; -UPDATE hash_name_heap +UPDATE hash_name_heap SET random = '0123456789abcdef'::name WHERE hash_name_heap.seqno = 6543; @@ -101,13 +101,13 @@ UPDATE hash_name_heap WHERE hash_name_heap.random = '76652222'::name; -- --- this is the row we just replaced; index scan should return zero rows +-- this is the row we just replaced; index scan should return zero rows -- SELECT h.seqno AS emptyset FROM hash_name_heap h WHERE h.random = '76652222'::name; -UPDATE hash_txt_heap +UPDATE hash_txt_heap SET random = '0123456789abcdefghijklmnop'::text WHERE hash_txt_heap.seqno = 4002; @@ -127,11 +127,11 @@ UPDATE hash_f8_heap SET random = '-1234.1234'::float8 WHERE hash_f8_heap.seqno = 8906; -SELECT h.seqno AS i8096, h.random AS f1234_1234 +SELECT h.seqno AS i8096, h.random AS f1234_1234 FROM hash_f8_heap h WHERE h.random = '-1234.1234'::float8; -UPDATE hash_f8_heap +UPDATE hash_f8_heap SET seqno = 20000 WHERE hash_f8_heap.random = '488912369'::float8; diff --git a/src/test/regress/sql/hs_primary_extremes.sql b/src/test/regress/sql/hs_primary_extremes.sql index 2796bc3818..629efb4be5 100644 --- a/src/test/regress/sql/hs_primary_extremes.sql +++ b/src/test/regress/sql/hs_primary_extremes.sql @@ -8,8 +8,8 @@ drop table if exists hs_extreme; create table hs_extreme (col1 integer); CREATE OR REPLACE FUNCTION hs_subxids (n integer) -RETURNS void -LANGUAGE plpgsql +RETURNS void +LANGUAGE plpgsql AS $$ BEGIN IF n <= 0 THEN RETURN; END IF; @@ -29,13 +29,13 @@ COMMIT; set client_min_messages = 'warning'; CREATE OR REPLACE FUNCTION hs_locks_create (n integer) -RETURNS void -LANGUAGE plpgsql +RETURNS void +LANGUAGE plpgsql AS $$ BEGIN IF n <= 0 THEN CHECKPOINT; - RETURN; + RETURN; END IF; EXECUTE 'CREATE TABLE hs_locks_' || n::text || ' ()'; PERFORM hs_locks_create(n - 1); @@ -44,13 +44,13 @@ AS $$ $$; CREATE OR REPLACE FUNCTION hs_locks_drop (n integer) -RETURNS void -LANGUAGE plpgsql +RETURNS void +LANGUAGE plpgsql AS $$ BEGIN IF n <= 0 THEN CHECKPOINT; - RETURN; + RETURN; END IF; EXECUTE 'DROP TABLE IF EXISTS hs_locks_' || n::text; PERFORM hs_locks_drop(n - 1); diff --git a/src/test/regress/sql/inet.sql b/src/test/regress/sql/inet.sql index f88a17eabc..328f14907b 100644 --- a/src/test/regress/sql/inet.sql +++ b/src/test/regress/sql/inet.sql @@ -49,7 +49,7 @@ SELECT '' AS six, c AS cidr, i AS inet FROM INET_TBL WHERE c = i; SELECT '' AS ten, i, c, - i < c AS lt, i <= c AS le, i = c AS eq, + i < c AS lt, i <= c AS le, i = c AS eq, i >= c AS ge, i > c AS gt, i <> c AS ne, i << c AS sb, i <<= c AS sbe, i >> c AS sup, i >>= c AS spe diff --git a/src/test/regress/sql/inherit.sql b/src/test/regress/sql/inherit.sql index e87cf66110..3087a14b72 100644 --- a/src/test/regress/sql/inherit.sql +++ b/src/test/regress/sql/inherit.sql @@ -140,7 +140,7 @@ CREATE TABLE inhx (xx text DEFAULT 'text'); * Test double inheritance * * Ensure that defaults are NOT included unless - * INCLUDING DEFAULTS is specified + * INCLUDING DEFAULTS is specified */ CREATE TABLE inhe (ee text, LIKE inhx) inherits (b); INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); diff --git a/src/test/regress/sql/int2.sql b/src/test/regress/sql/int2.sql index 351f68a84e..f11eb283c6 100644 --- a/src/test/regress/sql/int2.sql +++ 
b/src/test/regress/sql/int2.sql @@ -53,10 +53,10 @@ SELECT '' AS three, i.* FROM INT2_TBL i WHERE i.f1 >= int2 '0'; SELECT '' AS three, i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0'; --- positive odds +-- positive odds SELECT '' AS one, i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1'; --- any evens +-- any evens SELECT '' AS three, i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0'; SELECT '' AS five, i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i; diff --git a/src/test/regress/sql/int8.sql b/src/test/regress/sql/int8.sql index 597a9a2deb..27e0696b32 100644 --- a/src/test/regress/sql/int8.sql +++ b/src/test/regress/sql/int8.sql @@ -96,27 +96,27 @@ SELECT max(q1), max(q2) FROM INT8_TBL; -- TO_CHAR() -- -SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') +SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') FROM INT8_TBL; -SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') - FROM INT8_TBL; - -SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') +SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') FROM INT8_TBL; -SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') +SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') FROM INT8_TBL; -SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_4, to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') + FROM INT8_TBL; + +SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; SELECT '' AS to_char_6, to_char(q2, 'FMS9999999999999999') FROM INT8_TBL; SELECT '' AS to_char_7, to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL; -SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; -SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; -SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; -SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; +SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; +SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; SELECT '' AS to_char_12, to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL; -SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; +SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; SELECT '' AS to_char_14, to_char(q2, 'FM9999999999999999.999') FROM INT8_TBL; SELECT '' AS to_char_15, to_char(q2, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 
9 9 9') FROM INT8_TBL; SELECT '' AS to_char_16, to_char(q2, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM INT8_TBL; diff --git a/src/test/regress/sql/interval.sql b/src/test/regress/sql/interval.sql index f342a18af2..9da2dc63e8 100644 --- a/src/test/regress/sql/interval.sql +++ b/src/test/regress/sql/interval.sql @@ -48,7 +48,7 @@ SELECT '' AS three, * FROM INTERVAL_TBL SELECT '' AS one, * FROM INTERVAL_TBL WHERE INTERVAL_TBL.f1 = interval '@ 34 years'; -SELECT '' AS five, * FROM INTERVAL_TBL +SELECT '' AS five, * FROM INTERVAL_TBL WHERE INTERVAL_TBL.f1 >= interval '@ 1 month'; SELECT '' AS nine, * FROM INTERVAL_TBL @@ -61,11 +61,11 @@ SELECT '' AS fortyfive, r1.*, r2.* -- Test multiplication and division with intervals. --- Floating point arithmetic rounding errors can lead to unexpected results, --- though the code attempts to do the right thing and round up to days and --- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'. --- Note that it is expected for some day components to be greater than 29 and --- some time components be greater than 23:59:59 due to how intervals are +-- Floating point arithmetic rounding errors can lead to unexpected results, +-- though the code attempts to do the right thing and round up to days and +-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'. +-- Note that it is expected for some day components to be greater than 29 and +-- some time components be greater than 23:59:59 due to how intervals are -- stored internally. CREATE TABLE INTERVAL_MULDIV_TBL (span interval); @@ -249,7 +249,7 @@ select interval 'P0002' AS "year only", SET IntervalStyle to postgres_verbose; select interval '-10 mons -3 days +03:55:06.70'; select interval '1 year 2 mons 3 days 04:05:06.699999'; -select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds'; +select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds'; -- check that '30 days' equals '1 month' according to the hash function select '30 days'::interval = '1 month'::interval as t; diff --git a/src/test/regress/sql/limit.sql b/src/test/regress/sql/limit.sql index 3004550b65..fb86b6f6f7 100644 --- a/src/test/regress/sql/limit.sql +++ b/src/test/regress/sql/limit.sql @@ -3,31 +3,31 @@ -- Check the LIMIT/OFFSET feature of SELECT -- -SELECT ''::text AS two, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 50 +SELECT ''::text AS two, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 50 ORDER BY unique1 LIMIT 2; -SELECT ''::text AS five, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 60 +SELECT ''::text AS five, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 60 ORDER BY unique1 LIMIT 5; -SELECT ''::text AS two, unique1, unique2, stringu1 +SELECT ''::text AS two, unique1, unique2, stringu1 FROM onek WHERE unique1 > 60 AND unique1 < 63 ORDER BY unique1 LIMIT 5; -SELECT ''::text AS three, unique1, unique2, stringu1 - FROM onek WHERE unique1 > 100 +SELECT ''::text AS three, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 100 ORDER BY unique1 LIMIT 3 OFFSET 20; -SELECT ''::text AS zero, unique1, unique2, stringu1 - FROM onek WHERE unique1 < 50 +SELECT ''::text AS zero, unique1, unique2, stringu1 + FROM onek WHERE unique1 < 50 ORDER BY unique1 DESC LIMIT 8 OFFSET 99; -SELECT ''::text AS eleven, unique1, unique2, stringu1 - FROM onek WHERE unique1 < 50 +SELECT ''::text AS eleven, unique1, unique2, stringu1 + FROM onek WHERE unique1 < 50 ORDER BY unique1 DESC LIMIT 20 OFFSET 39; -SELECT ''::text AS ten, 
unique1, unique2, stringu1 +SELECT ''::text AS ten, unique1, unique2, stringu1 FROM onek ORDER BY unique1 OFFSET 990; -SELECT ''::text AS five, unique1, unique2, stringu1 +SELECT ''::text AS five, unique1, unique2, stringu1 FROM onek ORDER BY unique1 OFFSET 990 LIMIT 5; -SELECT ''::text AS five, unique1, unique2, stringu1 +SELECT ''::text AS five, unique1, unique2, stringu1 FROM onek ORDER BY unique1 LIMIT 5 OFFSET 900; diff --git a/src/test/regress/sql/numeric.sql b/src/test/regress/sql/numeric.sql index 8814bba486..a1435ec85e 100644 --- a/src/test/regress/sql/numeric.sql +++ b/src/test/regress/sql/numeric.sql @@ -732,7 +732,7 @@ DROP TABLE width_bucket_test; -- TO_CHAR() -- -SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999') +SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999') FROM num_data; SELECT '' AS to_char_2, to_char(val, '9G999G999G999G999G999D999G999G999G999G999') @@ -744,18 +744,18 @@ SELECT '' AS to_char_3, to_char(val, '9999999999999999.999999999999999PR') SELECT '' AS to_char_4, to_char(val, '9999999999999999.999999999999999S') FROM num_data; -SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_5, to_char(val, 'MI9999999999999999.999999999999999') FROM num_data; SELECT '' AS to_char_6, to_char(val, 'FMS9999999999999999.999999999999999') FROM num_data; SELECT '' AS to_char_7, to_char(val, 'FM9999999999999999.999999999999999THPR') FROM num_data; -SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data; -SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data; -SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data; -SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_8, to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data; +SELECT '' AS to_char_9, to_char(val, '0999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_10, to_char(val, 'S0999999999999999.999999999999999') FROM num_data; +SELECT '' AS to_char_11, to_char(val, 'FM0999999999999999.999999999999999') FROM num_data; SELECT '' AS to_char_12, to_char(val, 'FM9999999999999999.099999999999999') FROM num_data; SELECT '' AS to_char_13, to_char(val, 'FM9999999999990999.990999999999999') FROM num_data; SELECT '' AS to_char_14, to_char(val, 'FM0999999999999999.999909999999999') FROM num_data; SELECT '' AS to_char_15, to_char(val, 'FM9999999990999999.099999999999999') FROM num_data; -SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data; +SELECT '' AS to_char_16, to_char(val, 'L9999999999999999.099999999999999') FROM num_data; SELECT '' AS to_char_17, to_char(val, 'FM9999999999999999.99999999999999') FROM num_data; SELECT '' AS to_char_18, to_char(val, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data; SELECT '' AS to_char_19, to_char(val, 'FMS 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 
9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data; diff --git a/src/test/regress/sql/oid.sql b/src/test/regress/sql/oid.sql index 1bdb127a4a..4a096891f5 100644 --- a/src/test/regress/sql/oid.sql +++ b/src/test/regress/sql/oid.sql @@ -14,7 +14,7 @@ INSERT INTO OID_TBL(f1) VALUES (' 10 '); -- leading/trailing hard tab is also allowed INSERT INTO OID_TBL(f1) VALUES (' 15 '); --- bad inputs +-- bad inputs INSERT INTO OID_TBL(f1) VALUES (''); INSERT INTO OID_TBL(f1) VALUES (' '); INSERT INTO OID_TBL(f1) VALUES ('asdfasd'); diff --git a/src/test/regress/sql/oidjoins.sql b/src/test/regress/sql/oidjoins.sql index 2f112fe489..995271b690 100644 --- a/src/test/regress/sql/oidjoins.sql +++ b/src/test/regress/sql/oidjoins.sql @@ -1,479 +1,479 @@ -- -- This is created by pgsql/src/tools/findoidjoins/make_oidjoins_check -- -SELECT ctid, aggfnoid -FROM pg_catalog.pg_aggregate fk -WHERE aggfnoid != 0 AND +SELECT ctid, aggfnoid +FROM pg_catalog.pg_aggregate fk +WHERE aggfnoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfnoid); -SELECT ctid, aggtransfn -FROM pg_catalog.pg_aggregate fk -WHERE aggtransfn != 0 AND +SELECT ctid, aggtransfn +FROM pg_catalog.pg_aggregate fk +WHERE aggtransfn != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggtransfn); -SELECT ctid, aggfinalfn -FROM pg_catalog.pg_aggregate fk -WHERE aggfinalfn != 0 AND +SELECT ctid, aggfinalfn +FROM pg_catalog.pg_aggregate fk +WHERE aggfinalfn != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aggfinalfn); -SELECT ctid, aggsortop -FROM pg_catalog.pg_aggregate fk -WHERE aggsortop != 0 AND +SELECT ctid, aggsortop +FROM pg_catalog.pg_aggregate fk +WHERE aggsortop != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.aggsortop); -SELECT ctid, aggtranstype -FROM pg_catalog.pg_aggregate fk -WHERE aggtranstype != 0 AND +SELECT ctid, aggtranstype +FROM pg_catalog.pg_aggregate fk +WHERE aggtranstype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.aggtranstype); -SELECT ctid, amkeytype -FROM pg_catalog.pg_am fk -WHERE amkeytype != 0 AND +SELECT ctid, amkeytype +FROM pg_catalog.pg_am fk +WHERE amkeytype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amkeytype); -SELECT ctid, aminsert -FROM pg_catalog.pg_am fk -WHERE aminsert != 0 AND +SELECT ctid, aminsert +FROM pg_catalog.pg_am fk +WHERE aminsert != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.aminsert); -SELECT ctid, ambeginscan -FROM pg_catalog.pg_am fk -WHERE ambeginscan != 0 AND +SELECT ctid, ambeginscan +FROM pg_catalog.pg_am fk +WHERE ambeginscan != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambeginscan); -SELECT ctid, amgettuple -FROM pg_catalog.pg_am fk -WHERE amgettuple != 0 AND +SELECT ctid, amgettuple +FROM pg_catalog.pg_am fk +WHERE amgettuple != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgettuple); -SELECT ctid, amgetbitmap -FROM pg_catalog.pg_am fk -WHERE amgetbitmap != 0 AND +SELECT ctid, amgetbitmap +FROM pg_catalog.pg_am fk +WHERE amgetbitmap != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amgetbitmap); -SELECT ctid, amrescan -FROM pg_catalog.pg_am fk -WHERE amrescan != 0 AND +SELECT ctid, amrescan +FROM pg_catalog.pg_am fk +WHERE amrescan != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrescan); -SELECT ctid, amendscan -FROM pg_catalog.pg_am fk -WHERE amendscan != 0 AND +SELECT ctid, amendscan +FROM 
pg_catalog.pg_am fk +WHERE amendscan != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amendscan); -SELECT ctid, ammarkpos -FROM pg_catalog.pg_am fk -WHERE ammarkpos != 0 AND +SELECT ctid, ammarkpos +FROM pg_catalog.pg_am fk +WHERE ammarkpos != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ammarkpos); -SELECT ctid, amrestrpos -FROM pg_catalog.pg_am fk -WHERE amrestrpos != 0 AND +SELECT ctid, amrestrpos +FROM pg_catalog.pg_am fk +WHERE amrestrpos != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amrestrpos); -SELECT ctid, ambuild -FROM pg_catalog.pg_am fk -WHERE ambuild != 0 AND +SELECT ctid, ambuild +FROM pg_catalog.pg_am fk +WHERE ambuild != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambuild); -SELECT ctid, ambulkdelete -FROM pg_catalog.pg_am fk -WHERE ambulkdelete != 0 AND +SELECT ctid, ambulkdelete +FROM pg_catalog.pg_am fk +WHERE ambulkdelete != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.ambulkdelete); -SELECT ctid, amvacuumcleanup -FROM pg_catalog.pg_am fk -WHERE amvacuumcleanup != 0 AND +SELECT ctid, amvacuumcleanup +FROM pg_catalog.pg_am fk +WHERE amvacuumcleanup != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amvacuumcleanup); -SELECT ctid, amcostestimate -FROM pg_catalog.pg_am fk -WHERE amcostestimate != 0 AND +SELECT ctid, amcostestimate +FROM pg_catalog.pg_am fk +WHERE amcostestimate != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amcostestimate); -SELECT ctid, amoptions -FROM pg_catalog.pg_am fk -WHERE amoptions != 0 AND +SELECT ctid, amoptions +FROM pg_catalog.pg_am fk +WHERE amoptions != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amoptions); -SELECT ctid, amopfamily -FROM pg_catalog.pg_amop fk -WHERE amopfamily != 0 AND +SELECT ctid, amopfamily +FROM pg_catalog.pg_amop fk +WHERE amopfamily != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amopfamily); -SELECT ctid, amoplefttype -FROM pg_catalog.pg_amop fk -WHERE amoplefttype != 0 AND +SELECT ctid, amoplefttype +FROM pg_catalog.pg_amop fk +WHERE amoplefttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoplefttype); -SELECT ctid, amoprighttype -FROM pg_catalog.pg_amop fk -WHERE amoprighttype != 0 AND +SELECT ctid, amoprighttype +FROM pg_catalog.pg_amop fk +WHERE amoprighttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amoprighttype); -SELECT ctid, amopopr -FROM pg_catalog.pg_amop fk -WHERE amopopr != 0 AND +SELECT ctid, amopopr +FROM pg_catalog.pg_amop fk +WHERE amopopr != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.amopopr); -SELECT ctid, amopmethod -FROM pg_catalog.pg_amop fk -WHERE amopmethod != 0 AND +SELECT ctid, amopmethod +FROM pg_catalog.pg_amop fk +WHERE amopmethod != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.amopmethod); -SELECT ctid, amprocfamily -FROM pg_catalog.pg_amproc fk -WHERE amprocfamily != 0 AND +SELECT ctid, amprocfamily +FROM pg_catalog.pg_amproc fk +WHERE amprocfamily != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.amprocfamily); -SELECT ctid, amproclefttype -FROM pg_catalog.pg_amproc fk -WHERE amproclefttype != 0 AND +SELECT ctid, amproclefttype +FROM pg_catalog.pg_amproc fk +WHERE amproclefttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amproclefttype); -SELECT ctid, amprocrighttype -FROM 
pg_catalog.pg_amproc fk -WHERE amprocrighttype != 0 AND +SELECT ctid, amprocrighttype +FROM pg_catalog.pg_amproc fk +WHERE amprocrighttype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.amprocrighttype); -SELECT ctid, amproc -FROM pg_catalog.pg_amproc fk -WHERE amproc != 0 AND +SELECT ctid, amproc +FROM pg_catalog.pg_amproc fk +WHERE amproc != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.amproc); -SELECT ctid, attrelid -FROM pg_catalog.pg_attribute fk -WHERE attrelid != 0 AND +SELECT ctid, attrelid +FROM pg_catalog.pg_attribute fk +WHERE attrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.attrelid); -SELECT ctid, atttypid -FROM pg_catalog.pg_attribute fk -WHERE atttypid != 0 AND +SELECT ctid, atttypid +FROM pg_catalog.pg_attribute fk +WHERE atttypid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.atttypid); -SELECT ctid, castsource -FROM pg_catalog.pg_cast fk -WHERE castsource != 0 AND +SELECT ctid, castsource +FROM pg_catalog.pg_cast fk +WHERE castsource != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.castsource); -SELECT ctid, casttarget -FROM pg_catalog.pg_cast fk -WHERE casttarget != 0 AND +SELECT ctid, casttarget +FROM pg_catalog.pg_cast fk +WHERE casttarget != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.casttarget); -SELECT ctid, castfunc -FROM pg_catalog.pg_cast fk -WHERE castfunc != 0 AND +SELECT ctid, castfunc +FROM pg_catalog.pg_cast fk +WHERE castfunc != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.castfunc); -SELECT ctid, relnamespace -FROM pg_catalog.pg_class fk -WHERE relnamespace != 0 AND +SELECT ctid, relnamespace +FROM pg_catalog.pg_class fk +WHERE relnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.relnamespace); -SELECT ctid, reltype -FROM pg_catalog.pg_class fk -WHERE reltype != 0 AND +SELECT ctid, reltype +FROM pg_catalog.pg_class fk +WHERE reltype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.reltype); -SELECT ctid, relowner -FROM pg_catalog.pg_class fk -WHERE relowner != 0 AND +SELECT ctid, relowner +FROM pg_catalog.pg_class fk +WHERE relowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.relowner); -SELECT ctid, relam -FROM pg_catalog.pg_class fk -WHERE relam != 0 AND +SELECT ctid, relam +FROM pg_catalog.pg_class fk +WHERE relam != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.relam); -SELECT ctid, reltablespace -FROM pg_catalog.pg_class fk -WHERE reltablespace != 0 AND +SELECT ctid, reltablespace +FROM pg_catalog.pg_class fk +WHERE reltablespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.reltablespace); -SELECT ctid, reltoastrelid -FROM pg_catalog.pg_class fk -WHERE reltoastrelid != 0 AND +SELECT ctid, reltoastrelid +FROM pg_catalog.pg_class fk +WHERE reltoastrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastrelid); -SELECT ctid, reltoastidxid -FROM pg_catalog.pg_class fk -WHERE reltoastidxid != 0 AND +SELECT ctid, reltoastidxid +FROM pg_catalog.pg_class fk +WHERE reltoastidxid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.reltoastidxid); -SELECT ctid, connamespace -FROM pg_catalog.pg_constraint fk -WHERE connamespace != 0 AND +SELECT ctid, connamespace +FROM pg_catalog.pg_constraint fk +WHERE connamespace != 0 AND NOT EXISTS(SELECT 1 FROM 
pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace); -SELECT ctid, contypid -FROM pg_catalog.pg_constraint fk -WHERE contypid != 0 AND +SELECT ctid, contypid +FROM pg_catalog.pg_constraint fk +WHERE contypid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.contypid); -SELECT ctid, connamespace -FROM pg_catalog.pg_conversion fk -WHERE connamespace != 0 AND +SELECT ctid, connamespace +FROM pg_catalog.pg_conversion fk +WHERE connamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.connamespace); -SELECT ctid, conowner -FROM pg_catalog.pg_conversion fk -WHERE conowner != 0 AND +SELECT ctid, conowner +FROM pg_catalog.pg_conversion fk +WHERE conowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.conowner); -SELECT ctid, conproc -FROM pg_catalog.pg_conversion fk -WHERE conproc != 0 AND +SELECT ctid, conproc +FROM pg_catalog.pg_conversion fk +WHERE conproc != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.conproc); -SELECT ctid, datdba -FROM pg_catalog.pg_database fk -WHERE datdba != 0 AND +SELECT ctid, datdba +FROM pg_catalog.pg_database fk +WHERE datdba != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.datdba); -SELECT ctid, dattablespace -FROM pg_catalog.pg_database fk -WHERE dattablespace != 0 AND +SELECT ctid, dattablespace +FROM pg_catalog.pg_database fk +WHERE dattablespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_tablespace pk WHERE pk.oid = fk.dattablespace); -SELECT ctid, setdatabase -FROM pg_catalog.pg_db_role_setting fk -WHERE setdatabase != 0 AND +SELECT ctid, setdatabase +FROM pg_catalog.pg_db_role_setting fk +WHERE setdatabase != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_database pk WHERE pk.oid = fk.setdatabase); -SELECT ctid, classid -FROM pg_catalog.pg_depend fk -WHERE classid != 0 AND +SELECT ctid, classid +FROM pg_catalog.pg_depend fk +WHERE classid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classid); -SELECT ctid, refclassid -FROM pg_catalog.pg_depend fk -WHERE refclassid != 0 AND +SELECT ctid, refclassid +FROM pg_catalog.pg_depend fk +WHERE refclassid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid); -SELECT ctid, classoid -FROM pg_catalog.pg_description fk -WHERE classoid != 0 AND +SELECT ctid, classoid +FROM pg_catalog.pg_description fk +WHERE classoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid); -SELECT ctid, indexrelid -FROM pg_catalog.pg_index fk -WHERE indexrelid != 0 AND +SELECT ctid, indexrelid +FROM pg_catalog.pg_index fk +WHERE indexrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indexrelid); -SELECT ctid, indrelid -FROM pg_catalog.pg_index fk -WHERE indrelid != 0 AND +SELECT ctid, indrelid +FROM pg_catalog.pg_index fk +WHERE indrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.indrelid); -SELECT ctid, lanowner -FROM pg_catalog.pg_language fk -WHERE lanowner != 0 AND +SELECT ctid, lanowner +FROM pg_catalog.pg_language fk +WHERE lanowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.lanowner); -SELECT ctid, lanplcallfoid -FROM pg_catalog.pg_language fk -WHERE lanplcallfoid != 0 AND +SELECT ctid, lanplcallfoid +FROM pg_catalog.pg_language fk +WHERE lanplcallfoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanplcallfoid); -SELECT ctid, laninline -FROM pg_catalog.pg_language fk -WHERE 
laninline != 0 AND +SELECT ctid, laninline +FROM pg_catalog.pg_language fk +WHERE laninline != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.laninline); -SELECT ctid, lanvalidator -FROM pg_catalog.pg_language fk -WHERE lanvalidator != 0 AND +SELECT ctid, lanvalidator +FROM pg_catalog.pg_language fk +WHERE lanvalidator != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanvalidator); -SELECT ctid, nspowner -FROM pg_catalog.pg_namespace fk -WHERE nspowner != 0 AND +SELECT ctid, nspowner +FROM pg_catalog.pg_namespace fk +WHERE nspowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.nspowner); -SELECT ctid, opcmethod -FROM pg_catalog.pg_opclass fk -WHERE opcmethod != 0 AND +SELECT ctid, opcmethod +FROM pg_catalog.pg_opclass fk +WHERE opcmethod != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opcmethod); -SELECT ctid, opcnamespace -FROM pg_catalog.pg_opclass fk -WHERE opcnamespace != 0 AND +SELECT ctid, opcnamespace +FROM pg_catalog.pg_opclass fk +WHERE opcnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opcnamespace); -SELECT ctid, opcowner -FROM pg_catalog.pg_opclass fk -WHERE opcowner != 0 AND +SELECT ctid, opcowner +FROM pg_catalog.pg_opclass fk +WHERE opcowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opcowner); -SELECT ctid, opcfamily -FROM pg_catalog.pg_opclass fk -WHERE opcfamily != 0 AND +SELECT ctid, opcfamily +FROM pg_catalog.pg_opclass fk +WHERE opcfamily != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_opfamily pk WHERE pk.oid = fk.opcfamily); -SELECT ctid, opcintype -FROM pg_catalog.pg_opclass fk -WHERE opcintype != 0 AND +SELECT ctid, opcintype +FROM pg_catalog.pg_opclass fk +WHERE opcintype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opcintype); -SELECT ctid, opckeytype -FROM pg_catalog.pg_opclass fk -WHERE opckeytype != 0 AND +SELECT ctid, opckeytype +FROM pg_catalog.pg_opclass fk +WHERE opckeytype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.opckeytype); -SELECT ctid, oprnamespace -FROM pg_catalog.pg_operator fk -WHERE oprnamespace != 0 AND +SELECT ctid, oprnamespace +FROM pg_catalog.pg_operator fk +WHERE oprnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.oprnamespace); -SELECT ctid, oprowner -FROM pg_catalog.pg_operator fk -WHERE oprowner != 0 AND +SELECT ctid, oprowner +FROM pg_catalog.pg_operator fk +WHERE oprowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.oprowner); -SELECT ctid, oprleft -FROM pg_catalog.pg_operator fk -WHERE oprleft != 0 AND +SELECT ctid, oprleft +FROM pg_catalog.pg_operator fk +WHERE oprleft != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprleft); -SELECT ctid, oprright -FROM pg_catalog.pg_operator fk -WHERE oprright != 0 AND +SELECT ctid, oprright +FROM pg_catalog.pg_operator fk +WHERE oprright != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprright); -SELECT ctid, oprresult -FROM pg_catalog.pg_operator fk -WHERE oprresult != 0 AND +SELECT ctid, oprresult +FROM pg_catalog.pg_operator fk +WHERE oprresult != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.oprresult); -SELECT ctid, oprcom -FROM pg_catalog.pg_operator fk -WHERE oprcom != 0 AND +SELECT ctid, oprcom +FROM pg_catalog.pg_operator fk +WHERE oprcom != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid 
= fk.oprcom); -SELECT ctid, oprnegate -FROM pg_catalog.pg_operator fk -WHERE oprnegate != 0 AND +SELECT ctid, oprnegate +FROM pg_catalog.pg_operator fk +WHERE oprnegate != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprnegate); -SELECT ctid, oprcode -FROM pg_catalog.pg_operator fk -WHERE oprcode != 0 AND +SELECT ctid, oprcode +FROM pg_catalog.pg_operator fk +WHERE oprcode != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprcode); -SELECT ctid, oprrest -FROM pg_catalog.pg_operator fk -WHERE oprrest != 0 AND +SELECT ctid, oprrest +FROM pg_catalog.pg_operator fk +WHERE oprrest != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprrest); -SELECT ctid, oprjoin -FROM pg_catalog.pg_operator fk -WHERE oprjoin != 0 AND +SELECT ctid, oprjoin +FROM pg_catalog.pg_operator fk +WHERE oprjoin != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.oprjoin); -SELECT ctid, opfmethod -FROM pg_catalog.pg_opfamily fk -WHERE opfmethod != 0 AND +SELECT ctid, opfmethod +FROM pg_catalog.pg_opfamily fk +WHERE opfmethod != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_am pk WHERE pk.oid = fk.opfmethod); -SELECT ctid, opfnamespace -FROM pg_catalog.pg_opfamily fk -WHERE opfnamespace != 0 AND +SELECT ctid, opfnamespace +FROM pg_catalog.pg_opfamily fk +WHERE opfnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.opfnamespace); -SELECT ctid, opfowner -FROM pg_catalog.pg_opfamily fk -WHERE opfowner != 0 AND +SELECT ctid, opfowner +FROM pg_catalog.pg_opfamily fk +WHERE opfowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.opfowner); -SELECT ctid, pronamespace -FROM pg_catalog.pg_proc fk -WHERE pronamespace != 0 AND +SELECT ctid, pronamespace +FROM pg_catalog.pg_proc fk +WHERE pronamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.pronamespace); -SELECT ctid, proowner -FROM pg_catalog.pg_proc fk -WHERE proowner != 0 AND +SELECT ctid, proowner +FROM pg_catalog.pg_proc fk +WHERE proowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.proowner); -SELECT ctid, prolang -FROM pg_catalog.pg_proc fk -WHERE prolang != 0 AND +SELECT ctid, prolang +FROM pg_catalog.pg_proc fk +WHERE prolang != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_language pk WHERE pk.oid = fk.prolang); -SELECT ctid, prorettype -FROM pg_catalog.pg_proc fk -WHERE prorettype != 0 AND +SELECT ctid, prorettype +FROM pg_catalog.pg_proc fk +WHERE prorettype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.prorettype); -SELECT ctid, ev_class -FROM pg_catalog.pg_rewrite fk -WHERE ev_class != 0 AND +SELECT ctid, ev_class +FROM pg_catalog.pg_rewrite fk +WHERE ev_class != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.ev_class); -SELECT ctid, refclassid -FROM pg_catalog.pg_shdepend fk -WHERE refclassid != 0 AND +SELECT ctid, refclassid +FROM pg_catalog.pg_shdepend fk +WHERE refclassid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.refclassid); -SELECT ctid, classoid -FROM pg_catalog.pg_shdescription fk -WHERE classoid != 0 AND +SELECT ctid, classoid +FROM pg_catalog.pg_shdescription fk +WHERE classoid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.classoid); -SELECT ctid, starelid -FROM pg_catalog.pg_statistic fk -WHERE starelid != 0 AND +SELECT ctid, starelid +FROM pg_catalog.pg_statistic fk +WHERE starelid != 0 AND NOT EXISTS(SELECT 1 FROM 
pg_catalog.pg_class pk WHERE pk.oid = fk.starelid); -SELECT ctid, staop1 -FROM pg_catalog.pg_statistic fk -WHERE staop1 != 0 AND +SELECT ctid, staop1 +FROM pg_catalog.pg_statistic fk +WHERE staop1 != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop1); -SELECT ctid, staop2 -FROM pg_catalog.pg_statistic fk -WHERE staop2 != 0 AND +SELECT ctid, staop2 +FROM pg_catalog.pg_statistic fk +WHERE staop2 != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop2); -SELECT ctid, staop3 -FROM pg_catalog.pg_statistic fk -WHERE staop3 != 0 AND +SELECT ctid, staop3 +FROM pg_catalog.pg_statistic fk +WHERE staop3 != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.staop3); -SELECT ctid, spcowner -FROM pg_catalog.pg_tablespace fk -WHERE spcowner != 0 AND +SELECT ctid, spcowner +FROM pg_catalog.pg_tablespace fk +WHERE spcowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.spcowner); -SELECT ctid, cfgnamespace -FROM pg_catalog.pg_ts_config fk -WHERE cfgnamespace != 0 AND +SELECT ctid, cfgnamespace +FROM pg_catalog.pg_ts_config fk +WHERE cfgnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.cfgnamespace); -SELECT ctid, cfgowner -FROM pg_catalog.pg_ts_config fk -WHERE cfgowner != 0 AND +SELECT ctid, cfgowner +FROM pg_catalog.pg_ts_config fk +WHERE cfgowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.cfgowner); -SELECT ctid, cfgparser -FROM pg_catalog.pg_ts_config fk -WHERE cfgparser != 0 AND +SELECT ctid, cfgparser +FROM pg_catalog.pg_ts_config fk +WHERE cfgparser != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_parser pk WHERE pk.oid = fk.cfgparser); -SELECT ctid, mapcfg -FROM pg_catalog.pg_ts_config_map fk -WHERE mapcfg != 0 AND +SELECT ctid, mapcfg +FROM pg_catalog.pg_ts_config_map fk +WHERE mapcfg != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_config pk WHERE pk.oid = fk.mapcfg); -SELECT ctid, mapdict -FROM pg_catalog.pg_ts_config_map fk -WHERE mapdict != 0 AND +SELECT ctid, mapdict +FROM pg_catalog.pg_ts_config_map fk +WHERE mapdict != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_dict pk WHERE pk.oid = fk.mapdict); -SELECT ctid, dictnamespace -FROM pg_catalog.pg_ts_dict fk -WHERE dictnamespace != 0 AND +SELECT ctid, dictnamespace +FROM pg_catalog.pg_ts_dict fk +WHERE dictnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.dictnamespace); -SELECT ctid, dictowner -FROM pg_catalog.pg_ts_dict fk -WHERE dictowner != 0 AND +SELECT ctid, dictowner +FROM pg_catalog.pg_ts_dict fk +WHERE dictowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.dictowner); -SELECT ctid, dicttemplate -FROM pg_catalog.pg_ts_dict fk -WHERE dicttemplate != 0 AND +SELECT ctid, dicttemplate +FROM pg_catalog.pg_ts_dict fk +WHERE dicttemplate != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_ts_template pk WHERE pk.oid = fk.dicttemplate); -SELECT ctid, prsnamespace -FROM pg_catalog.pg_ts_parser fk -WHERE prsnamespace != 0 AND +SELECT ctid, prsnamespace +FROM pg_catalog.pg_ts_parser fk +WHERE prsnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.prsnamespace); -SELECT ctid, prsstart -FROM pg_catalog.pg_ts_parser fk -WHERE prsstart != 0 AND +SELECT ctid, prsstart +FROM pg_catalog.pg_ts_parser fk +WHERE prsstart != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsstart); -SELECT ctid, prstoken -FROM pg_catalog.pg_ts_parser fk 
-WHERE prstoken != 0 AND +SELECT ctid, prstoken +FROM pg_catalog.pg_ts_parser fk +WHERE prstoken != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prstoken); -SELECT ctid, prsend -FROM pg_catalog.pg_ts_parser fk -WHERE prsend != 0 AND +SELECT ctid, prsend +FROM pg_catalog.pg_ts_parser fk +WHERE prsend != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsend); -SELECT ctid, prsheadline -FROM pg_catalog.pg_ts_parser fk -WHERE prsheadline != 0 AND +SELECT ctid, prsheadline +FROM pg_catalog.pg_ts_parser fk +WHERE prsheadline != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prsheadline); -SELECT ctid, prslextype -FROM pg_catalog.pg_ts_parser fk -WHERE prslextype != 0 AND +SELECT ctid, prslextype +FROM pg_catalog.pg_ts_parser fk +WHERE prslextype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.prslextype); -SELECT ctid, tmplnamespace -FROM pg_catalog.pg_ts_template fk -WHERE tmplnamespace != 0 AND +SELECT ctid, tmplnamespace +FROM pg_catalog.pg_ts_template fk +WHERE tmplnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.tmplnamespace); -SELECT ctid, tmplinit -FROM pg_catalog.pg_ts_template fk -WHERE tmplinit != 0 AND +SELECT ctid, tmplinit +FROM pg_catalog.pg_ts_template fk +WHERE tmplinit != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmplinit); -SELECT ctid, tmpllexize -FROM pg_catalog.pg_ts_template fk -WHERE tmpllexize != 0 AND +SELECT ctid, tmpllexize +FROM pg_catalog.pg_ts_template fk +WHERE tmpllexize != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.tmpllexize); -SELECT ctid, typnamespace -FROM pg_catalog.pg_type fk -WHERE typnamespace != 0 AND +SELECT ctid, typnamespace +FROM pg_catalog.pg_type fk +WHERE typnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.typnamespace); -SELECT ctid, typowner -FROM pg_catalog.pg_type fk -WHERE typowner != 0 AND +SELECT ctid, typowner +FROM pg_catalog.pg_type fk +WHERE typowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.typowner); -SELECT ctid, typrelid -FROM pg_catalog.pg_type fk -WHERE typrelid != 0 AND +SELECT ctid, typrelid +FROM pg_catalog.pg_type fk +WHERE typrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.typrelid); -SELECT ctid, typelem -FROM pg_catalog.pg_type fk -WHERE typelem != 0 AND +SELECT ctid, typelem +FROM pg_catalog.pg_type fk +WHERE typelem != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typelem); -SELECT ctid, typarray -FROM pg_catalog.pg_type fk -WHERE typarray != 0 AND +SELECT ctid, typarray +FROM pg_catalog.pg_type fk +WHERE typarray != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typarray); -SELECT ctid, typinput -FROM pg_catalog.pg_type fk -WHERE typinput != 0 AND +SELECT ctid, typinput +FROM pg_catalog.pg_type fk +WHERE typinput != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typinput); -SELECT ctid, typoutput -FROM pg_catalog.pg_type fk -WHERE typoutput != 0 AND +SELECT ctid, typoutput +FROM pg_catalog.pg_type fk +WHERE typoutput != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typoutput); -SELECT ctid, typreceive -FROM pg_catalog.pg_type fk -WHERE typreceive != 0 AND +SELECT ctid, typreceive +FROM pg_catalog.pg_type fk +WHERE typreceive != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typreceive); -SELECT ctid, 
typsend -FROM pg_catalog.pg_type fk -WHERE typsend != 0 AND +SELECT ctid, typsend +FROM pg_catalog.pg_type fk +WHERE typsend != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typsend); -SELECT ctid, typmodin -FROM pg_catalog.pg_type fk -WHERE typmodin != 0 AND +SELECT ctid, typmodin +FROM pg_catalog.pg_type fk +WHERE typmodin != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodin); -SELECT ctid, typmodout -FROM pg_catalog.pg_type fk -WHERE typmodout != 0 AND +SELECT ctid, typmodout +FROM pg_catalog.pg_type fk +WHERE typmodout != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typmodout); -SELECT ctid, typanalyze -FROM pg_catalog.pg_type fk -WHERE typanalyze != 0 AND +SELECT ctid, typanalyze +FROM pg_catalog.pg_type fk +WHERE typanalyze != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.typanalyze); -SELECT ctid, typbasetype -FROM pg_catalog.pg_type fk -WHERE typbasetype != 0 AND +SELECT ctid, typbasetype +FROM pg_catalog.pg_type fk +WHERE typbasetype != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.typbasetype); diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql index 015fbd6317..d0f4e3b5e1 100644 --- a/src/test/regress/sql/plpgsql.sql +++ b/src/test/regress/sql/plpgsql.sql @@ -2657,7 +2657,7 @@ select exc_using(5, 'foobar'); drop function exc_using(int, text); create or replace function exc_using(int) returns void as $$ -declare +declare c refcursor; i int; begin @@ -2668,7 +2668,7 @@ begin raise notice '%', i; end loop; close c; - return; + return; end; $$ language plpgsql; diff --git a/src/test/regress/sql/point.sql b/src/test/regress/sql/point.sql index 40cd4ec022..1b62b10d40 100644 --- a/src/test/regress/sql/point.sql +++ b/src/test/regress/sql/point.sql @@ -14,7 +14,7 @@ INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)'); INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)'); --- bad format points +-- bad format points INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); INSERT INTO POINT_TBL(f1) VALUES ('10.0,10.0'); @@ -26,22 +26,22 @@ INSERT INTO POINT_TBL(f1) VALUES ('(10.0,10.0'); SELECT '' AS six, * FROM POINT_TBL; --- left of +-- left of SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)'; --- right of +-- right of SELECT '' AS three, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >> p.f1; --- above +-- above SELECT '' AS one, p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >^ p.f1; --- below +-- below SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 <^ '(0.0, 0.0)'; --- equal +-- equal SELECT '' AS one, p.* FROM POINT_TBL p WHERE p.f1 ~= '(5.1, 34.5)'; --- point in box +-- point in box SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 <@ box '(0,0,100,100)'; @@ -77,6 +77,6 @@ SELECT '' AS fifteen, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS dis -- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10 SELECT '' AS three, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance - FROM POINT_TBL p1, POINT_TBL p2 + FROM POINT_TBL p1, POINT_TBL p2 WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 and p1.f1 >^ p2.f1 ORDER BY distance; diff --git a/src/test/regress/sql/polygon.sql b/src/test/regress/sql/polygon.sql index 1b3903b782..2dad566f37 100644 --- a/src/test/regress/sql/polygon.sql +++ b/src/test/regress/sql/polygon.sql @@ -21,12 +21,12 @@ INSERT INTO POLYGON_TBL(f1) VALUES ('(2.0,0.0),(2.0,4.0),(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)'); --- degenerate 
polygons +-- degenerate polygons INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,1.0),(0.0,1.0)'); --- bad polygon input strings +-- bad polygon input strings INSERT INTO POLYGON_TBL(f1) VALUES ('0.0'); INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0 0.0'); @@ -40,42 +40,42 @@ INSERT INTO POLYGON_TBL(f1) VALUES ('asdf'); SELECT '' AS four, * FROM POLYGON_TBL; --- overlap +-- overlap SELECT '' AS three, p.* FROM POLYGON_TBL p WHERE p.f1 && '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- left overlap -SELECT '' AS four, p.* +-- left overlap +SELECT '' AS four, p.* FROM POLYGON_TBL p WHERE p.f1 &< '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- right overlap -SELECT '' AS two, p.* +-- right overlap +SELECT '' AS two, p.* FROM POLYGON_TBL p WHERE p.f1 &> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- left of +-- left of SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 << '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- right of +-- right of SELECT '' AS zero, p.* FROM POLYGON_TBL p WHERE p.f1 >> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- contained -SELECT '' AS one, p.* +-- contained +SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- same +-- same SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; --- contains +-- contains SELECT '' AS one, p.* FROM POLYGON_TBL p WHERE p.f1 @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; @@ -93,27 +93,27 @@ SELECT '' AS one, p.* -- -- 0 1 2 3 4 -- --- left of +-- left of SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; --- left overlap +-- left overlap SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; --- right overlap +-- right overlap SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' &> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; --- right of +-- right of SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' >> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; --- contained in +-- contained in SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; --- contains +-- contains SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; -- +------------------------+ -- | *---* 1 --- | + | | +-- | + | | -- | 2 *---* -- +------------------------+ -- 3 @@ -122,10 +122,10 @@ SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),( SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "false"; -- +-----------+ --- | *---* / --- | | |/ --- | | + --- | | |\ +-- | *---* / +-- | | |/ +-- | | + +-- | | |\ -- | *---* \ -- +-----------+ SELECT '((0,4),(6,4),(3,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true"; @@ -148,15 +148,15 @@ SELECT '((1,1),(1,4),(5,4),(5,3),(2,3),(2,2),(5,2),(5,1))'::polygon @> '((3,2),( -- +---------+ SELECT '((0,0),(0,3),(3,3),(3,0))'::polygon @> '((2,1),(2,2),(3,2),(3,1))'::polygon AS "true"; --- same +-- same SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; --- overlap +-- overlap SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; -- +--------------------+ -- | *---* 1 --- | + | | +-- | + | | -- | 2 *---* -- +--------------------+ -- 3 diff --git a/src/test/regress/sql/portals.sql b/src/test/regress/sql/portals.sql index 585a7c25ea..02286c4096 100644 --- a/src/test/regress/sql/portals.sql +++ 
b/src/test/regress/sql/portals.sql
@@ -461,12 +461,12 @@ ROLLBACK;
 -- Make sure snapshot management works okay, per bug report in
 -- 235395b90909301035v7228ce63q392931f15aa74b31@mail.gmail.com
-BEGIN; 
-SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; 
-CREATE TABLE cursor (a int); 
-INSERT INTO cursor VALUES (1); 
-DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE; 
-UPDATE cursor SET a = 2; 
-FETCH ALL FROM c1; 
-COMMIT; 
+BEGIN;
+SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE cursor (a int);
+INSERT INTO cursor VALUES (1);
+DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE;
+UPDATE cursor SET a = 2;
+FETCH ALL FROM c1;
+COMMIT;
 DROP TABLE cursor;
diff --git a/src/test/regress/sql/portals_p2.sql b/src/test/regress/sql/portals_p2.sql
index 190035ae01..555820da39 100644
--- a/src/test/regress/sql/portals_p2.sql
+++ b/src/test/regress/sql/portals_p2.sql
@@ -4,43 +4,43 @@ BEGIN;
-DECLARE foo13 CURSOR FOR 
+DECLARE foo13 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 50;
-DECLARE foo14 CURSOR FOR 
+DECLARE foo14 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 51;
-DECLARE foo15 CURSOR FOR 
+DECLARE foo15 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 52;
-DECLARE foo16 CURSOR FOR 
+DECLARE foo16 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 53;
-DECLARE foo17 CURSOR FOR 
+DECLARE foo17 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 54;
-DECLARE foo18 CURSOR FOR 
+DECLARE foo18 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 55;
-DECLARE foo19 CURSOR FOR 
+DECLARE foo19 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 56;
-DECLARE foo20 CURSOR FOR 
+DECLARE foo20 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 57;
-DECLARE foo21 CURSOR FOR 
+DECLARE foo21 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 58;
-DECLARE foo22 CURSOR FOR 
+DECLARE foo22 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 59;
-DECLARE foo23 CURSOR FOR 
+DECLARE foo23 CURSOR FOR
 SELECT * FROM onek WHERE unique1 = 60;
-DECLARE foo24 CURSOR FOR 
+DECLARE foo24 CURSOR FOR
 SELECT * FROM onek2 WHERE unique1 = 50;
-DECLARE foo25 CURSOR FOR 
+DECLARE foo25 CURSOR FOR
 SELECT * FROM onek2 WHERE unique1 = 60;
 FETCH all in foo13;
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index c7cf788b20..16dc106ab0 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -37,9 +37,9 @@ create table rtest_person (pname text, pdesc text);
 create table rtest_admin (pname text, sysname text);
 create rule rtest_sys_upd as on update to rtest_system do also (
- update rtest_interface set sysname = new.sysname 
+ update rtest_interface set sysname = new.sysname
 where sysname = old.sysname;
- update rtest_admin set sysname = new.sysname 
+ update rtest_admin set sysname = new.sysname
 where sysname = old.sysname
 );
@@ -75,7 +75,7 @@ create rule rtest_emp_del as on delete to rtest_emp do
 --
 -- Tables and rules for the multiple cascaded qualified instead
--- rule test 
+-- rule test
 --
 create table rtest_t4 (a int4, b text);
 create table rtest_t5 (a int4, b text);
@@ -420,7 +420,7 @@ create table rtest_view1 (a int4, b text, v bool);
 create table rtest_view2 (a int4);
 create table rtest_view3 (a int4, b text);
 create table rtest_view4 (a int4, b text, c int4);
-create view rtest_vview1 as select a, b from rtest_view1 X 
+create view rtest_vview1 as select a, b from rtest_view1 X
 where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a);
 create view rtest_vview2 as select a, b from rtest_view1 where v;
 create view rtest_vview3 as select a, b from rtest_vview2 X
@@ -493,7 +493,7 @@ create table 
rtest_unitfact ( factor float ); -create view rtest_vcomp as +create view rtest_vcomp as select X.part, (X.size * Y.factor) as size_in_cm from rtest_comp X, rtest_unitfact Y where X.unit = Y.unit; @@ -746,7 +746,7 @@ create rule rrule as on update to vview do instead ( insert into cchild (pid, descrip) - select old.pid, new.descrip where old.descrip isnull; + select old.pid, new.descrip where old.descrip isnull; update cchild set descrip = new.descrip where cchild.pid = old.pid; ); @@ -770,7 +770,7 @@ drop table cchild; -- SELECT viewname, definition FROM pg_views WHERE schemaname <> 'information_schema' ORDER BY viewname; -SELECT tablename, rulename, definition FROM pg_rules +SELECT tablename, rulename, definition FROM pg_rules ORDER BY tablename, rulename; -- @@ -797,14 +797,14 @@ SELECT * FROM ruletest_tbl2; create table rule_and_refint_t1 ( id1a integer, id1b integer, - + primary key (id1a, id1b) ); create table rule_and_refint_t2 ( id2a integer, id2c integer, - + primary key (id2a, id2c) ); @@ -901,11 +901,11 @@ create temp table t1 (a integer primary key); create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1); create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); -create rule t1_ins_1 as on insert to t1 +create rule t1_ins_1 as on insert to t1 where new.a >= 0 and new.a < 10 do instead insert into t1_1 values (new.a); -create rule t1_ins_2 as on insert to t1 +create rule t1_ins_2 as on insert to t1 where new.a >= 10 and new.a < 20 do instead insert into t1_2 values (new.a); diff --git a/src/test/regress/sql/select.sql b/src/test/regress/sql/select.sql index 451fcf78d9..b99fb13c7d 100644 --- a/src/test/regress/sql/select.sql +++ b/src/test/regress/sql/select.sql @@ -13,24 +13,24 @@ SELECT * FROM onek -- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1 -- SELECT onek.unique1, onek.stringu1 FROM onek - WHERE onek.unique1 < 20 + WHERE onek.unique1 < 20 ORDER BY unique1 using >; -- -- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2 -- SELECT onek.unique1, onek.stringu1 FROM onek - WHERE onek.unique1 > 980 + WHERE onek.unique1 > 980 ORDER BY stringu1 using <; - + -- -- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data | -- sort +1d -2 +0nr -1 -- SELECT onek.unique1, onek.string4 FROM onek - WHERE onek.unique1 > 980 + WHERE onek.unique1 > 980 ORDER BY string4 using <, unique1 using >; - + -- -- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data | -- sort +1dr -2 +0n -1 @@ -38,7 +38,7 @@ SELECT onek.unique1, onek.string4 FROM onek SELECT onek.unique1, onek.string4 FROM onek WHERE onek.unique1 > 980 ORDER BY string4 using >, unique1 using <; - + -- -- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data | -- sort +0nr -1 +1d -2 @@ -52,7 +52,7 @@ SELECT onek.unique1, onek.string4 FROM onek -- sort +0n -1 +1dr -2 -- SELECT onek.unique1, onek.string4 FROM onek - WHERE onek.unique1 < 20 + WHERE onek.unique1 < 20 ORDER BY unique1 using <, string4 using >; -- @@ -77,7 +77,7 @@ SELECT onek2.* FROM onek2 WHERE onek2.unique1 < 10; -- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1 -- SELECT onek2.unique1, onek2.stringu1 FROM onek2 - WHERE onek2.unique1 < 20 + WHERE onek2.unique1 < 20 ORDER BY unique1 using >; -- diff --git a/src/test/regress/sql/select_implicit.sql b/src/test/regress/sql/select_implicit.sql index 448405bb1e..d815504222 100644 --- a/src/test/regress/sql/select_implicit.sql +++ b/src/test/regress/sql/select_implicit.sql @@ -55,7 +55,7 @@ SELECT c, count(*) FROM test_missing_target GROUP BY 3; 
-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b ORDER BY b; @@ -75,19 +75,19 @@ SELECT a/2, a/2 FROM test_missing_target GROUP BY a/2 ORDER BY a/2; -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; -- group w/o existing GROUP BY target under ambiguous condition -SELECT count(*) FROM test_missing_target x, test_missing_target y +SELECT count(*) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(*) INTO TABLE test_missing_target2 -FROM test_missing_target x, test_missing_target y +SELECT count(*) INTO TABLE test_missing_target2 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b ORDER BY x.b; SELECT * FROM test_missing_target2; @@ -125,25 +125,25 @@ SELECT count(b) FROM test_missing_target -- group w/o existing GROUP BY and ORDER BY target under ambiguous condition -- failure expected -SELECT count(x.a) FROM test_missing_target x, test_missing_target y +SELECT count(x.a) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY b/2 ORDER BY b/2; -- group w/ existing GROUP BY target under ambiguous condition -SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; -- group w/o existing GROUP BY target under ambiguous condition -- failure expected due to ambiguous b in count(b) -SELECT count(b) FROM test_missing_target x, test_missing_target y +SELECT count(b) FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2; -- group w/o existing GROUP BY target under ambiguous condition -- into a table -SELECT count(x.b) INTO TABLE test_missing_target3 -FROM test_missing_target x, test_missing_target y +SELECT count(x.b) INTO TABLE test_missing_target3 +FROM test_missing_target x, test_missing_target y WHERE x.a = y.a GROUP BY x.b/2 ORDER BY x.b/2; SELECT * FROM test_missing_target3; diff --git a/src/test/regress/sql/sequence.sql b/src/test/regress/sql/sequence.sql index af35a054d8..433e992994 100644 --- a/src/test/regress/sql/sequence.sql +++ b/src/test/regress/sql/sequence.sql @@ -1,19 +1,19 @@ --- --- test creation of SERIAL column --- - + CREATE TABLE serialTest (f1 text, f2 serial); - + INSERT INTO serialTest VALUES ('foo'); INSERT INTO serialTest VALUES ('bar'); INSERT INTO serialTest VALUES ('force', 100); INSERT INTO serialTest VALUES ('wrong', NULL); - + SELECT * FROM serialTest; -- basic sequence operations using both text and oid references CREATE SEQUENCE sequence_test; - + SELECT nextval('sequence_test'::text); SELECT nextval('sequence_test'::regclass); SELECT currval('sequence_test'::text); diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql index 9d13c39c8a..296e38b8c1 100644 --- a/src/test/regress/sql/subselect.sql +++ b/src/test/regress/sql/subselect.sql @@ -169,7 +169,7 @@ SELECT *, ELSE 'Approved' END) ELSE 'PO' - END) + END) END) AS "Status", (CASE WHEN ord.ordercancelled @@ -184,7 +184,7 @@ END) AS 
"Status", ELSE 'Approved' END) ELSE 'PO' - END) + END) END) AS "Status_OK" FROM orderstest ord; diff --git a/src/test/regress/sql/timestamp.sql b/src/test/regress/sql/timestamp.sql index 790ade3137..c4ed4eee3b 100644 --- a/src/test/regress/sql/timestamp.sql +++ b/src/test/regress/sql/timestamp.sql @@ -141,7 +141,7 @@ INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2001'); INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097'); INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC'); -SELECT '' AS "64", d1 FROM TIMESTAMP_TBL; +SELECT '' AS "64", d1 FROM TIMESTAMP_TBL; -- Demonstrate functions and operators SELECT '' AS "48", d1 FROM TIMESTAMP_TBL @@ -190,7 +190,7 @@ SELECT '' AS "54", d1 as "timestamp", FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; -- TO_CHAR() -SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') +SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') FROM TIMESTAMP_TBL; SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') @@ -199,23 +199,23 @@ SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth F SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') FROM TIMESTAMP_TBL; -SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') +SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') FROM TIMESTAMP_TBL; -SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') +SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') FROM TIMESTAMP_TBL; -SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') +SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') FROM TIMESTAMP_TBL; SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS') FROM TIMESTAMP_TBL; -SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') +SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') + FROM TIMESTAMP_TBL; + +SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') FROM TIMESTAMP_TBL; - -SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. 
HH:MI:SS pm') - FROM TIMESTAMP_TBL; SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID') FROM TIMESTAMP_TBL; diff --git a/src/test/regress/sql/timestamptz.sql b/src/test/regress/sql/timestamptz.sql index e74691cc04..863b2865cb 100644 --- a/src/test/regress/sql/timestamptz.sql +++ b/src/test/regress/sql/timestamptz.sql @@ -160,7 +160,7 @@ SELECT 'Wed Jul 11 10:51:14 GMT+4 2001'::timestamptz; SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz; SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz; -SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL; +SELECT '' AS "64", d1 FROM TIMESTAMPTZ_TBL; -- Demonstrate functions and operators SELECT '' AS "48", d1 FROM TIMESTAMPTZ_TBL @@ -208,32 +208,32 @@ SELECT '' AS "54", d1 as timestamptz, FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; -- TO_CHAR() -SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') +SELECT '' AS to_char_1, to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') FROM TIMESTAMPTZ_TBL; - + SELECT '' AS to_char_2, to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') - FROM TIMESTAMPTZ_TBL; + FROM TIMESTAMPTZ_TBL; SELECT '' AS to_char_3, to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') FROM TIMESTAMPTZ_TBL; - -SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') - FROM TIMESTAMPTZ_TBL; - -SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') + +SELECT '' AS to_char_4, to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') + FROM TIMESTAMPTZ_TBL; + +SELECT '' AS to_char_5, to_char(d1, 'HH HH12 HH24 MI SS SSSS') + FROM TIMESTAMPTZ_TBL; + +SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') FROM TIMESTAMPTZ_TBL; -SELECT '' AS to_char_6, to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') - FROM TIMESTAMPTZ_TBL; - SELECT '' AS to_char_7, to_char(d1, 'HH24--text--MI--text--SS') - FROM TIMESTAMPTZ_TBL; - -SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') FROM TIMESTAMPTZ_TBL; - -SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') - FROM TIMESTAMPTZ_TBL; + +SELECT '' AS to_char_8, to_char(d1, 'YYYYTH YYYYth Jth') + FROM TIMESTAMPTZ_TBL; + +SELECT '' AS to_char_9, to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. 
HH:MI:SS pm') + FROM TIMESTAMPTZ_TBL; SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID') FROM TIMESTAMPTZ_TBL; diff --git a/src/test/regress/sql/tinterval.sql b/src/test/regress/sql/tinterval.sql index 5abdb6d106..42399ce694 100644 --- a/src/test/regress/sql/tinterval.sql +++ b/src/test/regress/sql/tinterval.sql @@ -23,7 +23,7 @@ INSERT INTO TINTERVAL_TBL (f1) VALUES ('["Feb 15 1990 12:15:03" "2001-09-23 11:12:13"]'); --- badly formatted tintervals +-- badly formatted tintervals INSERT INTO TINTERVAL_TBL (f1) VALUES ('["bad time specifications" ""]'); @@ -84,7 +84,7 @@ SELECT '' AS fourteen, t1.f1 AS interval1, t2.f1 AS interval2 -- contains SELECT '' AS five, t1.f1 FROM TINTERVAL_TBL t1 - WHERE not t1.f1 << + WHERE not t1.f1 << tinterval '["Aug 15 14:23:19 1980" "Sep 16 14:23:19 1990"]' ORDER BY t1.f1; diff --git a/src/test/regress/sql/transactions.sql b/src/test/regress/sql/transactions.sql index c670ae18d0..17e830e7a4 100644 --- a/src/test/regress/sql/transactions.sql +++ b/src/test/regress/sql/transactions.sql @@ -4,7 +4,7 @@ BEGIN; -SELECT * +SELECT * INTO TABLE xacttest FROM aggtest; @@ -27,10 +27,10 @@ SELECT * FROM aggtest; ABORT; --- should not exist +-- should not exist SELECT oid FROM pg_class WHERE relname = 'disappear'; --- should have members again +-- should have members again SELECT * FROM aggtest; @@ -129,7 +129,7 @@ BEGIN; DELETE FROM savepoints WHERE a=2; ROLLBACK; COMMIT; -- should not be in a transaction block - + SELECT * FROM savepoints; -- test whole-tree commit on an aborted subtransaction diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql index a830b3b392..28928d5a93 100644 --- a/src/test/regress/sql/triggers.sql +++ b/src/test/regress/sql/triggers.sql @@ -23,25 +23,25 @@ create unique index pkeys_i on pkeys (pkey1, pkey2); -- (fkey1, fkey2) --> pkeys (pkey1, pkey2) -- (fkey3) --> fkeys2 (pkey23) -- -create trigger check_fkeys_pkey_exist - before insert or update on fkeys - for each row - execute procedure +create trigger check_fkeys_pkey_exist + before insert or update on fkeys + for each row + execute procedure check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2'); -create trigger check_fkeys_pkey2_exist - before insert or update on fkeys - for each row +create trigger check_fkeys_pkey2_exist + before insert or update on fkeys + for each row execute procedure check_primary_key ('fkey3', 'fkeys2', 'pkey23'); -- -- For fkeys2: -- (fkey21, fkey22) --> pkeys (pkey1, pkey2) -- -create trigger check_fkeys2_pkey_exist - before insert or update on fkeys2 - for each row - execute procedure +create trigger check_fkeys2_pkey_exist + before insert or update on fkeys2 + for each row + execute procedure check_primary_key ('fkey21', 'fkey22', 'pkeys', 'pkey1', 'pkey2'); -- Test comments @@ -55,10 +55,10 @@ COMMENT ON TRIGGER check_fkeys2_pkey_exist ON fkeys2 IS NULL; -- fkeys (fkey1, fkey2) and fkeys2 (fkey21, fkey22) -- create trigger check_pkeys_fkey_cascade - before delete or update on pkeys - for each row - execute procedure - check_foreign_key (2, 'cascade', 'pkey1', 'pkey2', + before delete or update on pkeys + for each row + execute procedure + check_foreign_key (2, 'cascade', 'pkey1', 'pkey2', 'fkeys', 'fkey1', 'fkey2', 'fkeys2', 'fkey21', 'fkey22'); -- @@ -66,9 +66,9 @@ create trigger check_pkeys_fkey_cascade -- ON DELETE/UPDATE (pkey23) RESTRICT: -- fkeys (fkey3) -- -create trigger check_fkeys2_fkey_restrict +create trigger check_fkeys2_fkey_restrict before delete or update on fkeys2 - for each row + for each 
row execute procedure check_foreign_key (1, 'restrict', 'pkey23', 'fkeys', 'fkey3'); insert into fkeys2 values (10, '1', 1); @@ -103,53 +103,53 @@ DROP TABLE fkeys2; -- -- Jan -- -- create table dup17 (x int4); --- --- create trigger dup17_before +-- +-- create trigger dup17_before -- before insert on dup17 --- for each row --- execute procedure +-- for each row +-- execute procedure -- funny_dup17 () -- ; --- +-- -- insert into dup17 values (17); -- select count(*) from dup17; -- insert into dup17 values (17); -- select count(*) from dup17; --- +-- -- drop trigger dup17_before on dup17; --- +-- -- create trigger dup17_after -- after insert on dup17 --- for each row --- execute procedure +-- for each row +-- execute procedure -- funny_dup17 () -- ; -- insert into dup17 values (13); -- select count(*) from dup17 where x = 13; -- insert into dup17 values (13); -- select count(*) from dup17 where x = 13; --- +-- -- DROP TABLE dup17; create sequence ttdummy_seq increment 10 start 0 minvalue 0; create table tttest ( - price_id int4, - price_val int4, + price_id int4, + price_val int4, price_on int4, price_off int4 default 999999 ); -create trigger ttdummy +create trigger ttdummy before delete or update on tttest - for each row - execute procedure + for each row + execute procedure ttdummy (price_on, price_off); -create trigger ttserial +create trigger ttserial before insert or update on tttest - for each row - execute procedure + for each row + execute procedure autoinc (price_on, ttdummy_seq); insert into tttest values (1, 1, null); @@ -386,7 +386,7 @@ CREATE TABLE trigger_test ( v varchar ); -CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger +CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger LANGUAGE plpgsql AS $$ declare @@ -399,7 +399,7 @@ begin relid := TG_relid::regclass; -- plpgsql can't discover its trigger data in a hash like perl and python - -- can, or by a sort of reflection like tcl can, + -- can, or by a sort of reflection like tcl can, -- so we have to hard code the names. 
raise NOTICE 'TG_NAME: %', TG_name; raise NOTICE 'TG_WHEN: %', TG_when; @@ -438,16 +438,16 @@ begin end; $$; -CREATE TRIGGER show_trigger_data_trig +CREATE TRIGGER show_trigger_data_trig BEFORE INSERT OR UPDATE OR DELETE ON trigger_test FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); insert into trigger_test values(1,'insert'); update trigger_test set v = 'update' where i = 1; delete from trigger_test; - + DROP TRIGGER show_trigger_data_trig on trigger_test; - + DROP FUNCTION trigger_data(); DROP TABLE trigger_test; @@ -547,11 +547,11 @@ INSERT INTO min_updates_test VALUES ('a',1,2),('b','2',null); INSERT INTO min_updates_test_oids VALUES ('a',1,2),('b','2',null); -CREATE TRIGGER z_min_update +CREATE TRIGGER z_min_update BEFORE UPDATE ON min_updates_test FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); -CREATE TRIGGER z_min_update +CREATE TRIGGER z_min_update BEFORE UPDATE ON min_updates_test_oids FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); diff --git a/src/test/regress/sql/truncate.sql b/src/test/regress/sql/truncate.sql index a3e324db21..a3d6f5368f 100644 --- a/src/test/regress/sql/truncate.sql +++ b/src/test/regress/sql/truncate.sql @@ -148,7 +148,7 @@ INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); CREATE TRIGGER t BEFORE TRUNCATE ON trunc_trigger_test -FOR EACH STATEMENT +FOR EACH STATEMENT EXECUTE PROCEDURE trunctrigger('before trigger truncate'); SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; @@ -166,7 +166,7 @@ INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); CREATE TRIGGER tt AFTER TRUNCATE ON trunc_trigger_test -FOR EACH STATEMENT +FOR EACH STATEMENT EXECUTE PROCEDURE trunctrigger('after trigger truncate'); SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql index 000f6eb2e7..55afcec906 100644 --- a/src/test/regress/sql/tsdicts.sql +++ b/src/test/regress/sql/tsdicts.sql @@ -50,7 +50,7 @@ SELECT ts_lexize('hunspell', 'footballyklubber'); -- Synonim dictionary CREATE TEXT SEARCH DICTIONARY synonym ( - Template=synonym, + Template=synonym, Synonyms=synonym_sample ); @@ -63,7 +63,7 @@ SELECT ts_lexize('synonym', 'indices'); -- cannot pass more than one word to thesaurus. 
CREATE TEXT SEARCH DICTIONARY thesaurus ( Template=thesaurus, - DictFile=thesaurus_sample, + DictFile=thesaurus_sample, Dictionary=english_stem ); @@ -99,8 +99,8 @@ CREATE TEXT SEARCH CONFIGURATION synonym_tst ( COPY=english ); -ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword +ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR + asciiword, hword_asciipart, asciihword WITH synonym, english_stem; SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre'); @@ -114,8 +114,8 @@ CREATE TEXT SEARCH CONFIGURATION thesaurus_tst ( COPY=synonym_tst ); -ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR - asciiword, hword_asciipart, asciihword +ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR + asciiword, hword_asciipart, asciihword WITH synonym, thesaurus, english_stem; SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one'); diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql index 3c0a7dd82a..d261da2104 100644 --- a/src/test/regress/sql/tsearch.sql +++ b/src/test/regress/sql/tsearch.sql @@ -33,7 +33,7 @@ WHERE mapcfg = 0 OR mapdict = 0; -- Look for pg_ts_config_map entries that aren't one of parser's token types SELECT * FROM ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt + FROM pg_ts_config ) AS tt RIGHT JOIN pg_ts_config_map AS m ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) WHERE @@ -76,7 +76,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - + RESET enable_seqscan; INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; @@ -214,7 +214,7 @@ ff-bg ', to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); ---Check if headline fragments work +--Check if headline fragments work SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, diff --git a/src/test/regress/sql/type_sanity.sql b/src/test/regress/sql/type_sanity.sql index 479bf8542a..af7aa2d8b3 100644 --- a/src/test/regress/sql/type_sanity.sql +++ b/src/test/regress/sql/type_sanity.sql @@ -61,7 +61,7 @@ WHERE p1.typtype in ('b','e') AND p1.typname NOT LIKE E'\\_%' AND NOT EXISTS p2.typelem = p1.oid and p1.typarray = p2.oid); -- Make sure typarray points to a varlena array type of our own base -SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype, +SELECT p1.oid, p1.typname as basetype, p2.typname as arraytype, p2.typelem, p2.typlen FROM pg_type p1 LEFT JOIN pg_type p2 ON (p1.typarray = p2.oid) WHERE p1.typarray <> 0 AND diff --git a/src/test/regress/sql/varchar.sql b/src/test/regress/sql/varchar.sql index 414c585d9a..58d29ca4ba 100644 --- a/src/test/regress/sql/varchar.sql +++ b/src/test/regress/sql/varchar.sql @@ -8,17 +8,17 @@ INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); --- any of the following three input formats are acceptable +-- any of the following three input formats are acceptable INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); INSERT INTO VARCHAR_TBL (f1) VALUES (2); INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); --- zero-length char +-- zero-length char INSERT INTO VARCHAR_TBL (f1) VALUES (''); --- try 
varchar's of greater than 1 length +-- try varchar's of greater than 1 length INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); diff --git a/src/test/regress/sql/window.sql b/src/test/regress/sql/window.sql index 1cfc64bd8b..6a5c855ead 100644 --- a/src/test/regress/sql/window.sql +++ b/src/test/regress/sql/window.sql @@ -73,7 +73,7 @@ SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FRO SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; -- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window. -SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; +SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s @@ -82,19 +82,19 @@ SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s; -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum +SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum FROM tenk1 GROUP BY ten, two; SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10; -SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum +SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + + sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum FROM tenk1 WHERE unique2 < 10; -- opexpr with different windows evaluation. SELECT * FROM( - SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + - sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, + SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + + sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount, sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum FROM tenk1 @@ -103,7 +103,7 @@ WHERE total <> fourcount + twosum; SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10; -SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum +SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten); -- more than one window with GROUP BY diff --git a/src/test/thread/README b/src/test/thread/README index 509f3dc24e..00ec2fff06 100644 --- a/src/test/thread/README +++ b/src/test/thread/README @@ -17,21 +17,21 @@ To use this program manually, you must: o compile and run this program If your platform requires special thread flags that are not tested by -/config/acx_pthread.m4, add PTHREAD_CFLAGS and PTHREAD_LIBS defines to +/config/acx_pthread.m4, add PTHREAD_CFLAGS and PTHREAD_LIBS defines to your template/${port} file. 
Windows Systems =============== Windows systems do not vary in their thread-safeness in the same way that -other systems might, nor do they generally have pthreads installed, hence -on Windows this test is skipped by the configure program (pthreads is +other systems might, nor do they generally have pthreads installed, hence +on Windows this test is skipped by the configure program (pthreads is required by the test program, but not PostgreSQL itself). If you do wish to test your system however, you can do so as follows: 1) Install pthreads in you Mingw/Msys environment. You can download pthreads from ftp://sources.redhat.com/pub/pthreads-win32/. - + 2) Build the test program: gcc -o thread_test.exe \ diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES index de994b2b50..8e54793f1d 100644 --- a/src/tools/RELEASE_CHANGES +++ b/src/tools/RELEASE_CHANGES @@ -143,7 +143,7 @@ function: } If we wanted to add a third argument: - + void print_stuff(int arg1, int arg2, int arg3) { printf("stuff: %d %d %d\n", arg1, arg2, arg3); diff --git a/src/tools/backend/README b/src/tools/backend/README index d779c0e11a..2b8692d393 100644 --- a/src/tools/backend/README +++ b/src/tools/backend/README @@ -1,4 +1,4 @@ src/tools/backend/README -Just point your browser at the index.html file, and click on the +Just point your browser at the index.html file, and click on the flowchart to see the description and source code. diff --git a/src/tools/backend/backend_dirs.html b/src/tools/backend/backend_dirs.html index 9b675c3e2a..16bd894582 100644 --- a/src/tools/backend/backend_dirs.html +++ b/src/tools/backend/backend_dirs.html @@ -339,7 +339,7 @@ i.e. '~'.

href="../../backend/port">port - compatibility routines
- +
Maintainer: Bruce Momjian ( pgman@candle.pha.pa.us diff --git a/src/tools/check_keywords.pl b/src/tools/check_keywords.pl index 24d7bf6f20..3b68638614 100755 --- a/src/tools/check_keywords.pl +++ b/src/tools/check_keywords.pl @@ -13,7 +13,7 @@ if (@ARGV) { $path = $ARGV[0]; shift @ARGV; } else { - $path = "."; + $path = "."; } $[ = 1; # set array base to 1 @@ -86,7 +86,7 @@ line: while () { if ($arr[$fieldIndexer] eq '|') { next; } - + # Put this keyword into the right list push @{$keywords{$kcat}}, $arr[$fieldIndexer]; } diff --git a/src/tools/editors/emacs.samples b/src/tools/editors/emacs.samples index c1820f28c5..f755843d40 100644 --- a/src/tools/editors/emacs.samples +++ b/src/tools/editors/emacs.samples @@ -64,11 +64,11 @@ (add-hook 'c-mode-hook (function - (lambda nil + (lambda nil (if (string-match "pgsql" buffer-file-name) (progn (c-set-style "bsd") - (setq c-basic-offset 4) + (setq c-basic-offset 4) (setq tab-width 4) (c-set-offset 'case-label '+) (setq indent-tabs-mode t) diff --git a/src/tools/entab/Makefile b/src/tools/entab/Makefile index b252432e14..de8181828a 100644 --- a/src/tools/entab/Makefile +++ b/src/tools/entab/Makefile @@ -4,17 +4,17 @@ # TARGET = entab BINDIR = /usr/local/bin -XFLAGS = +XFLAGS = CFLAGS = -O -LIBS = +LIBS = $(TARGET) : entab.o halt.o $(CC) -o $(TARGET) $(XFLAGS) $(CFLAGS) entab.o halt.o $(LIBS) -entab.o : entab.c +entab.o : entab.c $(CC) -c $(XFLAGS) $(CFLAGS) entab.c -halt.o : halt.c +halt.o : halt.c $(CC) -c $(XFLAGS) $(CFLAGS) halt.c clean: diff --git a/src/tools/entab/entab.man b/src/tools/entab/entab.man index 1692ee631b..362ec730f4 100644 --- a/src/tools/entab/entab.man +++ b/src/tools/entab/entab.man @@ -41,7 +41,7 @@ leaving a large gap. The quote-protection option allows tab replacement without quoted strings being changed. Useful when strings in source code will not have the same tab stops -when executed in the program. +when executed in the program. .LP To change a text file created on a system with one size of tab stop to display properly on a device with different tab setting, diff --git a/src/tools/find_static b/src/tools/find_static index 28762728af..c7014e6014 100755 --- a/src/tools/find_static +++ b/src/tools/find_static @@ -26,8 +26,8 @@ echo " copy debug from '/tmp/"$$"'; - select * - into table debug2 + select * + into table debug2 from debug; create index idebug on debug(scope,func); @@ -35,8 +35,8 @@ echo " vacuum debug; vacuum debug2; - update debug2 - set scope = '_' + update debug2 + set scope = '_' from debug where debug2.func = debug.func and debug2.scope = 'T' and debug.scope = 'U'; diff --git a/src/tools/find_typedef b/src/tools/find_typedef index 838c29b881..8b07de62ef 100755 --- a/src/tools/find_typedef +++ b/src/tools/find_typedef @@ -4,12 +4,12 @@ # This script attempts to find all typedef's in the postgres binaries # by using 'nm' to report all typedef debugging symbols. -# -# For this program to work, you must have compiled all binaries with +# +# For this program to work, you must have compiled all binaries with # debugging symbols. # # This is run on BSD/OS 4.0 or Linux, so you may need to make changes. -# +# # Ignore the nm errors about a file not being a binary file. # # It gets typedefs by reading "STABS": diff --git a/src/tools/make_diff/README b/src/tools/make_diff/README index bc5cea4ceb..9401a74a64 100644 --- a/src/tools/make_diff/README +++ b/src/tools/make_diff/README @@ -14,7 +14,7 @@ for every file in the current directory. 
I can: cporig `grep -l HeapTuple *` If I use mkid (from ftp.postgreSQL.org), I can do: - + cporig `lid -kn 'fsyncOff'` and get a copy of every file containing that word. I can then do: @@ -29,7 +29,7 @@ to edit all those files. When I am ready to generate a patch, I run 'difforig' command from the top of the source tree: - + I pipe the output of this to a file to hold my patch, and the file names it processes appear on my screen. It creates a nice patch for me of all the files I used with cporig. diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index bb1aed7b7c..1e0c9d6e1e 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -25,9 +25,9 @@ my $postgres; my $libpq; my $contrib_defines = {'refint' => 'REFINT_VERBOSE'}; -my @contrib_uselibpq = ('dblink', 'oid2name', 'pgbench', 'pg_upgrade', +my @contrib_uselibpq = ('dblink', 'oid2name', 'pgbench', 'pg_upgrade', 'vacuumlo'); -my @contrib_uselibpgport = ('oid2name', 'pgbench', 'pg_standby', +my @contrib_uselibpgport = ('oid2name', 'pgbench', 'pg_standby', 'pg_archivecleanup', 'pg_upgrade', 'vacuumlo'); my $contrib_extralibs = {'pgbench' => ['wsock32.lib']}; my $contrib_extraincludes = {'tsearch2' => ['contrib/tsearch2'], 'dblink' => ['src/backend']}; diff --git a/src/tools/msvc/README b/src/tools/msvc/README index 531c286f47..58e266e11f 100644 --- a/src/tools/msvc/README +++ b/src/tools/msvc/README @@ -18,13 +18,13 @@ perltidy -b -bl -nsfs -naws -l=100 -ole=unix *.pl *.pm Notes about Visual Studio Express --------------------------------- To build PostgreSQL using Visual Studio Express, the Platform SDK -has to be installed. Since this is not included in the product +has to be installed. Since this is not included in the product originally, extra steps are needed to make it work. -First, download and install the latest Platform SDK from -www.microsoft.com. +First, download and install the latest Platform SDK from +www.microsoft.com. -Locate the files vcprojectengine.dll.express.config and +Locate the files vcprojectengine.dll.express.config and vcprojectengine.dll.config in the vc\vcpackages directory of the Visual C++ Express installation. In these files, add the paths to the Platform SDK to the Include, Library and Path tags. Be sure diff --git a/src/tools/pginclude/pgrminclude b/src/tools/pginclude/pgrminclude index 1e99b12b73..a8ec10a486 100755 --- a/src/tools/pginclude/pgrminclude +++ b/src/tools/pginclude/pgrminclude @@ -4,7 +4,7 @@ # src/tools/pginclude/pgrminclude trap "rm -f /tmp/$$.c /tmp/$$.o /tmp/$$ /tmp/$$a /tmp/$$b" 0 1 2 3 15 -find . \( -name CVS -a -prune \) -o -type f -name '*.[ch]' -print | +find . \( -name CVS -a -prune \) -o -type f -name '*.[ch]' -print | grep -v '\./postgres.h' | grep -v '\./pg_config.h' | grep -v '\./c.h' | @@ -14,7 +14,7 @@ do then IS_INCLUDE="Y" else IS_INCLUDE="N" fi - + # loop through all includes cat "$FILE" | grep "^#include" | sed 's/^#include[ ]*[<"]\([^>"]*\).*$/\1/g' | @@ -39,7 +39,7 @@ do # remove defines from include files if [ "$IS_INCLUDE" = "Y" ] - then cat "$FILE" | grep -v "^#if" | grep -v "^#else" | + then cat "$FILE" | grep -v "^#if" | grep -v "^#else" | grep -v "^#endif" | sed 's/->[a-zA-Z0-9_\.]*//g' >/tmp/$$a else cat "$FILE" >/tmp/$$a fi diff --git a/src/tools/pgindent/README b/src/tools/pgindent/README index 49e0893575..0fedfa99ff 100644 --- a/src/tools/pgindent/README +++ b/src/tools/pgindent/README @@ -4,7 +4,7 @@ pgindent ======== This can format all PostgreSQL *.c and *.h files, but excludes *.y, and -*.l files. +*.l files. 
1) Change directory to the top of the build tree. @@ -36,8 +36,8 @@ This can format all PostgreSQL *.c and *.h files, but excludes *.y, and --------------------------------------------------------------------------- -We have standardized on NetBSD's indent. We have fixed a few bugs which -requre the NetBSD source to be patched with indent.bsd.patch patch. A +We have standardized on NetBSD's indent. We have fixed a few bugs which +requre the NetBSD source to be patched with indent.bsd.patch patch. A fully patched version is available at ftp://ftp.postgresql.org/pub/dev. GNU indent, version 2.2.6, has several problems, and is not recommended. diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent index 00267ec845..429dc7c64b 100755 --- a/src/tools/pgindent/pgindent +++ b/src/tools/pgindent/pgindent @@ -39,7 +39,7 @@ if [ "$?" -eq 0 ] then echo "You appear to have GNU indent rather than BSD indent." >&2 echo "See the pgindent/README file for a description of its problems." >&2 EXTRA_OPTS="-cdb -bli0 -npcs -cli4 -sc" -else +else EXTRA_OPTS="-cli1" fi @@ -193,7 +193,7 @@ do # isn't needed for general use. # awk ' # { -# line3 = $0; +# line3 = $0; # if (skips > 0) # skips--; # if (line1 ~ / *{$/ && @@ -221,7 +221,7 @@ do # Remove blank line between opening brace and block comment. awk ' { - line3 = $0; + line3 = $0; if (skips > 0) skips--; if (line1 ~ / *{$/ && @@ -326,10 +326,10 @@ do print line1; }' | -# Move prototype names to the same line as return type. Useful for ctags. +# Move prototype names to the same line as return type. Useful for ctags. # Indent should do this, but it does not. It formats prototypes just # like real functions. - awk ' BEGIN {paren_level = 0} + awk ' BEGIN {paren_level = 0} { if ($0 ~ /^[a-zA-Z_][a-zA-Z_0-9]*[^\(]*$/) { diff --git a/src/tools/pgtest b/src/tools/pgtest index 11223f31cc..c5356fced8 100755 --- a/src/tools/pgtest +++ b/src/tools/pgtest @@ -18,12 +18,12 @@ TMP="/tmp/$$" [ "X$1" != "X-n" ] && PGCLEAN=clean -(gmake $PGCLEAN check 2>&1; echo "$?" > $TMP/ret) | +(gmake $PGCLEAN check 2>&1; echo "$?" > $TMP/ret) | (tee $TMP/0; exit `cat $TMP/ret`) && cat $TMP/0 | -# The following grep's have to be adjusted for your setup because +# The following grep's have to be adjusted for your setup because # certain warnings are acceptable. -grep -i warning | -grep -v setproctitle | -grep -v find_rule | +grep -i warning | +grep -v setproctitle | +grep -v find_rule | grep -v yy_flex_realloc diff --git a/src/tutorial/advanced.source b/src/tutorial/advanced.source index 2717d4c51a..1dada88e62 100644 --- a/src/tutorial/advanced.source +++ b/src/tutorial/advanced.source @@ -17,7 +17,7 @@ -- descendants. ----------------------------- --- For example, the capitals table inherits from cities table. (It inherits +-- For example, the capitals table inherits from cities table. (It inherits -- all data fields from cities.) CREATE TABLE cities ( diff --git a/src/tutorial/basics.source b/src/tutorial/basics.source index 1092cdf971..9dbd75eb15 100644 --- a/src/tutorial/basics.source +++ b/src/tutorial/basics.source @@ -31,17 +31,17 @@ CREATE TABLE cities ( ----------------------------- -- Populating a Table With Rows: --- An INSERT statement is used to insert a new row into a table. There +-- An INSERT statement is used to insert a new row into a table. There -- are several ways you can specify what columns the data should go to. ----------------------------- -- 1. 
The simplest case is when the list of value correspond to the order of -- the columns specified in CREATE TABLE. -INSERT INTO weather +INSERT INTO weather VALUES ('San Francisco', 46, 50, 0.25, '1994-11-27'); -INSERT INTO cities +INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); -- 2. You can also specify what column the values correspond to. (The columns @@ -76,7 +76,7 @@ SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date FROM weather; SELECT * FROM weather - WHERE city = 'San Francisco' + WHERE city = 'San Francisco' AND prcp > 0.0; -- Here is a more complicated one. Duplicates are removed when DISTINCT is @@ -128,10 +128,10 @@ SELECT * -- Suppose we want to find all the records that are in the temperature range -- of other records. W1 and W2 are aliases for weather. -SELECT W1.city, W1.temp_lo, W1.temp_hi, +SELECT W1.city, W1.temp_lo, W1.temp_hi, W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2 -WHERE W1.temp_lo < W2.temp_lo +WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi; @@ -147,7 +147,7 @@ SELECT city FROM weather -- Aggregate with GROUP BY SELECT city, max(temp_lo) - FROM weather + FROM weather GROUP BY city; -- ... and HAVING @@ -185,7 +185,7 @@ DELETE FROM weather WHERE city = 'Hayward'; SELECT * FROM weather; -- You can also delete all the rows in a table by doing the following. (This --- is different from DROP TABLE which removes the table in addition to the +-- is different from DROP TABLE which removes the table in addition to the -- removing the rows.) DELETE FROM weather; diff --git a/src/tutorial/complex.source b/src/tutorial/complex.source index 30b1e82406..d893c2fef4 100644 --- a/src/tutorial/complex.source +++ b/src/tutorial/complex.source @@ -3,7 +3,7 @@ -- complex.sql- -- This file shows how to create a new user-defined type and how to -- use this new type. --- +-- -- -- Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group -- Portions Copyright (c) 1994, Regents of the University of California @@ -28,7 +28,7 @@ -- C code. We also mark them IMMUTABLE, since they always return the -- same outputs given the same inputs. --- the input function 'complex_in' takes a null-terminated string (the +-- the input function 'complex_in' takes a null-terminated string (the -- textual representation of the type) and turns it into the internal -- (in memory) representation. You will get a message telling you 'complex' -- does not exist yet but that's okay. @@ -67,7 +67,7 @@ CREATE FUNCTION complex_send(complex) -- memory block required to hold the type (we need two 8-byte doubles). CREATE TYPE complex ( - internallength = 16, + internallength = 16, input = complex_in, output = complex_out, receive = complex_recv, @@ -89,7 +89,7 @@ CREATE TABLE test_complex ( ); -- data for user-defined types are just strings in the proper textual --- representation. +-- representation. INSERT INTO test_complex VALUES ('(1.0, 2.5)', '(4.2, 3.55 )'); INSERT INTO test_complex VALUES ('(33.0, 51.4)', '(100.42, 93.55)'); @@ -100,7 +100,7 @@ SELECT * FROM test_complex; -- Creating an operator for the new type: -- Let's define an add operator for complex types. Since POSTGRES -- supports function overloading, we'll use + as the add operator. --- (Operator names can be reused with different numbers and types of +-- (Operator names can be reused with different numbers and types of -- arguments.) ----------------------------- @@ -112,7 +112,7 @@ CREATE FUNCTION complex_add(complex, complex) -- we can now define the operator. 
We show a binary operator here but you -- can also define unary operators by omitting either of leftarg or rightarg. -CREATE OPERATOR + ( +CREATE OPERATOR + ( leftarg = complex, rightarg = complex, procedure = complex_add, diff --git a/src/tutorial/funcs.source b/src/tutorial/funcs.source index d4d61fa09c..7bbda599a6 100644 --- a/src/tutorial/funcs.source +++ b/src/tutorial/funcs.source @@ -18,14 +18,14 @@ ----------------------------- -- --- let's create a simple SQL function that takes no arguments and +-- let's create a simple SQL function that takes no arguments and -- returns 1 CREATE FUNCTION one() RETURNS integer AS 'SELECT 1 as ONE' LANGUAGE SQL; -- --- functions can be used in any expressions (eg. in the target list or +-- functions can be used in any expressions (eg. in the target list or -- qualifications) SELECT one() AS answer; @@ -61,7 +61,7 @@ INSERT INTO EMP VALUES ('Andy', -1000, 2, '(1,3)'); INSERT INTO EMP VALUES ('Bill', 4200, 36, '(2,1)'); INSERT INTO EMP VALUES ('Ginger', 4800, 30, '(2,4)'); --- the argument of a function can also be a tuple. For instance, +-- the argument of a function can also be a tuple. For instance, -- double_salary takes a tuple of the EMP table CREATE FUNCTION double_salary(EMP) RETURNS integer @@ -71,8 +71,8 @@ SELECT name, double_salary(EMP) AS dream FROM EMP WHERE EMP.cubicle ~= '(2,1)'::point; --- the return value of a function can also be a tuple. However, make sure --- that the expressions in the target list is in the same order as the +-- the return value of a function can also be a tuple. However, make sure +-- that the expressions in the target list is in the same order as the -- columns of EMP. CREATE FUNCTION new_emp() RETURNS EMP @@ -121,7 +121,7 @@ SELECT name(high_pay()) AS overpaid; ----------------------------- -- Creating C Functions --- in addition to SQL functions, you can also create C functions. +-- in addition to SQL functions, you can also create C functions. -- See funcs.c for the definition of the C functions. 
----------------------------- @@ -144,7 +144,7 @@ SELECT makepoint('(1,2)'::point, '(3,4)'::point ) AS newpoint; SELECT copytext('hello world!'); SELECT name, c_overpaid(EMP, 1500) AS overpaid -FROM EMP +FROM EMP WHERE name = 'Bill' or name = 'Sam'; -- remove functions that were created in this file diff --git a/src/tutorial/syscat.source b/src/tutorial/syscat.source index ad50d10fd4..10edf62e16 100644 --- a/src/tutorial/syscat.source +++ b/src/tutorial/syscat.source @@ -42,8 +42,8 @@ SELECT n.nspname, c.relname -- column reference) -- SELECT n.nspname AS schema_name, - bc.relname AS class_name, - ic.relname AS index_name, + bc.relname AS class_name, + ic.relname AS index_name, a.attname FROM pg_namespace n, pg_class bc, -- base class @@ -64,7 +64,7 @@ SELECT n.nspname AS schema_name, -- classes -- SELECT n.nspname, c.relname, a.attname, format_type(t.oid, null) as typname - FROM pg_namespace n, pg_class c, + FROM pg_namespace n, pg_class c, pg_attribute a, pg_type t WHERE n.oid = c.relnamespace and c.relkind = 'r' -- no indices @@ -94,10 +94,10 @@ SELECT n.nspname, r.rolname, format_type(t.oid, null) as typname -- -- lists all left unary operators -- -SELECT n.nspname, o.oprname AS left_unary, +SELECT n.nspname, o.oprname AS left_unary, format_type(right_type.oid, null) AS operand, format_type(result.oid, null) AS return_type - FROM pg_namespace n, pg_operator o, + FROM pg_namespace n, pg_operator o, pg_type right_type, pg_type result WHERE o.oprnamespace = n.oid and o.oprkind = 'l' -- left unary @@ -109,10 +109,10 @@ SELECT n.nspname, o.oprname AS left_unary, -- -- lists all right unary operators -- -SELECT n.nspname, o.oprname AS right_unary, +SELECT n.nspname, o.oprname AS right_unary, format_type(left_type.oid, null) AS operand, format_type(result.oid, null) AS return_type - FROM pg_namespace n, pg_operator o, + FROM pg_namespace n, pg_operator o, pg_type left_type, pg_type result WHERE o.oprnamespace = n.oid and o.oprkind = 'r' -- right unary @@ -127,7 +127,7 @@ SELECT n.nspname, o.oprname AS binary_op, format_type(left_type.oid, null) AS left_opr, format_type(right_type.oid, null) AS right_opr, format_type(result.oid, null) AS return_type - FROM pg_namespace n, pg_operator o, pg_type left_type, + FROM pg_namespace n, pg_operator o, pg_type left_type, pg_type right_type, pg_type result WHERE o.oprnamespace = n.oid and o.oprkind = 'b' -- binary @@ -142,12 +142,12 @@ SELECT n.nspname, o.oprname AS binary_op, -- C functions -- SELECT n.nspname, p.proname, p.pronargs, format_type(t.oid, null) as return_type - FROM pg_namespace n, pg_proc p, + FROM pg_namespace n, pg_proc p, pg_language l, pg_type t WHERE p.pronamespace = n.oid and n.nspname not like 'pg\\_%' -- no catalogs and n.nspname != 'information_schema' -- no information_schema - and p.prolang = l.oid + and p.prolang = l.oid and p.prorettype = t.oid and l.lanname = 'c' ORDER BY nspname, proname, pronargs, return_type; @@ -156,7 +156,7 @@ SELECT n.nspname, p.proname, p.pronargs, format_type(t.oid, null) as return_type -- lists all aggregate functions and the types to which they can be applied -- SELECT n.nspname, p.proname, format_type(t.oid, null) as typname - FROM pg_namespace n, pg_aggregate a, + FROM pg_namespace n, pg_aggregate a, pg_proc p, pg_type t WHERE p.pronamespace = n.oid and a.aggfnoid = p.oid @@ -170,7 +170,7 @@ SELECT n.nspname, p.proname, format_type(t.oid, null) as typname -- families -- SELECT am.amname, n.nspname, opf.opfname, opr.oprname - FROM pg_namespace n, pg_am am, pg_opfamily opf, + FROM pg_namespace n, 
pg_am am, pg_opfamily opf, pg_amop amop, pg_operator opr WHERE opf.opfnamespace = n.oid and opf.opfmethod = am.oid diff --git a/src/win32.mak b/src/win32.mak index 05ed2db114..7bbc988ff6 100644 --- a/src/win32.mak +++ b/src/win32.mak @@ -5,11 +5,11 @@ !IF "$(OS)" == "Windows_NT" NULL= -!ELSE +!ELSE NULL=nul -!ENDIF +!ENDIF -ALL: +ALL: cd include if not exist pg_config.h copy pg_config.h.win32 pg_config.h if not exist pg_config_os.h copy port\win32.h pg_config_os.h