From 0245f8db36f375326c2bae0c3420d3c77714e72d Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Fri, 19 May 2023 17:24:48 -0400
Subject: [PATCH] Pre-beta mechanical code beautification.

Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical.  We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop).  We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up.  Going
forward, that should make for fewer random-seeming changes to existing
code.

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
---
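Two schematic illustrations of the tooling changes described above (the
exact whitespace cannot be read back out of the hunks below, so these
are sketches of the stated behavior, not verbatim excerpts).  With
pg_bsd_indent 2.1.2, a declaration whose initialization expression
continues onto the next line, such as the one touched in
contrib/test_decoding/test_decoding.c below, used to have its
continuation left flush with the declaration:

    /* old pg_bsd_indent: continuation at the declaration's own indent */
    TestDecodingTxnData *txndata =
    MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));

and is now indented one further tab stop:

    /* pg_bsd_indent 2.1.2: continuation indented one tab stop */
    TestDecodingTxnData *txndata =
        MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));

The perltidy change works in the opposite direction: assignment blocks
are no longer padded so that the '=' signs line up, so a block like the
one touched in contrib/intarray/bench/bench.pl below goes, roughly, from

    # old settings: '=' signs padded to align within the block
    my $t0    = [gettimeofday];
    my $count = 0;
    my $b     = $opt{b};

to

    # new settings: a single space around '=' is left alone
    my $t0 = [gettimeofday];
    my $count = 0;
    my $b = $opt{b};

which is why later, unrelated patches should no longer trigger
re-padding of whole blocks.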
contrib/amcheck/t/001_verify_heapam.pl | 4 +-
contrib/amcheck/t/003_cic_2pc.pl | 21 +-
contrib/amcheck/verify_heapam.c | 34 +-
contrib/auto_explain/t/001_auto_explain.pl | 6 +-
contrib/basebackup_to_shell/t/001_basic.pl | 4 +-
contrib/basic_archive/basic_archive.c | 4 +-
contrib/dblink/dblink.c | 2 +-
contrib/intarray/bench/bench.pl | 6 +-
contrib/intarray/bench/create_test.pl | 2 +-
contrib/ltree/ltree_gist.c | 2 +-
contrib/ltree/ltree_io.c | 6 +-
contrib/ltree/ltxtquery_io.c | 6 +-
contrib/pg_prewarm/t/001_basic.pl | 2 +-
contrib/pg_walinspect/pg_walinspect.c | 4 +-
contrib/postgres_fdw/connection.c | 2 +-
contrib/postgres_fdw/postgres_fdw.c | 53 +-
contrib/postgres_fdw/shippable.c | 4 +-
contrib/seg/seg-validate.pl | 10 +-
contrib/test_decoding/t/001_repl_stats.pl | 2 +-
contrib/test_decoding/test_decoding.c | 4 +-
doc/src/sgml/mk_feature_tables.pl | 2 +-
src/backend/access/brin/brin.c | 26 +-
src/backend/access/common/reloptions.c | 6 +-
src/backend/access/gist/gist.c | 2 +-
src/backend/access/gist/gistbuildbuffers.c | 2 +-
src/backend/access/gist/gistget.c | 4 +-
src/backend/access/gist/gistxlog.c | 2 +-
src/backend/access/hash/hashfunc.c | 14 +-
src/backend/access/heap/heapam.c | 2 +-
src/backend/access/heap/heapam_handler.c | 4 +-
src/backend/access/heap/hio.c | 2 +-
src/backend/access/heap/pruneheap.c | 2 +-
src/backend/access/heap/vacuumlazy.c | 17 +-
src/backend/access/heap/visibilitymap.c | 2 +-
src/backend/access/nbtree/nbtpage.c | 4 +-
src/backend/access/rmgrdesc/dbasedesc.c | 4 +-
src/backend/access/rmgrdesc/gindesc.c | 4 +-
src/backend/access/spgist/spgscan.c | 10 +-
src/backend/access/table/tableam.c | 4 +-
src/backend/access/transam/multixact.c | 2 +-
src/backend/access/transam/parallel.c | 8 +-
src/backend/access/transam/xact.c | 13 +-
src/backend/access/transam/xlog.c | 12 +-
src/backend/access/transam/xloginsert.c | 4 +-
src/backend/access/transam/xlogprefetcher.c | 6 +-
src/backend/access/transam/xlogreader.c | 2 +-
src/backend/access/transam/xlogrecovery.c | 2 +-
src/backend/backup/basebackup.c | 8 +-
src/backend/backup/basebackup_copy.c | 3 +-
src/backend/catalog/Catalog.pm | 88 +-
src/backend/catalog/aclchk.c | 4 +-
src/backend/catalog/genbki.pl | 100 +-
src/backend/catalog/indexing.c | 4 +-
src/backend/catalog/namespace.c | 6 +-
src/backend/catalog/pg_operator.c | 2 +-
src/backend/catalog/pg_shdepend.c | 1 +
src/backend/commands/alter.c | 4 +-
src/backend/commands/collationcmds.c | 46 +-
src/backend/commands/dbcommands.c | 18 +-
src/backend/commands/dropcmds.c | 1 +
src/backend/commands/explain.c | 6 +-
src/backend/commands/functioncmds.c | 4 +-
src/backend/commands/indexcmds.c | 19 +-
src/backend/commands/schemacmds.c | 2 +-
src/backend/commands/subscriptioncmds.c | 26 +-
src/backend/commands/tablecmds.c | 16 +-
src/backend/commands/tablespace.c | 4 +-
src/backend/commands/typecmds.c | 6 +-
src/backend/commands/user.c | 52 +-
src/backend/commands/view.c | 2 +-
src/backend/executor/execExpr.c | 10 +-
src/backend/executor/execExprInterp.c | 14 +-
src/backend/executor/execIndexing.c | 4 +-
src/backend/executor/execSRF.c | 6 +-
src/backend/executor/nodeAgg.c | 8 +-
src/backend/executor/nodeHash.c | 6 +-
src/backend/executor/nodeHashjoin.c | 28 +-
src/backend/executor/nodeIncrementalSort.c | 4 +-
src/backend/executor/nodeModifyTable.c | 4 +-
src/backend/executor/nodeTableFuncscan.c | 2 +-
src/backend/executor/nodeWindowAgg.c | 10 +-
src/backend/executor/spi.c | 4 +-
src/backend/jit/llvm/llvmjit.c | 10 +-
src/backend/jit/llvm/llvmjit_deform.c | 2 +-
src/backend/jit/llvm/llvmjit_expr.c | 5 +-
src/backend/libpq/be-secure-gssapi.c | 4 +-
src/backend/libpq/be-secure-openssl.c | 8 +-
src/backend/libpq/hba.c | 10 +-
src/backend/nodes/gen_node_support.pl | 82 +-
src/backend/optimizer/path/costsize.c | 2 +-
src/backend/optimizer/util/appendinfo.c | 2 +-
src/backend/optimizer/util/relnode.c | 2 +-
src/backend/parser/check_keywords.pl | 10 +-
src/backend/parser/parse_expr.c | 2 +-
src/backend/parser/parse_merge.c | 4 +-
src/backend/parser/parse_utilcmd.c | 4 +-
src/backend/partitioning/partbounds.c | 8 +-
src/backend/postmaster/fork_process.c | 4 +-
src/backend/regex/regc_lex.c | 1 +
.../libpqwalreceiver/libpqwalreceiver.c | 2 +-
src/backend/replication/logical/decode.c | 10 +-
src/backend/replication/logical/logical.c | 4 +-
src/backend/replication/logical/origin.c | 2 +-
.../replication/logical/reorderbuffer.c | 28 +-
src/backend/replication/logical/snapbuild.c | 6 +-
src/backend/replication/logical/tablesync.c | 2 +-
src/backend/replication/logical/worker.c | 37 +-
src/backend/replication/pgoutput/pgoutput.c | 4 +-
src/backend/replication/syncrep.c | 4 +-
src/backend/rewrite/rewriteHandler.c | 2 +-
src/backend/rewrite/rowsecurity.c | 4 +-
src/backend/snowball/snowball_create.pl | 69 +-
src/backend/statistics/extended_stats.c | 4 +-
src/backend/storage/buffer/bufmgr.c | 2 +-
src/backend/storage/file/buffile.c | 3 +-
src/backend/storage/ipc/dsm_impl.c | 15 +-
.../storage/lmgr/generate-lwlocknames.pl | 12 +-
src/backend/storage/lmgr/lock.c | 1 +
src/backend/storage/lmgr/lwlock.c | 6 +-
src/backend/storage/lmgr/predicate.c | 54 +-
src/backend/storage/lmgr/proc.c | 8 +-
src/backend/storage/smgr/md.c | 8 +-
src/backend/tsearch/spell.c | 2 +-
src/backend/utils/Gen_dummy_probes.pl | 2 +-
src/backend/utils/Gen_fmgrtab.pl | 31 +-
src/backend/utils/activity/pgstat.c | 2 +-
src/backend/utils/activity/pgstat_shmem.c | 2 +-
src/backend/utils/activity/pgstat_xact.c | 8 +-
src/backend/utils/adt/datetime.c | 12 +-
src/backend/utils/adt/float.c | 3 +-
src/backend/utils/adt/jsonfuncs.c | 6 +-
src/backend/utils/adt/jsonpath.c | 54 +-
src/backend/utils/adt/jsonpath_exec.c | 10 +-
src/backend/utils/adt/jsonpath_internal.h | 6 +-
src/backend/utils/adt/pg_locale.c | 136 +-
src/backend/utils/adt/ruleutils.c | 2 +-
src/backend/utils/adt/tsquery_op.c | 6 +-
src/backend/utils/adt/tsvector_op.c | 2 +-
src/backend/utils/adt/varchar.c | 14 +-
src/backend/utils/adt/varlena.c | 5 +-
src/backend/utils/adt/xid8funcs.c | 2 +-
src/backend/utils/adt/xml.c | 2 +-
src/backend/utils/cache/lsyscache.c | 12 +-
src/backend/utils/cache/relcache.c | 22 +-
src/backend/utils/cache/relmapper.c | 10 +-
src/backend/utils/fmgr/fmgr.c | 2 +-
src/backend/utils/generate-errcodes.pl | 5 +-
src/backend/utils/init/postinit.c | 10 +-
src/backend/utils/init/usercontext.c | 8 +-
src/backend/utils/mb/Unicode/UCS_to_BIG5.pl | 14 +-
src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl | 10 +-
.../utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl | 24 +-
src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl | 516 +++----
src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl | 30 +-
src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl | 14 +-
.../utils/mb/Unicode/UCS_to_GB18030.pl | 10 +-
src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl | 30 +-
.../utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl | 24 +-
src/backend/utils/mb/Unicode/UCS_to_SJIS.pl | 84 +-
src/backend/utils/mb/Unicode/UCS_to_UHC.pl | 20 +-
src/backend/utils/mb/Unicode/UCS_to_most.pl | 44 +-
src/backend/utils/mb/Unicode/convutils.pm | 64 +-
src/backend/utils/misc/guc.c | 4 +-
src/backend/utils/misc/guc_tables.c | 4 +-
src/backend/utils/mmgr/dsa.c | 4 +-
src/backend/utils/mmgr/freepage.c | 2 +-
src/backend/utils/mmgr/mcxt.c | 6 +-
src/backend/utils/resowner/resowner.c | 2 +-
src/backend/utils/sort/tuplesort.c | 4 +-
src/backend/utils/time/snapmgr.c | 6 +-
src/bin/initdb/initdb.c | 51 +-
src/bin/initdb/t/001_initdb.pl | 20 +-
src/bin/pg_amcheck/t/002_nonesuch.pl | 54 +-
src/bin/pg_amcheck/t/003_check.pl | 2 +-
src/bin/pg_amcheck/t/004_verify_heapam.pl | 126 +-
.../t/010_pg_archivecleanup.pl | 12 +-
src/bin/pg_basebackup/pg_basebackup.c | 16 +-
src/bin/pg_basebackup/pg_receivewal.c | 2 +-
src/bin/pg_basebackup/t/010_pg_basebackup.pl | 102 +-
src/bin/pg_basebackup/t/020_pg_receivewal.pl | 34 +-
src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 26 +-
src/bin/pg_basebackup/walmethods.c | 14 +-
src/bin/pg_basebackup/walmethods.h | 12 +-
src/bin/pg_checksums/t/002_actions.pl | 38 +-
.../pg_controldata/t/001_pg_controldata.pl | 2 +-
src/bin/pg_ctl/t/001_start_stop.pl | 2 +-
src/bin/pg_ctl/t/004_logrotate.pl | 20 +-
src/bin/pg_dump/compress_io.c | 4 +-
src/bin/pg_dump/compress_lz4.c | 12 +-
src/bin/pg_dump/compress_zstd.c | 4 +-
src/bin/pg_dump/compress_zstd.h | 6 +-
src/bin/pg_dump/pg_backup_archiver.c | 19 +-
src/bin/pg_dump/pg_backup_tar.c | 8 +-
src/bin/pg_dump/pg_dump.c | 35 +-
src/bin/pg_dump/pg_dumpall.c | 14 +-
src/bin/pg_dump/t/002_pg_dump.pl | 1267 +++++++++--------
src/bin/pg_dump/t/004_pg_dump_parallel.pl | 12 +-
src/bin/pg_dump/t/010_dump_connstr.pl | 36 +-
src/bin/pg_resetwal/t/002_corrupted.pl | 2 +-
src/bin/pg_rewind/t/001_basic.pl | 14 +-
src/bin/pg_rewind/t/006_options.pl | 6 +-
src/bin/pg_rewind/t/007_standby_source.pl | 4 +-
src/bin/pg_rewind/t/008_min_recovery_point.pl | 4 +-
src/bin/pg_rewind/t/009_growing_files.pl | 2 +-
src/bin/pg_rewind/t/RewindTest.pm | 26 +-
src/bin/pg_test_fsync/pg_test_fsync.c | 2 +-
src/bin/pg_upgrade/check.c | 4 +-
src/bin/pg_upgrade/info.c | 20 +-
src/bin/pg_upgrade/pg_upgrade.c | 8 +-
src/bin/pg_upgrade/t/002_pg_upgrade.pl | 76 +-
src/bin/pg_verifybackup/t/002_algorithm.pl | 2 +-
src/bin/pg_verifybackup/t/003_corruption.pl | 54 +-
src/bin/pg_verifybackup/t/004_options.pl | 2 +-
src/bin/pg_verifybackup/t/006_encoding.pl | 4 +-
src/bin/pg_verifybackup/t/007_wal.pl | 4 +-
src/bin/pg_verifybackup/t/008_untar.pl | 44 +-
src/bin/pg_verifybackup/t/009_extract.pl | 22 +-
src/bin/pg_verifybackup/t/010_client_untar.pl | 54 +-
src/bin/pg_waldump/t/002_save_fullpage.pl | 8 +-
src/bin/pgbench/pgbench.c | 4 +-
src/bin/pgbench/t/001_pgbench_with_server.pl | 76 +-
src/bin/pgbench/t/002_pgbench_no_server.pl | 8 +-
src/bin/psql/command.c | 11 +-
src/bin/psql/common.c | 2 +-
src/bin/psql/create_help.pl | 24 +-
src/bin/psql/crosstabview.c | 2 +-
src/bin/psql/describe.c | 4 +-
src/bin/psql/settings.h | 3 +-
src/bin/psql/t/001_basic.pl | 58 +-
src/bin/psql/t/010_tab_completion.pl | 9 +-
src/bin/psql/t/020_cancel.pl | 2 +-
src/bin/scripts/t/020_createdb.pl | 45 +-
src/bin/scripts/t/040_createuser.pl | 6 +-
src/bin/scripts/t/090_reindexdb.pl | 21 +-
src/bin/scripts/t/100_vacuumdb.pl | 2 +-
src/bin/scripts/t/200_connstr.pl | 2 +-
src/bin/scripts/vacuumdb.c | 14 +-
.../unicode/generate-norm_test_table.pl | 12 +-
.../unicode/generate-unicode_norm_table.pl | 35 +-
.../generate-unicode_normprops_table.pl | 6 +-
src/fe_utils/print.c | 7 +-
src/include/access/amapi.h | 2 +-
src/include/access/brin_tuple.h | 2 +-
src/include/access/gist_private.h | 1 +
src/include/access/tableam.h | 2 +-
src/include/access/xlogreader.h | 1 +
src/include/catalog/pg_aggregate.dat | 24 +-
src/include/catalog/pg_auth_members.h | 2 +-
src/include/catalog/pg_database.dat | 3 +-
src/include/catalog/pg_proc.dat | 34 +-
src/include/catalog/pg_subscription.h | 6 +-
src/include/catalog/reformat_dat_file.pl | 10 +-
src/include/catalog/renumber_oids.pl | 18 +-
src/include/executor/hashjoin.h | 2 +-
src/include/executor/tuptable.h | 2 +-
src/include/fe_utils/print.h | 17 +-
src/include/funcapi.h | 1 +
src/include/nodes/primnodes.h | 1 +
src/include/port/win32ntdll.h | 6 +-
src/include/replication/reorderbuffer.h | 2 +-
src/include/storage/bufmgr.h | 2 +-
src/include/storage/lock.h | 2 +-
src/include/storage/lwlock.h | 9 +-
src/include/storage/predicate_internals.h | 2 +-
src/include/storage/proc.h | 6 +-
src/include/utils/backend_status.h | 4 +-
src/include/utils/pg_locale.h | 10 +-
src/include/utils/rel.h | 2 +-
src/include/utils/varlena.h | 2 +-
src/interfaces/ecpg/ecpglib/data.c | 4 +-
src/interfaces/ecpg/ecpglib/descriptor.c | 4 +-
src/interfaces/ecpg/ecpglib/execute.c | 4 +-
.../ecpg/include/pgtypes_interval.h | 6 +-
src/interfaces/ecpg/pgtypeslib/dt.h | 2 +-
src/interfaces/ecpg/pgtypeslib/interval.c | 12 +-
src/interfaces/ecpg/pgtypeslib/timestamp.c | 4 +-
src/interfaces/ecpg/preproc/check_rules.pl | 29 +-
src/interfaces/ecpg/preproc/parse.pl | 124 +-
src/interfaces/ecpg/preproc/type.c | 2 +-
src/interfaces/libpq/fe-connect.c | 6 +-
src/interfaces/libpq/fe-exec.c | 16 +-
src/interfaces/libpq/fe-lobj.c | 42 +-
src/interfaces/libpq/fe-misc.c | 10 +-
src/interfaces/libpq/fe-print.c | 2 +-
src/interfaces/libpq/fe-protocol3.c | 2 +-
src/interfaces/libpq/fe-secure-common.c | 6 +-
src/interfaces/libpq/fe-secure-gssapi.c | 12 +-
src/interfaces/libpq/fe-secure-openssl.c | 66 +-
src/interfaces/libpq/fe-secure.c | 8 +-
src/interfaces/libpq/libpq-int.h | 4 +-
src/interfaces/libpq/t/001_uri.pl | 27 +-
.../libpq/t/003_load_balance_host_list.pl | 30 +-
.../libpq/t/004_load_balance_dns.pl | 53 +-
src/pl/plperl/plc_perlboot.pl | 2 +-
src/pl/plperl/text2macro.pl | 8 +-
src/port/dirmod.c | 8 +-
src/test/authentication/t/001_password.pl | 24 +-
src/test/authentication/t/002_saslprep.pl | 20 +-
src/test/authentication/t/003_peer.pl | 8 +-
.../authentication/t/004_file_inclusion.pl | 16 +-
src/test/icu/t/010_database.pl | 3 +-
src/test/kerberos/t/001_auth.pl | 234 +--
src/test/ldap/LdapServer.pm | 73 +-
src/test/ldap/t/001_auth.pl | 4 +-
src/test/modules/commit_ts/t/002_standby.pl | 2 +-
src/test/modules/commit_ts/t/003_standby_2.pl | 2 +-
src/test/modules/commit_ts/t/004_restart.pl | 4 +-
.../t/001_mutated_bindpasswd.pl | 14 +-
.../modules/libpq_pipeline/libpq_pipeline.c | 2 +-
.../libpq_pipeline/t/001_libpq_pipeline.pl | 2 +-
.../ssl_passphrase_callback/t/001_testfunc.pl | 2 +-
.../modules/test_custom_rmgrs/t/001_basic.pl | 20 +-
.../test_custom_rmgrs/test_custom_rmgrs.c | 2 +-
.../test_ddl_deparse/test_ddl_deparse.c | 1 +
.../test_misc/t/001_constraint_validation.pl | 4 +-
.../modules/test_misc/t/002_tablespace.pl | 2 +-
src/test/modules/test_misc/t/003_check_guc.pl | 8 +-
src/test/modules/test_pg_dump/t/001_base.pl | 114 +-
.../perl/PostgreSQL/Test/AdjustUpgrade.pm | 88 +-
.../perl/PostgreSQL/Test/BackgroundPsql.pm | 29 +-
src/test/perl/PostgreSQL/Test/Cluster.pm | 180 +--
.../perl/PostgreSQL/Test/RecursiveCopy.pm | 2 +-
src/test/perl/PostgreSQL/Test/SimpleTee.pm | 6 +-
src/test/perl/PostgreSQL/Test/Utils.pm | 22 +-
src/test/perl/PostgreSQL/Version.pm | 4 +-
src/test/recovery/t/001_stream_rep.pl | 40 +-
src/test/recovery/t/002_archiving.pl | 6 +-
src/test/recovery/t/003_recovery_targets.pl | 16 +-
src/test/recovery/t/005_replay_delay.pl | 2 +-
src/test/recovery/t/006_logical_decoding.pl | 4 +-
src/test/recovery/t/009_twophase.pl | 4 +-
.../t/010_logical_decoding_timelines.pl | 4 +-
src/test/recovery/t/012_subtransactions.pl | 2 +-
src/test/recovery/t/013_crash_restart.pl | 4 +-
src/test/recovery/t/014_unlogged_reinit.pl | 12 +-
src/test/recovery/t/016_min_consistency.pl | 2 +-
src/test/recovery/t/017_shm.pl | 4 +-
src/test/recovery/t/018_wal_optimize.pl | 4 +-
src/test/recovery/t/019_replslot_limit.pl | 4 +-
src/test/recovery/t/020_archive_status.pl | 10 +-
src/test/recovery/t/022_crash_temp_files.pl | 4 +-
src/test/recovery/t/023_pitr_prepared_xact.pl | 2 +-
src/test/recovery/t/024_archive_recovery.pl | 6 +-
.../recovery/t/025_stuck_on_old_timeline.pl | 4 +-
src/test/recovery/t/027_stream_regress.pl | 4 +-
src/test/recovery/t/028_pitr_timelines.pl | 6 +-
src/test/recovery/t/029_stats_restart.pl | 12 +-
src/test/recovery/t/031_recovery_conflict.pl | 27 +-
src/test/recovery/t/032_relfilenode_reuse.pl | 4 +-
src/test/recovery/t/033_replay_tsp_drops.pl | 6 +-
src/test/recovery/t/034_create_database.pl | 2 +-
.../t/035_standby_logical_decoding.pl | 306 ++--
src/test/regress/pg_regress.c | 4 +-
src/test/ssl/t/001_ssltests.pl | 104 +-
src/test/ssl/t/002_scram.pl | 7 +-
src/test/ssl/t/003_sslinfo.pl | 7 +-
src/test/ssl/t/SSL/Backend/OpenSSL.pm | 16 +-
src/test/ssl/t/SSL/Server.pm | 18 +-
src/test/subscription/t/001_rep_changes.pl | 3 +-
src/test/subscription/t/005_encoding.pl | 6 +-
src/test/subscription/t/012_collation.pl | 4 +-
src/test/subscription/t/014_binary.pl | 2 +-
src/test/subscription/t/015_stream.pl | 23 +-
.../t/018_stream_subxact_abort.pl | 3 +-
.../subscription/t/023_twophase_stream.pl | 3 +-
.../t/025_rep_changes_for_schema.pl | 3 +-
src/test/subscription/t/026_stats.pl | 2 +-
src/test/subscription/t/027_nosuperuser.pl | 18 +-
src/test/subscription/t/028_row_filter.pl | 8 +-
src/test/subscription/t/030_origin.pl | 6 +-
src/test/subscription/t/031_column_list.pl | 2 +-
.../subscription/t/032_subscribe_use_index.pl | 150 +-
.../subscription/t/033_run_as_table_owner.pl | 25 +-
src/test/subscription/t/100_bugs.pl | 6 +-
src/timezone/zic.c | 10 +-
src/tools/PerfectHash.pm | 16 +-
src/tools/check_bison_recursion.pl | 6 +-
src/tools/ci/windows_build_config.pl | 6 +-
src/tools/copyright.pl | 4 +-
src/tools/gen_export.pl | 11 +-
src/tools/gen_keywordlist.pl | 30 +-
src/tools/git_changelog | 56 +-
src/tools/mark_pgdllimport.pl | 2 +-
src/tools/msvc/Install.pm | 69 +-
src/tools/msvc/MSBuildProject.pm | 45 +-
src/tools/msvc/Mkvcbuild.pm | 134 +-
src/tools/msvc/Project.pm | 44 +-
src/tools/msvc/Solution.pm | 627 ++++----
src/tools/msvc/VSObjectFactory.pm | 2 +-
src/tools/msvc/build.pl | 6 +-
src/tools/msvc/config_default.pl | 32 +-
src/tools/msvc/dummylib/Win32/Registry.pm | 2 +-
src/tools/msvc/dummylib/Win32API/File.pm | 4 +-
src/tools/msvc/gendef.pl | 2 +-
src/tools/msvc/pgbison.pl | 2 +-
src/tools/msvc/vcregress.pl | 76 +-
.../pg_bsd_indent/t/001_pg_bsd_indent.pl | 2 +-
src/tools/pginclude/pgcheckdefines | 4 +-
src/tools/pgindent/pgindent | 52 +-
src/tools/pgindent/typedefs.list | 178 ++-
src/tools/win32tzlist.pl | 10 +-
src/tutorial/funcs.c | 4 +-
402 files changed, 4756 insertions(+), 4427 deletions(-)

diff --git a/contrib/amcheck/t/001_verify_heapam.pl b/contrib/amcheck/t/001_verify_heapam.pl
index 1aedebe430..46d5b53181 100644
--- a/contrib/amcheck/t/001_verify_heapam.pl
+++ b/contrib/amcheck/t/001_verify_heapam.pl
@@ -81,7 +81,7 @@ sub relation_filepath my ($relname) = @_; my $pgdata = $node->data_dir; - my $rel = $node->safe_psql('postgres', + my $rel = $node->safe_psql('postgres', qq(SELECT pg_relation_filepath('$relname'))); die "path not found for relation $relname" unless defined $rel; return "$pgdata/$rel";
@@ -267,7 +267,7 @@ sub check_all_options_uncorrupted for my $endblock (qw(NULL 0)) { my $opts = "on_error_stop := $stop, " + "on_error_stop := $stop, " . "check_toast := $check_toast, " . "skip := $skip, " . "startblock := $startblock, "
diff --git a/contrib/amcheck/t/003_cic_2pc.pl b/contrib/amcheck/t/003_cic_2pc.pl
index 5323ed11ae..3279a2505a 100644
--- a/contrib/amcheck/t/003_cic_2pc.pl
+++ b/contrib/amcheck/t/003_cic_2pc.pl
@@ -38,30 +38,35 @@ $node->safe_psql('postgres', q(CREATE TABLE tbl(i int))); my $main_h = $node->background_psql('postgres'); -$main_h->query_safe(q( +$main_h->query_safe( + q( BEGIN; INSERT INTO tbl VALUES(0); )); my $cic_h = $node->background_psql('postgres'); -$cic_h->query_until(qr/start/, q( +$cic_h->query_until( + qr/start/, q( \echo start CREATE INDEX CONCURRENTLY idx ON tbl(i); )); -$main_h->query_safe(q( +$main_h->query_safe( + q( PREPARE TRANSACTION 'a'; )); -$main_h->query_safe(q( +$main_h->query_safe( + q( BEGIN; INSERT INTO tbl VALUES(0); )); $node->safe_psql('postgres', q(COMMIT PREPARED 'a';)); -$main_h->query_safe(q( +$main_h->query_safe( + q( PREPARE TRANSACTION 'b'; BEGIN; INSERT INTO tbl VALUES(0);
@@ -69,7 +74,8 @@ INSERT INTO tbl VALUES(0); $node->safe_psql('postgres', q(COMMIT PREPARED 'b';)); -$main_h->query_safe(q( +$main_h->query_safe( + q( PREPARE TRANSACTION 'c'; COMMIT PREPARED 'c'; ));
@@ -97,7 +103,8 @@ PREPARE TRANSACTION 'persists_forever'; $node->restart; my $reindex_h = $node->background_psql('postgres'); -$reindex_h->query_until(qr/start/, q( +$reindex_h->query_until( + qr/start/, q( \echo start DROP INDEX CONCURRENTLY idx; CREATE INDEX CONCURRENTLY idx ON tbl(i);
diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c
index 34d73ad442..97f3253522 100644
--- a/contrib/amcheck/verify_heapam.c
+++ b/contrib/amcheck/verify_heapam.c
@@ -407,7 +407,7 @@ verify_heapam(PG_FUNCTION_ARGS) OffsetNumber successor[MaxOffsetNumber]; bool lp_valid[MaxOffsetNumber]; bool xmin_commit_status_ok[MaxOffsetNumber]; - XidCommitStatus xmin_commit_status[MaxOffsetNumber]; + XidCommitStatus xmin_commit_status[MaxOffsetNumber]; CHECK_FOR_INTERRUPTS();
@@ -444,7 +444,7 @@ verify_heapam(PG_FUNCTION_ARGS) for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff; ctx.offnum = OffsetNumberNext(ctx.offnum)) { - BlockNumber nextblkno; OffsetNumber nextoffnum; successor[ctx.offnum] = InvalidOffsetNumber;
@@ -484,9 +484,9 @@
verify_heapam(PG_FUNCTION_ARGS) /* * Since we've checked that this redirect points to a line - * pointer between FirstOffsetNumber and maxoff, it should - * now be safe to fetch the referenced line pointer. We expect - * it to be LP_NORMAL; if not, that's corruption. + * pointer between FirstOffsetNumber and maxoff, it should now + * be safe to fetch the referenced line pointer. We expect it + * to be LP_NORMAL; if not, that's corruption. */ rditem = PageGetItemId(ctx.page, rdoffnum); if (!ItemIdIsUsed(rditem)) @@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS) { /* * We should not have set successor[ctx.offnum] to a value - * other than InvalidOffsetNumber unless that line pointer - * is LP_NORMAL. + * other than InvalidOffsetNumber unless that line pointer is + * LP_NORMAL. */ Assert(ItemIdIsNormal(next_lp)); @@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS) } /* - * If the next line pointer is a redirect, or if it's a tuple - * but the XMAX of this tuple doesn't match the XMIN of the next + * If the next line pointer is a redirect, or if it's a tuple but + * the XMAX of this tuple doesn't match the XMIN of the next * tuple, then the two aren't part of the same update chain and * there is nothing more to do. */ @@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS) } /* - * This tuple and the tuple to which it points seem to be part - * of an update chain. + * This tuple and the tuple to which it points seem to be part of + * an update chain. */ predecessor[nextoffnum] = ctx.offnum; @@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS) } /* - * If the current tuple's xmin is aborted but the successor tuple's - * xmin is in-progress or committed, that's corruption. + * If the current tuple's xmin is aborted but the successor + * tuple's xmin is in-progress or committed, that's corruption. */ if (xmin_commit_status_ok[ctx.offnum] && xmin_commit_status[ctx.offnum] == XID_ABORTED && @@ -1025,7 +1025,7 @@ check_tuple_visibility(HeapCheckContext *ctx, bool *xmin_commit_status_ok, HeapTupleHeader tuphdr = ctx->tuphdr; ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */ - *xmin_commit_status_ok = false; /* have not yet proven otherwise */ + *xmin_commit_status_ok = false; /* have not yet proven otherwise */ /* If xmin is normal, it should be within valid range */ xmin = HeapTupleHeaderGetXmin(tuphdr); @@ -1837,7 +1837,7 @@ check_tuple(HeapCheckContext *ctx, bool *xmin_commit_status_ok, * therefore cannot check it. */ if (!check_tuple_visibility(ctx, xmin_commit_status_ok, - xmin_commit_status)) + xmin_commit_status)) return; /* @@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx) diff = (int32) (ctx->next_xid - xid); /* - * In cases of corruption we might see a 32bit xid that is before epoch - * 0. We can't represent that as a 64bit xid, due to 64bit xids being + * In cases of corruption we might see a 32bit xid that is before epoch 0. + * We can't represent that as a 64bit xid, due to 64bit xids being * unsigned integers, without the modulo arithmetic of 32bit xid. 
There's * no really nice way to deal with that, but it works ok enough to use * FirstNormalFullTransactionId in that case, as a freshly initdb'd diff --git a/contrib/auto_explain/t/001_auto_explain.pl b/contrib/auto_explain/t/001_auto_explain.pl index 7873feb044..abb422f8de 100644 --- a/contrib/auto_explain/t/001_auto_explain.pl +++ b/contrib/auto_explain/t/001_auto_explain.pl @@ -19,7 +19,7 @@ sub query_log local $ENV{PGOPTIONS} = join " ", map { "-c $_=$params->{$_}" } keys %$params; - my $log = $node->logfile(); + my $log = $node->logfile(); my $offset = -s $log; $node->safe_psql("postgres", $sql); @@ -113,7 +113,7 @@ $log_contents = query_log( "SELECT * FROM pg_class;", { "auto_explain.log_verbose" => "on", - "compute_query_id" => "on" + "compute_query_id" => "on" }); like( @@ -127,7 +127,7 @@ $log_contents = query_log( "SELECT * FROM pg_class;", { "auto_explain.log_verbose" => "on", - "compute_query_id" => "regress" + "compute_query_id" => "regress" }); unlike( diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl index 84ad93f614..e2cdd2ecb0 100644 --- a/contrib/basebackup_to_shell/t/001_basic.pl +++ b/contrib/basebackup_to_shell/t/001_basic.pl @@ -25,7 +25,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary'); # This is only needed on Windows machines that don't use UNIX sockets. $node->init( 'allows_streaming' => 1, - 'auth_extra' => [ '--create-role', 'backupuser' ]); + 'auth_extra' => [ '--create-role', 'backupuser' ]); $node->append_conf('postgresql.conf', "shared_preload_libraries = 'basebackup_to_shell'"); @@ -50,7 +50,7 @@ $node->command_fails_like( 'fails if basebackup_to_shell.command is not set'); # Configure basebackup_to_shell.command and reload the configuration file. -my $backup_path = PostgreSQL::Test::Utils::tempdir; +my $backup_path = PostgreSQL::Test::Utils::tempdir; my $escaped_backup_path = $backup_path; $escaped_backup_path =~ s{\\}{\\\\}g if ($PostgreSQL::Test::Utils::windows_os); diff --git a/contrib/basic_archive/basic_archive.c b/contrib/basic_archive/basic_archive.c index cd852888ce..4d78c31859 100644 --- a/contrib/basic_archive/basic_archive.c +++ b/contrib/basic_archive/basic_archive.c @@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state) MemoryContext basic_archive_context; /* - * If we didn't get to storing the pointer to our allocated state, we don't - * have anything to clean up. + * If we didn't get to storing the pointer to our allocated state, we + * don't have anything to clean up. */ if (data == NULL) return; diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 55f75eff36..3a3e916f9e 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -1287,7 +1287,7 @@ dblink_get_connections(PG_FUNCTION_ARGS) if (astate) PG_RETURN_DATUM(makeArrayResult(astate, - CurrentMemoryContext)); + CurrentMemoryContext)); else PG_RETURN_NULL(); } diff --git a/contrib/intarray/bench/bench.pl b/contrib/intarray/bench/bench.pl index bd6dd83c93..067654986e 100755 --- a/contrib/intarray/bench/bench.pl +++ b/contrib/intarray/bench/bench.pl @@ -83,7 +83,7 @@ else $outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid'; } my $sql = - "select $outf from " + "select $outf from " . join(', ', keys %table) . " where " . join(' AND ', @where) . ';'; @@ -100,9 +100,9 @@ if ($opt{e}) print @plan; } -my $t0 = [gettimeofday]; +my $t0 = [gettimeofday]; my $count = 0; -my $b = $opt{b}; +my $b = $opt{b}; $b ||= 1; my @a; foreach (1 .. 
$b) diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl index 5bdcebddbe..6efe9151ca 100755 --- a/contrib/intarray/bench/create_test.pl +++ b/contrib/intarray/bench/create_test.pl @@ -19,7 +19,7 @@ create table message_section_map ( EOT -open(my $msg, '>', "message.tmp") || die; +open(my $msg, '>', "message.tmp") || die; open(my $map, '>', "message_section_map.tmp") || die; srand(1); diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c index 21b7d02028..932f69bff2 100644 --- a/contrib/ltree/ltree_gist.c +++ b/contrib/ltree/ltree_gist.c @@ -43,7 +43,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen, ltree *left, ltree *right) { int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) + - (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0); + (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0); ltree_gist *result = palloc(size); SET_VARSIZE(result, size); diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c index 5dce70bd1a..0a12c77a62 100644 --- a/contrib/ltree/ltree_io.c +++ b/contrib/ltree/ltree_io.c @@ -175,7 +175,7 @@ Datum ltree_in(PG_FUNCTION_ARGS) { char *buf = (char *) PG_GETARG_POINTER(0); - ltree *res; + ltree *res; if ((res = parse_ltree(buf, fcinfo->context)) == NULL) PG_RETURN_NULL(); @@ -584,7 +584,7 @@ parse_lquery(const char *buf, struct Node *escontext) */ static bool finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos, - struct Node *escontext) + struct Node *escontext) { if (is_lquery) { @@ -745,7 +745,7 @@ Datum lquery_in(PG_FUNCTION_ARGS) { char *buf = (char *) PG_GETARG_POINTER(0); - lquery *res; + lquery *res; if ((res = parse_lquery(buf, fcinfo->context)) == NULL) PG_RETURN_NULL(); diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c index 0d29e15630..121fc55e46 100644 --- a/contrib/ltree/ltxtquery_io.c +++ b/contrib/ltree/ltxtquery_io.c @@ -186,8 +186,8 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("word is too long"))); - if (! pushquery(state, type, ltree_crc32_sz(strval, lenval), - state->curop - state->op, lenval, flag)) + if (!pushquery(state, type, ltree_crc32_sz(strval, lenval), + state->curop - state->op, lenval, flag)) return false; while (state->curop - state->op + lenval + 1 >= state->lenop) @@ -408,7 +408,7 @@ PG_FUNCTION_INFO_V1(ltxtq_in); Datum ltxtq_in(PG_FUNCTION_ARGS) { - ltxtquery *res; + ltxtquery *res; if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL) PG_RETURN_NULL(); diff --git a/contrib/pg_prewarm/t/001_basic.pl b/contrib/pg_prewarm/t/001_basic.pl index 9811c51cee..6b7c869afc 100644 --- a/contrib/pg_prewarm/t/001_basic.pl +++ b/contrib/pg_prewarm/t/001_basic.pl @@ -21,7 +21,7 @@ $node->start; # setup $node->safe_psql("postgres", - "CREATE EXTENSION pg_prewarm;\n" + "CREATE EXTENSION pg_prewarm;\n" . "CREATE TABLE test(c1 int);\n" . 
"INSERT INTO test SELECT generate_series(1, 100);"); diff --git a/contrib/pg_walinspect/pg_walinspect.c b/contrib/pg_walinspect/pg_walinspect.c index 1cd3744d5d..796a74f322 100644 --- a/contrib/pg_walinspect/pg_walinspect.c +++ b/contrib/pg_walinspect/pg_walinspect.c @@ -252,8 +252,8 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record, int block_id; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; RmgrData desc; - const char *record_type; - StringInfoData rec_desc; + const char *record_type; + StringInfoData rec_desc; Assert(XLogRecHasAnyBlockRefs(record)); diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index da32d503bc..d918ba89e1 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -61,7 +61,7 @@ typedef struct ConnCacheEntry bool have_error; /* have any subxacts aborted in this xact? */ bool changing_xact_state; /* xact state change in process */ bool parallel_commit; /* do we commit (sub)xacts in parallel? */ - bool parallel_abort; /* do we abort (sub)xacts in parallel? */ + bool parallel_abort; /* do we abort (sub)xacts in parallel? */ bool invalidated; /* true if reconnect is pending */ bool keep_connections; /* setting value of keep_connections * server option */ diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 95dbe8b06c..428ea3810f 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo) /* * Should never get called when the insert is being performed on a table - * that is also among the target relations of an UPDATE operation, - * because postgresBeginForeignInsert() currently rejects such insert - * attempts. + * that is also among the target relations of an UPDATE operation, because + * postgresBeginForeignInsert() currently rejects such insert attempts. */ Assert(fmstate == NULL || fmstate->aux_fmstate == NULL); @@ -5167,15 +5166,15 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, */ if (method != ANALYZE_SAMPLE_OFF) { - bool can_tablesample; + bool can_tablesample; reltuples = postgresGetAnalyzeInfoForForeignTable(relation, &can_tablesample); /* - * Make sure we're not choosing TABLESAMPLE when the remote relation does - * not support that. But only do this for "auto" - if the user explicitly - * requested BERNOULLI/SYSTEM, it's better to fail. + * Make sure we're not choosing TABLESAMPLE when the remote relation + * does not support that. But only do this for "auto" - if the user + * explicitly requested BERNOULLI/SYSTEM, it's better to fail. */ if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO)) method = ANALYZE_SAMPLE_RANDOM; @@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, else { /* - * All supported sampling methods require sampling rate, - * not target rows directly, so we calculate that using - * the remote reltuples value. That's imperfect, because - * it might be off a good deal, but that's not something - * we can (or should) address here. + * All supported sampling methods require sampling rate, not + * target rows directly, so we calculate that using the remote + * reltuples value. That's imperfect, because it might be off a + * good deal, but that's not something we can (or should) address + * here. * - * If reltuples is too low (i.e. 
when table grew), we'll - * end up sampling more rows - but then we'll apply the - * local sampling, so we get the expected sample size. - * This is the same outcome as without remote sampling. + * If reltuples is too low (i.e. when table grew), we'll end up + * sampling more rows - but then we'll apply the local sampling, + * so we get the expected sample size. This is the same outcome as + * without remote sampling. * - * If reltuples is too high (e.g. after bulk DELETE), we - * will end up sampling too few rows. + * If reltuples is too high (e.g. after bulk DELETE), we will end + * up sampling too few rows. * - * We can't really do much better here - we could try - * sampling a bit more rows, but we don't know how off - * the reltuples value is so how much is "a bit more"? + * We can't really do much better here - we could try sampling a + * bit more rows, but we don't know how off the reltuples value is + * so how much is "a bit more"? * - * Furthermore, the targrows value for partitions is - * determined based on table size (relpages), which can - * be off in different ways too. Adjusting the sampling - * rate here might make the issue worse. + * Furthermore, the targrows value for partitions is determined + * based on table size (relpages), which can be off in different + * ways too. Adjusting the sampling rate here might make the issue + * worse. */ sample_frac = targrows / reltuples; /* * We should never get sampling rate outside the valid range - * (between 0.0 and 1.0), because those cases should be covered - * by the previous branch that sets ANALYZE_SAMPLE_OFF. + * (between 0.0 and 1.0), because those cases should be covered by + * the previous branch that sets ANALYZE_SAMPLE_OFF. */ Assert(sample_frac >= 0.0 && sample_frac <= 1.0); } diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c index eb33d2a993..07c11b75e9 100644 --- a/contrib/postgres_fdw/shippable.c +++ b/contrib/postgres_fdw/shippable.c @@ -183,7 +183,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo) /* See if we already cached the result. */ entry = (ShippableCacheEntry *) - hash_search(ShippableCacheHash, &key, HASH_FIND, NULL); + hash_search(ShippableCacheHash, &key, HASH_FIND, NULL); if (!entry) { @@ -196,7 +196,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo) * cache invalidation. */ entry = (ShippableCacheEntry *) - hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL); + hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL); entry->shippable = shippable; } diff --git a/contrib/seg/seg-validate.pl b/contrib/seg/seg-validate.pl index 00bc23aa95..67c0015e6b 100755 --- a/contrib/seg/seg-validate.pl +++ b/contrib/seg/seg-validate.pl @@ -6,14 +6,14 @@ use strict; use warnings; my $integer = '[+-]?[0-9]+'; -my $real = '[+-]?[0-9]+\.[0-9]+'; +my $real = '[+-]?[0-9]+\.[0-9]+'; -my $RANGE = '(\.\.)(\.)?'; -my $PLUMIN = q(\'\+\-\'); -my $FLOAT = "(($integer)|($real))([eE]($integer))?"; +my $RANGE = '(\.\.)(\.)?'; +my $PLUMIN = q(\'\+\-\'); +my $FLOAT = "(($integer)|($real))([eE]($integer))?"; my $EXTENSION = '<|>|~'; -my $boundary = "($EXTENSION)?$FLOAT"; +my $boundary = "($EXTENSION)?$FLOAT"; my $deviation = $FLOAT; my $rule_1 = $boundary . $PLUMIN . 
$deviation; diff --git a/contrib/test_decoding/t/001_repl_stats.pl b/contrib/test_decoding/t/001_repl_stats.pl index dede36ff16..7c2d87561c 100644 --- a/contrib/test_decoding/t/001_repl_stats.pl +++ b/contrib/test_decoding/t/001_repl_stats.pl @@ -92,7 +92,7 @@ regression_slot3|t|t), # replication statistics data is fine after restart. $node->stop; -my $datadir = $node->data_dir; +my $datadir = $node->data_dir; my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3"; rmtree($slot3_replslotdir); diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index 628c6a2595..12d1d0505d 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -288,7 +288,7 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; TestDecodingTxnData *txndata = - MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); + MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); txndata->xact_wrote_changes = false; txn->output_plugin_private = txndata; @@ -348,7 +348,7 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; TestDecodingTxnData *txndata = - MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); + MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData)); txndata->xact_wrote_changes = false; txn->output_plugin_private = txndata; diff --git a/doc/src/sgml/mk_feature_tables.pl b/doc/src/sgml/mk_feature_tables.pl index 5a16da0d06..824be729a0 100644 --- a/doc/src/sgml/mk_feature_tables.pl +++ b/doc/src/sgml/mk_feature_tables.pl @@ -34,7 +34,7 @@ print "\n"; while (<$feat>) { chomp; - my ($feature_id, $feature_name, $subfeature_id, + my ($feature_id, $feature_name, $subfeature_id, $subfeature_name, $is_supported, $comments) = split /\t/; $is_supported eq $yesno || next; diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index e91fd7e2bd..3c6a956eaa 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -700,8 +700,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) } /* - * If we found a scan key eliminating the range, no need to - * check additional ones. + * If we found a scan key eliminating the range, no need + * to check additional ones. */ if (!addrange) break; @@ -1223,7 +1223,7 @@ brin_build_desc(Relation rel) * Obtain BrinOpcInfo for each indexed column. While at it, accumulate * the number of columns stored, since the number is opclass-defined. */ - opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts); + opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts); for (keyno = 0; keyno < tupdesc->natts; keyno++) { FmgrInfo *opcInfoFn; @@ -1801,8 +1801,8 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup, bval = &dtup->bt_columns[keyno]; /* - * Does the range have actual NULL values? Either of the flags can - * be set, but we ignore the state before adding first row. + * Does the range have actual NULL values? Either of the flags can be + * set, but we ignore the state before adding first row. * * We have to remember this, because we'll modify the flags and we * need to know if the range started as empty. @@ -1842,12 +1842,12 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup, /* * If the range was had actual NULL values (i.e. did not start empty), - * make sure we don't forget about the NULL values. 
Either the allnulls - * flag is still set to true, or (if the opclass cleared it) we need to - * set hasnulls=true. + * make sure we don't forget about the NULL values. Either the + * allnulls flag is still set to true, or (if the opclass cleared it) + * we need to set hasnulls=true. * - * XXX This can only happen when the opclass modified the tuple, so the - * modified flag should be set. + * XXX This can only happen when the opclass modified the tuple, so + * the modified flag should be set. */ if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls)) { @@ -1859,9 +1859,9 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup, /* * After updating summaries for all the keys, mark it as not empty. * - * If we're actually changing the flag value (i.e. tuple started as empty), - * we should have modified the tuple. So we should not see empty range that - * was not modified. + * If we're actually changing the flag value (i.e. tuple started as + * empty), we should have modified the tuple. So we should not see empty + * range that was not modified. */ Assert(!dtup->bt_empty_range || modified); dtup->bt_empty_range = false; diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 90cb3951fc..11cc431677 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -1717,7 +1717,7 @@ allocateReloptStruct(Size base, relopt_value *options, int numoptions) if (optstr->fill_cb) { const char *val = optval->isset ? optval->values.string_val : - optstr->default_isnull ? NULL : optstr->default_val; + optstr->default_isnull ? NULL : optstr->default_val; size += optstr->fill_cb(val, NULL); } @@ -1796,8 +1796,8 @@ fillRelOptions(void *rdopts, Size basesize, if (optstring->fill_cb) { Size size = - optstring->fill_cb(string_val, - (char *) rdopts + offset); + optstring->fill_cb(string_val, + (char *) rdopts + offset); if (size) { diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index b5c1754e78..516465f8b7 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1117,7 +1117,7 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate, for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset)) { IndexTuple ituple = (IndexTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (downlink == NULL) downlink = CopyIndexTuple(ituple); diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 95cbed4337..1423b4b047 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -598,7 +598,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate, { GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc); GISTNodeBuffer *newNodeBuffer; - int i = foreach_current_index(lc); + int i = foreach_current_index(lc); /* Decompress parent index tuple of node buffer page. 
*/ gistDeCompressAtt(giststate, r, diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 7382b0921d..e2c9b5f069 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -657,7 +657,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage @@ -694,7 +694,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index a2ddfd5e69..15249aa921 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -125,7 +125,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record) if (data - begin < datalen) { OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber : - OffsetNumberNext(PageGetMaxOffsetNumber(page)); + OffsetNumberNext(PageGetMaxOffsetNumber(page)); while (data - begin < datalen) { diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index d850edd1d5..37646cc9a1 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; const char *keydata = VARDATA_ANY(key); size_t keylen = VARSIZE_ANY_EXHDR(key); @@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any((uint8_t *) buf, bsize + 1); @@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; const char *keydata = VARDATA_ANY(key); size_t keylen = VARSIZE_ANY_EXHDR(key); @@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. 
*/ result = hash_any_extended((uint8_t *) buf, bsize + 1, PG_GETARG_INT64(1)); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 0124f37911..7ed72abe59 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2491,7 +2491,7 @@ static inline bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) { const uint16 interesting = - HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; + HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; if ((new_infomask & interesting) != (old_infomask & interesting)) return true; diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index cbb35aa73d..646135cc21 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, * Note: heap_update returns the tid (location) of the new tuple in the * t_self field. * - * If the update is not HOT, we must update all indexes. If the update - * is HOT, it could be that we updated summarized columns, so we either + * If the update is not HOT, we must update all indexes. If the update is + * HOT, it could be that we updated summarized columns, so we either * update only summarized indexes, or none at all. */ if (result != TM_Ok) diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index fb95c19e90..c275b08494 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -376,7 +376,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, if (use_fsm && i >= not_in_fsm_pages) { Size freespace = BufferGetPageSize(victim_buffers[i]) - - SizeOfPageHeaderData; + SizeOfPageHeaderData; RecordPageWithFreeSpace(relation, curBlock, freespace); } diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 20df39c149..47b9e20915 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -532,7 +532,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer) if (!TransactionIdIsValid(prstate->old_snap_xmin)) { TransactionId horizon = - GlobalVisTestNonRemovableHorizon(prstate->vistest); + GlobalVisTestNonRemovableHorizon(prstate->vistest); TransactionIdLimitedForOldSnapshots(horizon, prstate->rel, &prstate->old_snap_xmin, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index cda8889f5e..4eb953f904 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -389,6 +389,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED); Assert(params->truncate != VACOPTVALUE_UNSPECIFIED && params->truncate != VACOPTVALUE_AUTO); + /* * While VacuumFailSafeActive is reset to false before calling this, we * still need to reset it here due to recursive calls. @@ -1813,12 +1814,12 @@ retry: { /* * We have no freeze plans to execute, so there's no added cost - * from following the freeze path. That's why it was chosen. - * This is important in the case where the page only contains - * totally frozen tuples at this point (perhaps only following - * pruning). Such pages can be marked all-frozen in the VM by our - * caller, even though none of its tuples were newly frozen here - * (note that the "no freeze" path never sets pages all-frozen). + * from following the freeze path. That's why it was chosen. 
This + * is important in the case where the page only contains totally + * frozen tuples at this point (perhaps only following pruning). + * Such pages can be marked all-frozen in the VM by our caller, + * even though none of its tuples were newly frozen here (note + * that the "no freeze" path never sets pages all-frozen). * * We never increment the frozen_pages instrumentation counter * here, since it only counts pages with newly frozen tuples @@ -3117,8 +3118,8 @@ dead_items_max_items(LVRelState *vacrel) { int64 max_items; int vac_work_mem = IsAutoVacuumWorkerProcess() && - autovacuum_work_mem != -1 ? - autovacuum_work_mem : maintenance_work_mem; + autovacuum_work_mem != -1 ? + autovacuum_work_mem : maintenance_work_mem; if (vacrel->nindexes > 0) { diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index ac91d1a14d..7d54ec9c0f 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -626,7 +626,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks) { - Buffer buf; + Buffer buf; buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL, EB_CREATE_FORK_IF_NEEDED | diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 41aa1c4ccd..6be8915229 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -2947,7 +2947,7 @@ void _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate) { IndexBulkDeleteResult *stats = vstate->stats; - Relation heaprel = vstate->info->heaprel; + Relation heaprel = vstate->info->heaprel; Assert(stats->pages_newly_deleted >= vstate->npendingpages); @@ -3027,7 +3027,7 @@ _bt_pendingfsm_add(BTVacState *vstate, if (vstate->npendingpages > 0) { FullTransactionId lastsafexid = - vstate->pendingpages[vstate->npendingpages - 1].safexid; + vstate->pendingpages[vstate->npendingpages - 1].safexid; Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid)); } diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c index 7d12e0ef91..3922120d64 100644 --- a/src/backend/access/rmgrdesc/dbasedesc.c +++ b/src/backend/access/rmgrdesc/dbasedesc.c @@ -27,7 +27,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) if (info == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) rec; + (xl_dbase_create_file_copy_rec *) rec; appendStringInfo(buf, "copy dir %u/%u to %u/%u", xlrec->src_tablespace_id, xlrec->src_db_id, @@ -36,7 +36,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) else if (info == XLOG_DBASE_CREATE_WAL_LOG) { xl_dbase_create_wal_log_rec *xlrec = - (xl_dbase_create_wal_log_rec *) rec; + (xl_dbase_create_wal_log_rec *) rec; appendStringInfo(buf, "create dir %u/%u", xlrec->tablespace_id, xlrec->db_id); diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index 9ef4981ad1..246a6a6b85 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -120,7 +120,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogInsertDataInternal *insertData = - (ginxlogInsertDataInternal *) payload; + (ginxlogInsertDataInternal *) payload; appendStringInfo(buf, " pitem: %u-%u/%u", PostingItemGetBlockNumber(&insertData->newitem), @@ -156,7 +156,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogVacuumDataLeafPage *xlrec = - (ginxlogVacuumDataLeafPage 
*) XLogRecGetBlockData(record, 0, NULL); + (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); desc_recompress_leaf(buf, &xlrec->data); } diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index f323699165..cbfaf0c00a 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -115,7 +115,7 @@ spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances) { /* allocate distance array only for non-NULL items */ SpGistSearchItem *item = - palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); + palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys)); item->isNull = isnull; @@ -130,7 +130,7 @@ static void spgAddStartItem(SpGistScanOpaque so, bool isnull) { SpGistSearchItem *startEntry = - spgAllocSearchItem(so, isnull, so->zeroDistances); + spgAllocSearchItem(so, isnull, so->zeroDistances); ItemPointerSet(&startEntry->heapPtr, isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO, @@ -768,7 +768,7 @@ spgTestLeafTuple(SpGistScanOpaque so, storeRes_func storeRes) { SpGistLeafTuple leafTuple = (SpGistLeafTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (leafTuple->tupstate != SPGIST_LIVE) { @@ -896,7 +896,7 @@ redirect: else /* page is inner */ { SpGistInnerTuple innerTuple = (SpGistInnerTuple) - PageGetItem(page, PageGetItemId(page, offset)); + PageGetItem(page, PageGetItemId(page, offset)); if (innerTuple->tupstate != SPGIST_LIVE) { @@ -974,7 +974,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, else { IndexOrderByDistance *distances = - palloc(sizeof(distances[0]) * so->numberOfOrderBys); + palloc(sizeof(distances[0]) * so->numberOfOrderBys); int i; for (i = 0; i < so->numberOfOrderBys; i++) diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index a5e6c92f35..771438c8ce 100644 --- a/src/backend/access/table/tableam.c +++ b/src/backend/access/table/tableam.c @@ -112,7 +112,7 @@ TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key) { uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; Oid relid = RelationGetRelid(relation); Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); @@ -176,7 +176,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan) { Snapshot snapshot; uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; Assert(RelationGetRelid(relation) == pscan->phs_relid); diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index fe6698d5ff..abb022e067 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -3270,7 +3270,7 @@ multixact_redo(XLogReaderState *record) else if (info == XLOG_MULTIXACT_CREATE_ID) { xl_multixact_create *xlrec = - (xl_multixact_create *) XLogRecGetData(record); + (xl_multixact_create *) XLogRecGetData(record); TransactionId max_xid; int i; diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 7133ec0b22..2b8bc2f58d 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt) shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, 
combocidspace); /* - * Serialize the transaction snapshot if the transaction - * isolation level uses a transaction snapshot. + * Serialize the transaction snapshot if the transaction isolation + * level uses a transaction snapshot. */ if (IsolationUsesXactSnapshot()) { @@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg) RestoreClientConnectionInfo(clientconninfospace); /* - * Initialize SystemUser now that MyClientConnectionInfo is restored. - * Also ensure that auth_method is actually valid, aka authn_id is not NULL. + * Initialize SystemUser now that MyClientConnectionInfo is restored. Also + * ensure that auth_method is actually valid, aka authn_id is not NULL. */ if (MyClientConnectionInfo.authn_id) InitializeSystemUser(MyClientConnectionInfo.authn_id, diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 6a837e1539..8daaa535ed 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -3152,10 +3152,9 @@ CommitTransactionCommand(void) break; /* - * The user issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already did - * PushTransaction, so as to have someplace to put the SUBBEGIN - * state.) + * The user issued a SAVEPOINT inside a transaction block. Start a + * subtransaction. (DefineSavepoint already did PushTransaction, + * so as to have someplace to put the SUBBEGIN state.) */ case TBLOCK_SUBBEGIN: StartSubTransaction(); @@ -4696,9 +4695,9 @@ RollbackAndReleaseCurrentSubTransaction(void) s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState == TBLOCK_SUBINPROGRESS || - s->blockState == TBLOCK_INPROGRESS || - s->blockState == TBLOCK_IMPLICIT_INPROGRESS || - s->blockState == TBLOCK_STARTED); + s->blockState == TBLOCK_INPROGRESS || + s->blockState == TBLOCK_IMPLICIT_INPROGRESS || + s->blockState == TBLOCK_STARTED); } /* diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index bc5a8e0569..b2430f617c 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5460,8 +5460,8 @@ StartupXLOG(void) missingContrecPtr = endOfRecoveryInfo->missingContrecPtr; /* - * Reset ps status display, so as no information related to recovery - * shows up. + * Reset ps status display, so as no information related to recovery shows + * up. */ set_ps_display(""); @@ -5596,9 +5596,9 @@ StartupXLOG(void) if (!XLogRecPtrIsInvalid(missingContrecPtr)) { /* - * We should only have a missingContrecPtr if we're not switching to - * a new timeline. When a timeline switch occurs, WAL is copied from - * the old timeline to the new only up to the end of the last complete + * We should only have a missingContrecPtr if we're not switching to a + * new timeline. When a timeline switch occurs, WAL is copied from the + * old timeline to the new only up to the end of the last complete * record, so there can't be an incomplete WAL record that we need to * disregard. 
*/ @@ -8494,7 +8494,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, */ if (rllen > datadirpathlen && strncmp(linkpath, DataDir, datadirpathlen) == 0 && - IS_DIR_SEP(linkpath[datadirpathlen])) + IS_DIR_SEP(linkpath[datadirpathlen])) relpath = pstrdup(linkpath + datadirpathlen + 1); /* diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index ea7e2f67af..54247e1d81 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, * * XLogReader machinery is only able to handle records up to a certain * size (ignoring machine resource limitations), so make sure that we will - * not emit records larger than the sizes advertised to be supported. - * This cap is based on DecodeXLogRecordRequiredSpace(). + * not emit records larger than the sizes advertised to be supported. This + * cap is based on DecodeXLogRecordRequiredSpace(). */ if (total_len >= XLogRecordMaxSize) ereport(ERROR, diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c index 906e3d9469..539928cb85 100644 --- a/src/backend/access/transam/xlogprefetcher.c +++ b/src/backend/access/transam/xlogprefetcher.c @@ -569,7 +569,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) if (record_type == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) record->main_data; + (xl_dbase_create_file_copy_rec *) record->main_data; RelFileLocator rlocator = {InvalidOid, xlrec->db_id, InvalidRelFileNumber}; @@ -596,7 +596,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) if (record_type == XLOG_SMGR_CREATE) { xl_smgr_create *xlrec = (xl_smgr_create *) - record->main_data; + record->main_data; if (xlrec->forkNum == MAIN_FORKNUM) { @@ -624,7 +624,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn) else if (record_type == XLOG_SMGR_TRUNCATE) { xl_smgr_truncate *xlrec = (xl_smgr_truncate *) - record->main_data; + record->main_data; /* * Don't consider prefetching anything in the truncated diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index 631f260f79..2e7b1ba8e1 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -282,7 +282,7 @@ XLogRecPtr XLogReleasePreviousRecord(XLogReaderState *state) { DecodedXLogRecord *record; - XLogRecPtr next_lsn; + XLogRecPtr next_lsn; if (!state->record) return InvalidXLogRecPtr; diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index 188f6d6f85..4883fcb512 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -3215,7 +3215,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *readBuf) { XLogPageReadPrivate *private = - (XLogPageReadPrivate *) xlogreader->private_data; + (XLogPageReadPrivate *) xlogreader->private_data; int emode = private->emode; uint32 targetPageOff; XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY; diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c index 5baea7535b..45be21131c 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c @@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename, * * There's no guarantee that this will 
actually * happen, though: the torn write could take an - * arbitrarily long time to complete. Retrying multiple - * times wouldn't fix this problem, either, though - * it would reduce the chances of it happening in - * practice. The only real fix here seems to be to + * arbitrarily long time to complete. Retrying + * multiple times wouldn't fix this problem, either, + * though it would reduce the chances of it happening + * in practice. The only real fix here seems to be to * have some kind of interlock that allows us to wait * until we can be certain that no write to the block * is in progress. Since we don't have any such thing diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c index 73a3f4a970..1db80cde1b 100644 --- a/src/backend/backup/basebackup_copy.c +++ b/src/backend/backup/basebackup_copy.c @@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) tupdesc = CreateTemplateTupleDesc(2); TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0); + /* * int8 may seem like a surprising data type for this, but in theory int4 * would not be wide enough for this, as TimeLineID is unsigned. @@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual); /* Data row */ - values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); + values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr))); values[1] = Int64GetDatum(tli); do_tup_output(tstate, values, nulls); diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm index 656b57934e..84aaeb002a 100644 --- a/src/backend/catalog/Catalog.pm +++ b/src/backend/catalog/Catalog.pm @@ -28,25 +28,25 @@ sub ParseHeader # There are a few types which are given one name in the C source, but a # different name at the SQL level. These are enumerated here. my %RENAME_ATTTYPE = ( - 'int16' => 'int2', - 'int32' => 'int4', - 'int64' => 'int8', - 'Oid' => 'oid', - 'NameData' => 'name', + 'int16' => 'int2', + 'int32' => 'int4', + 'int64' => 'int8', + 'Oid' => 'oid', + 'NameData' => 'name', 'TransactionId' => 'xid', - 'XLogRecPtr' => 'pg_lsn'); + 'XLogRecPtr' => 'pg_lsn'); my %catalog; my $declaring_attributes = 0; - my $is_varlen = 0; - my $is_client_code = 0; + my $is_varlen = 0; + my $is_client_code = 0; - $catalog{columns} = []; - $catalog{toasting} = []; - $catalog{indexing} = []; - $catalog{other_oids} = []; + $catalog{columns} = []; + $catalog{toasting} = []; + $catalog{indexing} = []; + $catalog{other_oids} = []; $catalog{foreign_keys} = []; - $catalog{client_code} = []; + $catalog{client_code} = []; open(my $ifh, '<', $input_file) || die "$input_file: $!"; @@ -102,10 +102,10 @@ sub ParseHeader { push @{ $catalog{toasting} }, { - parent_table => $1, - toast_oid => $2, - toast_index_oid => $3, - toast_oid_macro => $4, + parent_table => $1, + toast_oid => $2, + toast_index_oid => $3, + toast_oid_macro => $4, toast_index_oid_macro => $5 }; } @@ -116,11 +116,11 @@ sub ParseHeader push @{ $catalog{indexing} }, { is_unique => $1 ? 1 : 0, - is_pkey => $2 ? 1 : 0, - index_name => $3, - index_oid => $4, + is_pkey => $2 ? 
1 : 0, + index_name => $3, + index_oid => $4, index_oid_macro => $5, - index_decl => $6 + index_decl => $6 }; } elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/) @@ -128,7 +128,7 @@ sub ParseHeader push @{ $catalog{other_oids} }, { other_name => $1, - other_oid => $2 + other_oid => $2 }; } elsif ( @@ -138,16 +138,16 @@ sub ParseHeader push @{ $catalog{foreign_keys} }, { is_array => $1 ? 1 : 0, - is_opt => $2 ? 1 : 0, - fk_cols => $3, + is_opt => $2 ? 1 : 0, + fk_cols => $3, pk_table => $4, - pk_cols => $5 + pk_cols => $5 }; } elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/) { - $catalog{catname} = $1; - $catalog{relation_oid} = $2; + $catalog{catname} = $1; + $catalog{relation_oid} = $2; $catalog{relation_oid_macro} = $3; $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : ''; @@ -155,15 +155,15 @@ sub ParseHeader /BKI_SHARED_RELATION/ ? ' shared_relation' : ''; if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/) { - $catalog{rowtype_oid} = $1; + $catalog{rowtype_oid} = $1; $catalog{rowtype_oid_clause} = " rowtype_oid $1"; - $catalog{rowtype_oid_macro} = $2; + $catalog{rowtype_oid_macro} = $2; } else { - $catalog{rowtype_oid} = ''; + $catalog{rowtype_oid} = ''; $catalog{rowtype_oid_clause} = ''; - $catalog{rowtype_oid_macro} = ''; + $catalog{rowtype_oid_macro} = ''; } $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0; $declaring_attributes = 1; @@ -209,8 +209,8 @@ sub ParseHeader $atttype = '_' . $atttype; } - $column{type} = $atttype; - $column{name} = $attname; + $column{type} = $atttype; + $column{name} = $attname; $column{is_varlen} = 1 if $is_varlen; foreach my $attopt (@attopts) @@ -243,14 +243,14 @@ sub ParseHeader # BKI_LOOKUP implicitly makes an FK reference push @{ $catalog{foreign_keys} }, { - is_array => - ($atttype eq 'oidvector' || $atttype eq '_oid') + is_array => ( + $atttype eq 'oidvector' || $atttype eq '_oid') ? 1 : 0, - is_opt => $column{lookup_opt}, - fk_cols => $attname, + is_opt => $column{lookup_opt}, + fk_cols => $attname, pk_table => $column{lookup}, - pk_cols => 'oid' + pk_cols => 'oid' }; } else @@ -285,7 +285,7 @@ sub ParseData $input_file =~ /(\w+)\.dat$/ or die "Input file $input_file needs to be a .dat file.\n"; my $catname = $1; - my $data = []; + my $data = []; if ($preserve_formatting) { @@ -433,7 +433,7 @@ sub AddDefaultValues sub GenerateArrayTypes { my $pgtype_schema = shift; - my $types = shift; + my $types = shift; my @array_types; foreach my $elem_type (@$types) @@ -444,9 +444,9 @@ sub GenerateArrayTypes my %array_type; # Set up metadata fields for array type. - $array_type{oid} = $elem_type->{array_type_oid}; + $array_type{oid} = $elem_type->{array_type_oid}; $array_type{autogenerated} = 1; - $array_type{line_number} = $elem_type->{line_number}; + $array_type{line_number} = $elem_type->{line_number}; # Set up column values derived from the element type. $array_type{typname} = '_' . $elem_type->{typname}; @@ -499,8 +499,8 @@ sub GenerateArrayTypes sub RenameTempFile { my $final_name = shift; - my $extension = shift; - my $temp_name = $final_name . $extension; + my $extension = shift; + my $temp_name = $final_name . 
$extension; if (-f $final_name && compare($temp_name, $final_name) == 0) diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 45cdcd3dc6..bc2ad773c9 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask, result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)); /* - * Check if ACL_MAINTAIN is being checked and, if so, and not already set as - * part of the result, then check if the user is a member of the + * Check if ACL_MAINTAIN is being checked and, if so, and not already set + * as part of the result, then check if the user is a member of the * pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH * MATERIALIZED VIEW, and REINDEX on all relations. */ diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index 2c5bfe23a1..4a7205472c 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -29,12 +29,12 @@ my $include_path; my $num_errors = 0; GetOptions( - 'output:s' => \$output_path, - 'set-version:s' => \$major_version, + 'output:s' => \$output_path, + 'set-version:s' => \$major_version, 'include-path:s' => \$include_path) || usage(); # Sanity check arguments. -die "No input files.\n" unless @ARGV; +die "No input files.\n" unless @ARGV; die "--set-version must be specified.\n" unless $major_version; die "Invalid version string: $major_version\n" unless $major_version =~ /^\d+$/; @@ -67,7 +67,7 @@ foreach my $header (@ARGV) my $catalog = Catalog::ParseHeader($header); my $catname = $catalog->{catname}; - my $schema = $catalog->{columns}; + my $schema = $catalog->{columns}; if (defined $catname) { @@ -100,9 +100,9 @@ foreach my $header (@ARGV) if (defined $row->{descr}) { my %descr = ( - objoid => $row->{oid}, - classoid => $catalog->{relation_oid}, - objsubid => 0, + objoid => $row->{oid}, + classoid => $catalog->{relation_oid}, + objsubid => 0, description => $row->{descr}); if ($catalog->{shared_relation}) @@ -364,7 +364,7 @@ open(my $ef, '<', $encfile) || die "$encfile: $!"; # We're parsing an enum, so start with 0 and increment # every time we find an enum member. -my $encid = 0; +my $encid = 0; my $collect_encodings = 0; while (<$ef>) { @@ -387,27 +387,27 @@ close $ef; # Map lookup name to the corresponding hash table. my %lookup_kind = ( - pg_am => \%amoids, - pg_authid => \%authidoids, - pg_class => \%classoids, - pg_collation => \%collationoids, - pg_language => \%langoids, - pg_namespace => \%namespaceoids, - pg_opclass => \%opcoids, - pg_operator => \%operoids, - pg_opfamily => \%opfoids, - pg_proc => \%procoids, - pg_tablespace => \%tablespaceoids, - pg_ts_config => \%tsconfigoids, - pg_ts_dict => \%tsdictoids, - pg_ts_parser => \%tsparseroids, + pg_am => \%amoids, + pg_authid => \%authidoids, + pg_class => \%classoids, + pg_collation => \%collationoids, + pg_language => \%langoids, + pg_namespace => \%namespaceoids, + pg_opclass => \%opcoids, + pg_operator => \%operoids, + pg_opfamily => \%opfoids, + pg_proc => \%procoids, + pg_tablespace => \%tablespaceoids, + pg_ts_config => \%tsconfigoids, + pg_ts_dict => \%tsdictoids, + pg_ts_parser => \%tsparseroids, pg_ts_template => \%tstemplateoids, - pg_type => \%typeoids, - encoding => \%encids); + pg_type => \%typeoids, + encoding => \%encids); # Open temp files -my $tmpext = ".tmp$$"; +my $tmpext = ".tmp$$"; my $bkifile = $output_path . 'postgres.bki'; open my $bki, '>', $bkifile . 
$tmpext or die "can't open $bkifile$tmpext: $!"; @@ -600,7 +600,7 @@ EOM # each element of the array as per the lookup rule. if ($column->{lookup}) { - my $lookup = $lookup_kind{ $column->{lookup} }; + my $lookup = $lookup_kind{ $column->{lookup} }; my $lookup_opt = $column->{lookup_opt}; my @lookupnames; my @lookupoids; @@ -790,7 +790,7 @@ foreach my $catname (@catnames) printf $fk_info "\t{ /* %s */ %s, /* %s */ %s, \"{%s}\", \"{%s}\", %s, %s},\n", - $catname, $catalog->{relation_oid}, + $catname, $catalog->{relation_oid}, $pktabname, $catalogs{$pktabname}->{relation_oid}, $fkinfo->{fk_cols}, $fkinfo->{pk_cols}, @@ -809,9 +809,9 @@ close $fk_info; close $constraints; # Finally, rename the completed files into place. -Catalog::RenameTempFile($bkifile, $tmpext); -Catalog::RenameTempFile($schemafile, $tmpext); -Catalog::RenameTempFile($fk_info_file, $tmpext); +Catalog::RenameTempFile($bkifile, $tmpext); +Catalog::RenameTempFile($schemafile, $tmpext); +Catalog::RenameTempFile($fk_info_file, $tmpext); Catalog::RenameTempFile($constraints_file, $tmpext); exit($num_errors != 0 ? 1 : 0); @@ -845,13 +845,13 @@ sub gen_pg_attribute push @tables_needing_macros, $table_name; # Generate entries for user attributes. - my $attnum = 0; + my $attnum = 0; my $priorfixedwidth = 1; foreach my $attr (@{ $table->{columns} }) { $attnum++; my %row; - $row{attnum} = $attnum; + $row{attnum} = $attnum; $row{attrelid} = $table->{relation_oid}; morph_row_for_pgattr(\%row, $schema, $attr, $priorfixedwidth); @@ -877,18 +877,18 @@ sub gen_pg_attribute { $attnum = 0; my @SYS_ATTRS = ( - { name => 'ctid', type => 'tid' }, - { name => 'xmin', type => 'xid' }, - { name => 'cmin', type => 'cid' }, - { name => 'xmax', type => 'xid' }, - { name => 'cmax', type => 'cid' }, + { name => 'ctid', type => 'tid' }, + { name => 'xmin', type => 'xid' }, + { name => 'cmin', type => 'cid' }, + { name => 'xmax', type => 'xid' }, + { name => 'cmax', type => 'cid' }, { name => 'tableoid', type => 'oid' }); foreach my $attr (@SYS_ATTRS) { $attnum--; my %row; - $row{attnum} = $attnum; - $row{attrelid} = $table->{relation_oid}; + $row{attnum} = $attnum; + $row{attrelid} = $table->{relation_oid}; $row{attstattarget} = '0'; morph_row_for_pgattr(\%row, $schema, $attr, 1); @@ -916,10 +916,10 @@ sub morph_row_for_pgattr # Copy the type data from pg_type, and add some type-dependent items my $type = $types{$atttype}; - $row->{atttypid} = $type->{oid}; - $row->{attlen} = $type->{typlen}; - $row->{attbyval} = $type->{typbyval}; - $row->{attalign} = $type->{typalign}; + $row->{atttypid} = $type->{oid}; + $row->{attlen} = $type->{typlen}; + $row->{attbyval} = $type->{typbyval}; + $row->{attalign} = $type->{typalign}; $row->{attstorage} = $type->{typstorage}; # set attndims if it's an array type @@ -946,7 +946,7 @@ sub morph_row_for_pgattr # At this point the width of type name is still symbolic, # so we need a special test. $row->{attnotnull} = - $row->{attlen} eq 'NAMEDATALEN' ? 't' + $row->{attlen} eq 'NAMEDATALEN' ? 't' : $row->{attlen} > 0 ? 't' : 'f'; } @@ -962,15 +962,15 @@ sub morph_row_for_pgattr # Write an entry to postgres.bki. 
sub print_bki_insert { - my $row = shift; + my $row = shift; my $schema = shift; my @bki_values; foreach my $column (@$schema) { - my $attname = $column->{name}; - my $atttype = $column->{type}; + my $attname = $column->{name}; + my $atttype = $column->{type}; my $bki_value = $row->{$attname}; # Fold backslash-zero to empty string if it's the entire string, @@ -1002,7 +1002,7 @@ sub print_bki_insert # quite identical, to the corresponding values in postgres.bki. sub morph_row_for_schemapg { - my $row = shift; + my $row = shift; my $pgattr_schema = shift; foreach my $column (@$pgattr_schema) @@ -1027,7 +1027,7 @@ sub morph_row_for_schemapg # don't change. elsif ($atttype eq 'bool') { - $row->{$attname} = 'true' if $row->{$attname} eq 't'; + $row->{$attname} = 'true' if $row->{$attname} eq 't'; $row->{$attname} = 'false' if $row->{$attname} eq 'f'; } @@ -1089,7 +1089,7 @@ sub form_pg_type_symbol # Skip for rowtypes of bootstrap catalogs, since they have their # own naming convention defined elsewhere. return - if $typename eq 'pg_type' + if $typename eq 'pg_type' or $typename eq 'pg_proc' or $typename eq 'pg_attribute' or $typename eq 'pg_class'; diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index feddff654e..522da0ac85 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple, #endif /* USE_ASSERT_CHECKING */ /* - * Skip insertions into non-summarizing indexes if we only need - * to update summarizing indexes. + * Skip insertions into non-summarizing indexes if we only need to + * update summarizing indexes. */ if (onlySummarized && !indexInfo->ii_Summarizing) continue; diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 73ddb67882..69ab1b8e4b 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -3842,7 +3842,7 @@ recomputeNamespacePath(void) if (OidIsValid(namespaceId) && !list_member_oid(oidlist, namespaceId) && object_aclcheck(NamespaceRelationId, namespaceId, roleid, - ACL_USAGE) == ACLCHECK_OK && + ACL_USAGE) == ACLCHECK_OK && InvokeNamespaceSearchHook(namespaceId, false)) oidlist = lappend_oid(oidlist, namespaceId); } @@ -3870,7 +3870,7 @@ recomputeNamespacePath(void) if (OidIsValid(namespaceId) && !list_member_oid(oidlist, namespaceId) && object_aclcheck(NamespaceRelationId, namespaceId, roleid, - ACL_USAGE) == ACLCHECK_OK && + ACL_USAGE) == ACLCHECK_OK && InvokeNamespaceSearchHook(namespaceId, false)) oidlist = lappend_oid(oidlist, namespaceId); } @@ -4006,7 +4006,7 @@ InitTempTableNamespace(void) * temp table creation request is made by someone with appropriate rights. 
*/ if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CREATE_TEMP) != ACLCHECK_OK) + ACL_CREATE_TEMP) != ACLCHECK_OK) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to create temporary tables in database \"%s\"", diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index 792b0ef414..95918a77a1 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -625,7 +625,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId, /* not in catalogs, different from operator, so make shell */ aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(otherNamespace)); diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 64d326f073..91c7f3426f 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior) /* FALLTHROUGH */ case SHARED_DEPENDENCY_OWNER: + /* * Save it for deletion below, if it's a local object or a * role grant. Other shared objects, such as databases, diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 10f28f94bc..e95dc31bde 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) if (OidIsValid(namespaceId)) { aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -1035,7 +1035,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) AclResult aclresult; aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index c91fe66d9b..2969a2bb21 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -270,8 +270,8 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e */ if (!IsBinaryUpgrade) { - char *langtag = icu_language_tag(colliculocale, - icu_validation_level); + char *langtag = icu_language_tag(colliculocale, + icu_validation_level); if (langtag && strcmp(colliculocale, langtag) != 0) { @@ -476,17 +476,18 @@ AlterCollation(AlterCollationStmt *stmt) Datum pg_collation_actual_version(PG_FUNCTION_ARGS) { - Oid collid = PG_GETARG_OID(0); - char provider; - char *locale; - char *version; - Datum datum; + Oid collid = PG_GETARG_OID(0); + char provider; + char *locale; + char *version; + Datum datum; if (collid == DEFAULT_COLLATION_OID) { /* retrieve from pg_database */ HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId)); + if (!HeapTupleIsValid(dbtup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -506,7 +507,8 @@ pg_collation_actual_version(PG_FUNCTION_ARGS) { /* retrieve from pg_collation */ - HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid)); + if (!HeapTupleIsValid(colltp)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ 
-657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid, Oid collid; /* - * Some systems have locale names that don't consist entirely of - * ASCII letters (such as "bokmål" or "français"). - * This is pretty silly, since we need the locale itself to - * interpret the non-ASCII characters. We can't do much with - * those, so we filter them out. + * Some systems have locale names that don't consist entirely of ASCII + * letters (such as "bokmål" or "français"). This is pretty + * silly, since we need the locale itself to interpret the non-ASCII + * characters. We can't do much with those, so we filter them out. */ if (!pg_is_ascii(locale)) { @@ -681,19 +682,18 @@ create_collation_from_locale(const char *locale, int nspid, return -1; } if (enc == PG_SQL_ASCII) - return -1; /* C/POSIX are already in the catalog */ + return -1; /* C/POSIX are already in the catalog */ /* count valid locales found in operating system */ (*nvalidp)++; /* - * Create a collation named the same as the locale, but quietly - * doing nothing if it already exists. This is the behavior we - * need even at initdb time, because some versions of "locale -a" - * can report the same locale name more than once. And it's - * convenient for later import runs, too, since you just about - * always want to add on new locales without a lot of chatter - * about existing ones. + * Create a collation named the same as the locale, but quietly doing + * nothing if it already exists. This is the behavior we need even at + * initdb time, because some versions of "locale -a" can report the same + * locale name more than once. And it's convenient for later import runs, + * too, since you just about always want to add on new locales without a + * lot of chatter about existing ones. */ collid = CollationCreate(locale, nspid, GetUserId(), COLLPROVIDER_LIBC, true, enc, @@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS) param.nvalidp = &nvalid; /* - * Enumerate the locales that are either installed on or supported - * by the OS. + * Enumerate the locales that are either installed on or supported by + * the OS. */ if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL, (LPARAM) &param, NULL)) diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 2e242eeff2..99d4080ea9 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -259,7 +259,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath) List *rlocatorlist = NIL; LockRelId relid; Snapshot snapshot; - SMgrRelation smgr; + SMgrRelation smgr; BufferAccessStrategy bstrategy; /* Get pg_class relfilenumber. */ @@ -1065,8 +1065,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) */ if (!IsBinaryUpgrade && dbiculocale != src_iculocale) { - char *langtag = icu_language_tag(dbiculocale, - icu_validation_level); + char *langtag = icu_language_tag(dbiculocale, + icu_validation_level); if (langtag && strcmp(dbiculocale, langtag) != 0) { @@ -1219,7 +1219,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) dst_deftablespace = get_tablespace_oid(tablespacename, false); /* check permissions */ aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, tablespacename); @@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) * If we're going to be reading data for the to-be-created database into * shared_buffers, take a lock on it. 
Nobody should know that this * database exists yet, but it's good to maintain the invariant that an - * AccessExclusiveLock on the database is sufficient to drop all - * of its buffers without worrying about more being read later. + * AccessExclusiveLock on the database is sufficient to drop all of its + * buffers without worrying about more being read later. * * Note that we need to do this before entering the * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback @@ -1933,7 +1933,7 @@ movedb(const char *dbname, const char *tblspcname) * Permission checks */ aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, tblspcname); @@ -3110,7 +3110,7 @@ dbase_redo(XLogReaderState *record) if (info == XLOG_DBASE_CREATE_FILE_COPY) { xl_dbase_create_file_copy_rec *xlrec = - (xl_dbase_create_file_copy_rec *) XLogRecGetData(record); + (xl_dbase_create_file_copy_rec *) XLogRecGetData(record); char *src_path; char *dst_path; char *parent_path; @@ -3182,7 +3182,7 @@ dbase_redo(XLogReaderState *record) else if (info == XLOG_DBASE_CREATE_WAL_LOG) { xl_dbase_create_wal_log_rec *xlrec = - (xl_dbase_create_wal_log_rec *) XLogRecGetData(record); + (xl_dbase_create_wal_log_rec *) XLogRecGetData(record); char *dbpath; char *parent_path; diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c index 82bda15889..469a6c2ee9 100644 --- a/src/backend/commands/dropcmds.c +++ b/src/backend/commands/dropcmds.c @@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) case OBJECT_TABLE: case OBJECT_TABLESPACE: case OBJECT_VIEW: + /* * These are handled elsewhere, so if someone gets here the code * is probably wrong or should be revisited. diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 5334c503e1..15f9bddcdf 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -1523,7 +1523,7 @@ ExplainNode(PlanState *planstate, List *ancestors, { BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan; const char *indexname = - explain_get_index_name(bitmapindexscan->indexid); + explain_get_index_name(bitmapindexscan->indexid); if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfo(es->str, " on %s", @@ -3008,7 +3008,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, for (n = 0; n < incrsortstate->shared_info->num_workers; n++) { IncrementalSortInfo *incsort_info = - &incrsortstate->shared_info->sinfo[n]; + &incrsortstate->shared_info->sinfo[n]; /* * If a worker hasn't processed any sort groups at all, then @@ -4212,7 +4212,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es) { ListCell *cell; const char *label = - (list_length(css->custom_ps) != 1 ? "children" : "child"); + (list_length(css->custom_ps) != 1 ? 
"children" : "child"); foreach(cell, css->custom_ps) ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es); diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 71caa3b9f3..49c7864c7c 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -151,7 +151,7 @@ compute_return_type(TypeName *returnType, Oid languageOid, namespaceId = QualifiedNameGetCreationNamespace(returnType->names, &typname); aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -2117,7 +2117,7 @@ ExecuteDoStmt(ParseState *pstate, DoStmt *stmt, bool atomic) AclResult aclresult; aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index e6ee99e51f..a5168c9f09 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -748,7 +748,7 @@ DefineIndex(Oid relationId, AclResult aclresult; aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); @@ -780,7 +780,7 @@ DefineIndex(Oid relationId, AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); @@ -2708,7 +2708,7 @@ ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(params.tablespaceOid)); @@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, /* * The table can be reindexed if the user has been granted MAINTAIN on * the table or one of its partition ancestors or the user is a - * superuser, the table owner, or the database/schema owner (but in the - * latter case, only if it's not a shared relation). pg_class_aclcheck - * includes the superuser case, and depending on objectKind we already - * know that the user has permission to run REINDEX on this database or - * schema per the permission checks at the beginning of this routine. + * superuser, the table owner, or the database/schema owner (but in + * the latter case, only if it's not a shared relation). + * pg_class_aclcheck includes the superuser case, and depending on + * objectKind we already know that the user has permission to run + * REINDEX on this database or schema per the permission checks at the + * beginning of this routine. 
*/ if (classtuple->relisshared && pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK && @@ -3312,7 +3313,7 @@ ReindexMultipleInternal(List *relids, ReindexParams *params) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(params->tablespaceOid)); diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index b6a71154a8..6eb3dc6bab 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -400,7 +400,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) * no special case for them. */ aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index e8b288d01c..1c88c2bccb 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); /* - * We don't want to allow unprivileged users to be able to trigger attempts - * to access arbitrary network destinations, so require the user to have - * been specifically authorized to create subscriptions. + * We don't want to allow unprivileged users to be able to trigger + * attempts to access arbitrary network destinations, so require the user + * to have been specifically authorized to create subscriptions. */ if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION)) ereport(ERROR, @@ -631,10 +631,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt, * exempt a subscription from this requirement. */ if (!opts.passwordrequired && !superuser_arg(owner)) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* * If built with appropriate switch, whine when regression-testing @@ -1113,8 +1113,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, if (!sub->passwordrequired && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* Lock the subscription so nobody else can do anything with it. 
*/ LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); @@ -1827,8 +1827,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) if (!form->subpasswordrequired && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("password_required=false is superuser-only"), - errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); + errmsg("password_required=false is superuser-only"), + errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser."))); /* Must be able to become new owner */ check_can_set_role(GetUserId(), newOwnerId); @@ -1837,8 +1837,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) * current owner must have CREATE on database * * This is consistent with how ALTER SCHEMA ... OWNER TO works, but some - * other object types behave differently (e.g. you can't give a table to - * a user who lacks CREATE privileges on a schema). + * other object types behave differently (e.g. you can't give a table to a + * user who lacks CREATE privileges on a schema). */ aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), ACL_CREATE); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index c7a8a689b7..4d49d70c33 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -806,7 +806,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); @@ -1931,7 +1931,7 @@ ExecuteTruncateGuts(List *explicit_rels, resultRelInfo = resultRelInfos; foreach(cell, rels) { - UserContext ucxt; + UserContext ucxt; if (run_as_table_owner) SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, @@ -2143,7 +2143,7 @@ ExecuteTruncateGuts(List *explicit_rels, resultRelInfo = resultRelInfos; foreach(cell, rels) { - UserContext ucxt; + UserContext ucxt; if (run_as_table_owner) SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner, @@ -2635,7 +2635,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, if (CompressionMethodIsValid(attribute->attcompression)) { const char *compression = - GetCompressionMethodName(attribute->attcompression); + GetCompressionMethodName(attribute->attcompression); if (def->compression == NULL) def->compression = pstrdup(compression); @@ -13947,7 +13947,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock /* New owner must have CREATE privilege on namespace */ aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId, - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceOid)); @@ -14377,7 +14377,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(view_query, true); + view_query_is_auto_updatable(view_query, true); if (view_updatable_error) ereport(ERROR, @@ -14656,7 +14656,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) AclResult aclresult; aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != 
ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(new_tablespaceoid)); @@ -17134,7 +17134,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, if (IsA(stmt, RenameStmt)) { aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace, - GetUserId(), ACL_CREATE); + GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(classform->relnamespace)); diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 3dfbf6a917..13b0dee146 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -1278,7 +1278,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) /* Check permissions, similarly complaining only if interactive */ aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) { if (source >= PGC_S_INTERACTIVE) @@ -1408,7 +1408,7 @@ PrepareTempTablespaces(void) /* Check permissions similarly */ aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) continue; diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 3440dbc440..216482095d 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -734,7 +734,7 @@ DefineDomain(CreateDomainStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(), - ACL_CREATE); + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(domainNamespace)); @@ -3743,8 +3743,8 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) /* New owner must have CREATE privilege on namespace */ aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace, - newOwnerId, - ACL_CREATE); + newOwnerId, + ACL_CREATE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(typTup->typnamespace)); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 707114bdd0..d63d3c58ca 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -86,7 +86,7 @@ typedef struct int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256; char *createrole_self_grant = ""; bool createrole_self_grant_enabled = false; -GrantRoleOptions createrole_self_grant_options; +GrantRoleOptions createrole_self_grant_options; /* Hook to check passwords in CreateRole() and AlterRole() */ check_password_hook_type check_password_hook = NULL; @@ -169,7 +169,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) DefElem *dadminmembers = NULL; DefElem *dvalidUntil = NULL; DefElem *dbypassRLS = NULL; - GrantRoleOptions popt; + GrantRoleOptions popt; /* The defaults can vary depending on the original statement type */ switch (stmt->stmt_type) @@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) * * The grantor of record for this implicit grant is the bootstrap * superuser, which means that the CREATEROLE user cannot revoke the - * grant. They can however grant the created role back to themselves - * with different options, since they enjoy ADMIN OPTION on it. + * grant. They can however grant the created role back to themselves with + * different options, since they enjoy ADMIN OPTION on it. 
*/ if (!superuser()) { @@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) BOOTSTRAP_SUPERUSERID, &poptself); /* - * We must make the implicit grant visible to the code below, else - * the additional grants will fail. + * We must make the implicit grant visible to the code below, else the + * additional grants will fail. */ CommandCounterIncrement(); @@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) * Add the specified members to this new role. adminmembers get the admin * option, rolemembers don't. * - * NB: No permissions check is required here. If you have enough rights - * to create a role, you can add any members you like. + * NB: No permissions check is required here. If you have enough rights to + * create a role, you can add any members you like. */ AddRoleMems(currentUserId, stmt->role, roleid, rolemembers, roleSpecsToIds(rolemembers), @@ -647,7 +647,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) DefElem *dbypassRLS = NULL; Oid roleid; Oid currentUserId = GetUserId(); - GrantRoleOptions popt; + GrantRoleOptions popt; check_rolespec_name(stmt->role, _("Cannot alter reserved roles.")); @@ -862,7 +862,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) */ if (dissuper) { - bool should_be_super = boolVal(dissuper->arg); + bool should_be_super = boolVal(dissuper->arg); if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID) ereport(ERROR, @@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt) shdepLockAndCheckObject(AuthIdRelationId, roleid); /* - * To mess with a superuser you gotta be superuser; otherwise you - * need CREATEROLE plus admin option on the target role; unless you're - * just trying to change your own settings + * To mess with a superuser you gotta be superuser; otherwise you need + * CREATEROLE plus admin option on the target role; unless you're just + * trying to change your own settings */ if (roleform->rolsuper) { @@ -1037,7 +1037,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt) else { if ((!have_createrole_privilege() || - !is_admin_of_role(GetUserId(), roleid)) + !is_admin_of_role(GetUserId(), roleid)) && roleid != GetUserId()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), @@ -1490,14 +1490,14 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt) Oid grantor; List *grantee_ids; ListCell *item; - GrantRoleOptions popt; + GrantRoleOptions popt; Oid currentUserId = GetUserId(); /* Parse options list. */ InitGrantRoleOptions(&popt); foreach(item, stmt->opt) { - DefElem *opt = (DefElem *) lfirst(item); + DefElem *opt = (DefElem *) lfirst(item); char *optval = defGetString(opt); if (strcmp(opt->defname, "admin") == 0) @@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt) /* * Step through all of the granted roles and add, update, or remove * entries in pg_auth_members as appropriate. If stmt->is_grant is true, - * we are adding new grants or, if they already exist, updating options - * on those grants. If stmt->is_grant is false, we are revoking grants or + * we are adding new grants or, if they already exist, updating options on + * those grants. If stmt->is_grant is false, we are revoking grants or * removing options from them. */ foreach(item, stmt->granted_roles) @@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, ObjectIdGetDatum(grantorId)); /* - * If we found a tuple, update it with new option values, unless - * there are no changes, in which case issue a WARNING. 
+ * If we found a tuple, update it with new option values, unless there + * are no changes, in which case issue a WARNING. * * If we didn't find a tuple, just insert one. */ @@ -1932,8 +1932,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid, popt->inherit; else { - HeapTuple mrtup; - Form_pg_authid mrform; + HeapTuple mrtup; + Form_pg_authid mrform; mrtup = SearchSysCache1(AUTHOID, memberid); if (!HeapTupleIsValid(mrtup)) @@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, /* * If popt.specified == 0, we're revoking the grant entirely; otherwise, * we expect just one bit to be set, and we're revoking the corresponding - * option. As of this writing, there's no syntax that would allow for - * an attempt to revoke multiple options at once, and the logic below + * option. As of this writing, there's no syntax that would allow for an + * attempt to revoke multiple options at once, and the logic below * wouldn't work properly if such syntax were added, so assert that our * caller isn't trying to do that. */ @@ -2365,7 +2365,7 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions, } else { - bool revoke_admin_option_only; + bool revoke_admin_option_only; /* * Revoking the grant entirely, or ADMIN option on a grant, @@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source) void assign_createrole_self_grant(const char *newval, void *extra) { - unsigned options = * (unsigned *) extra; + unsigned options = *(unsigned *) extra; createrole_self_grant_enabled = (options != 0); createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index ff98c773f5..9bd77546b9 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -437,7 +437,7 @@ DefineView(ViewStmt *stmt, const char *queryString, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(viewParse, true); + view_query_is_auto_updatable(viewParse, true); if (view_updatable_error) ereport(ERROR, diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index bf257a41c8..e6e616865c 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -1214,8 +1214,8 @@ ExecInitExprRec(Expr *node, ExprState *state, /* Check permission to call function */ aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid, - GetUserId(), - ACL_EXECUTE); + GetUserId(), + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(cmpfuncid)); @@ -1224,8 +1224,8 @@ ExecInitExprRec(Expr *node, ExprState *state, if (OidIsValid(opexpr->hashfuncid)) { aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid, - GetUserId(), - ACL_EXECUTE); + GetUserId(), + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(opexpr->hashfuncid)); @@ -3613,7 +3613,7 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase, * column sorted on. 
*/ TargetEntry *source_tle = - (TargetEntry *) linitial(pertrans->aggref->args); + (TargetEntry *) linitial(pertrans->aggref->args); Assert(list_length(pertrans->aggref->args) == 1); diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index 7cc443ec52..7a4d7a4eee 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -1659,7 +1659,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerGroup pergroup_allaggs = - aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; + aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; if (pergroup_allaggs == NULL) EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull); @@ -1684,7 +1684,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1712,7 +1712,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1730,7 +1730,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1747,7 +1747,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1768,7 +1768,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1785,7 +1785,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; AggStatePerGroup pergroup = - &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index da28e5e40c..1d82b64b89 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -354,8 +354,8 @@ 
ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, continue; /* - * Skip processing of non-summarizing indexes if we only - * update summarizing indexes + * Skip processing of non-summarizing indexes if we only update + * summarizing indexes */ if (onlySummarizing && !indexInfo->ii_Summarizing) continue; diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c index d09a7758dc..73bf9152a4 100644 --- a/src/backend/executor/execSRF.c +++ b/src/backend/executor/execSRF.c @@ -260,7 +260,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (first_time) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; @@ -290,7 +290,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (tupdesc == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); /* * This is the first non-NULL result from the @@ -395,7 +395,7 @@ no_function_result: if (rsinfo.setResult == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index ad81a675aa..468db94fe5 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3690,7 +3690,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* Check permission to call aggregate function */ aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(), - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_AGGREGATE, get_func_name(aggref->aggfnoid)); @@ -3757,7 +3757,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(finalfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); @@ -3766,7 +3766,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(serialfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(serialfn_oid)); @@ -3775,7 +3775,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) if (OidIsValid(deserialfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(deserialfn_oid)); diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 301e4acba3..8b5c35b82b 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -1339,7 +1339,7 @@ ExecParallelHashRepartitionFirst(HashJoinTable hashtable) else { size_t tuple_size = - MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); + MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); /* It belongs in a later batch. 
*/ hashtable->batches[batchno].estimated_size += tuple_size; @@ -1381,7 +1381,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable) for (i = 1; i < old_nbatch; ++i) { ParallelHashJoinBatch *shared = - NthParallelHashJoinBatch(old_batches, i); + NthParallelHashJoinBatch(old_batches, i); old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared), ParallelWorkerNumber + 1, @@ -3337,7 +3337,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable) while (DsaPointerIsValid(batch->chunks)) { HashMemoryChunk chunk = - dsa_get_address(hashtable->area, batch->chunks); + dsa_get_address(hashtable->area, batch->chunks); dsa_pointer next = chunk->next.shared; dsa_free(hashtable->area, batch->chunks); diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index e40436db38..980746128b 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -1216,7 +1216,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate) { SharedTuplestoreAccessor *inner_tuples; Barrier *batch_barrier = - &hashtable->batches[batchno].shared->batch_barrier; + &hashtable->batches[batchno].shared->batch_barrier; switch (BarrierAttach(batch_barrier)) { @@ -1330,22 +1330,22 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue, BufFile *file = *fileptr; /* - * The batch file is lazily created. If this is the first tuple - * written to this batch, the batch file is created and its buffer is - * allocated in the spillCxt context, NOT in the batchCxt. + * The batch file is lazily created. If this is the first tuple written to + * this batch, the batch file is created and its buffer is allocated in + * the spillCxt context, NOT in the batchCxt. * - * During the build phase, buffered files are created for inner - * batches. Each batch's buffered file is closed (and its buffer freed) - * after the batch is loaded into memory during the outer side scan. - * Therefore, it is necessary to allocate the batch file buffer in a - * memory context which outlives the batch itself. + * During the build phase, buffered files are created for inner batches. + * Each batch's buffered file is closed (and its buffer freed) after the + * batch is loaded into memory during the outer side scan. Therefore, it + * is necessary to allocate the batch file buffer in a memory context + * which outlives the batch itself. * - * Also, we use spillCxt instead of hashCxt for a better accounting of - * the spilling memory consumption. + * Also, we use spillCxt instead of hashCxt for a better accounting of the + * spilling memory consumption. */ if (file == NULL) { - MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt); + MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt); file = BufFileCreateTemp(false); *fileptr = file; @@ -1622,7 +1622,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt) { int plan_node_id = state->js.ps.plan->plan_node_id; ParallelHashJoinState *pstate = - shm_toc_lookup(pcxt->toc, plan_node_id, false); + shm_toc_lookup(pcxt->toc, plan_node_id, false); /* * It would be possible to reuse the shared hash table in single-batch @@ -1657,7 +1657,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state, HashState *hashNode; int plan_node_id = state->js.ps.plan->plan_node_id; ParallelHashJoinState *pstate = - shm_toc_lookup(pwcxt->toc, plan_node_id, false); + shm_toc_lookup(pwcxt->toc, plan_node_id, false); /* Attach to the space for shared temporary files. 
*/ SharedFileSetAttach(&pstate->fileset, pwcxt->seg); diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index 26ceafec5f..34257ce34b 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -1007,9 +1007,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) if (incrsortstate->ss.ps.instrument != NULL) { IncrementalSortGroupInfo *fullsortGroupInfo = - &incrsortstate->incsort_info.fullsortGroupInfo; + &incrsortstate->incsort_info.fullsortGroupInfo; IncrementalSortGroupInfo *prefixsortGroupInfo = - &incrsortstate->incsort_info.prefixsortGroupInfo; + &incrsortstate->incsort_info.prefixsortGroupInfo; fullsortGroupInfo->groupCount = 0; fullsortGroupInfo->maxDiskSpaceUsed = 0; diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 7f5002527f..2a5fec8d01 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -111,7 +111,7 @@ typedef struct UpdateContext { bool updated; /* did UPDATE actually occur? */ bool crossPartUpdate; /* was it a cross-partition update? */ - TU_UpdateIndexes updateIndexes; /* Which index updates are required? */ + TU_UpdateIndexes updateIndexes; /* Which index updates are required? */ /* * Lock mode to acquire on the latest tuple version before performing @@ -881,7 +881,7 @@ ExecInsert(ModifyTableContext *context, { TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); TupleDesc plan_tdesc = - CreateTupleDescCopy(planSlot->tts_tupleDescriptor); + CreateTupleDescCopy(planSlot->tts_tupleDescriptor); resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = MakeSingleTupleTableSlot(tdesc, slot->tts_ops); diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index 0c6c912778..791cbd2372 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -352,7 +352,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) int colno; Datum value; int ordinalitycol = - ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; + ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; /* * Install the document as a possibly-toasted Datum into the tablefunc diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 4f0618f27a..310ac23e3a 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -2582,7 +2582,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) /* Check permission to call window function */ aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(), - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(wfunc->winfnoid)); @@ -2821,7 +2821,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (!OidIsValid(aggform->aggminvtransfn)) use_ma_code = false; /* sine qua non */ else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY && - aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) + aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) use_ma_code = true; /* decision forced by safety */ else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) use_ma_code = false; /* non-moving frame head */ @@ -2871,7 +2871,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, ReleaseSysCache(procTuple); aclresult = 
object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(transfn_oid)); @@ -2880,7 +2880,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(invtransfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(invtransfn_oid)); @@ -2890,7 +2890,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(finalfn_oid)) { aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner, - ACL_EXECUTE); + ACL_EXECUTE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 256632c985..33975687b3 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -3345,7 +3345,7 @@ SPI_register_trigger_data(TriggerData *tdata) if (tdata->tg_newtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); + palloc(sizeof(EphemeralNamedRelationData)); int rc; enr->md.name = tdata->tg_trigger->tgnewtable; @@ -3362,7 +3362,7 @@ SPI_register_trigger_data(TriggerData *tdata) if (tdata->tg_oldtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); + palloc(sizeof(EphemeralNamedRelationData)); int rc; enr->md.name = tdata->tg_trigger->tgoldtable; diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index a8b73a9cf1..04ae3052a8 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -799,9 +799,9 @@ llvm_session_initialize(void) LLVMInitializeNativeAsmParser(); /* - * When targeting an LLVM version with opaque pointers enabled by - * default, turn them off for the context we build our code in. We don't - * need to do so for other contexts (e.g. llvm_ts_context). Once the IR is + * When targeting an LLVM version with opaque pointers enabled by default, + * turn them off for the context we build our code in. We don't need to + * do so for other contexts (e.g. llvm_ts_context). Once the IR is * generated, it carries the necessary information. 
*/ #if LLVM_VERSION_MAJOR > 14 @@ -1118,7 +1118,7 @@ llvm_resolve_symbol(const char *symname, void *ctx) static LLVMErrorRef llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx, - LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind, + LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind, LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags, LLVMOrcCLookupSet LookupSet, size_t LookupSetSize) { @@ -1175,7 +1175,7 @@ static LLVMOrcObjectLayerRef llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple) { LLVMOrcObjectLayerRef objlayer = - LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); + LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES); #if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER if (jit_debugging_support) diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c index 6b15588da6..15d4a7b431 100644 --- a/src/backend/jit/llvm/llvmjit_deform.c +++ b/src/backend/jit/llvm/llvmjit_deform.c @@ -650,7 +650,7 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, { LLVMValueRef v_tmp_loaddata; LLVMTypeRef vartypep = - LLVMPointerType(LLVMIntType(att->attlen * 8), 0); + LLVMPointerType(LLVMIntType(att->attlen * 8), 0); v_tmp_loaddata = LLVMBuildPointerCast(b, v_attdatap, vartypep, ""); diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c index 774db57ae2..00d7b8110b 100644 --- a/src/backend/jit/llvm/llvmjit_expr.c +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -1047,7 +1047,7 @@ llvm_compile_expr(ExprState *state) else { LLVMValueRef v_value = - LLVMBuildLoad(b, v_resvaluep, ""); + LLVMBuildLoad(b, v_resvaluep, ""); v_value = LLVMBuildZExt(b, LLVMBuildICmp(b, LLVMIntEQ, @@ -2127,8 +2127,7 @@ llvm_compile_expr(ExprState *state) /* * pergroup = &aggstate->all_pergroups - * [op->d.agg_trans.setoff] - * [op->d.agg_trans.transno]; + * [op->d.agg_trans.setoff] [op->d.agg_trans.transno]; */ v_allpergroupsp = l_load_struct_gep(b, v_aggstatep, diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c index 7f52e1ee23..43d45810cd 100644 --- a/src/backend/libpq/be-secure-gssapi.c +++ b/src/backend/libpq/be-secure-gssapi.c @@ -527,8 +527,8 @@ secure_open_gssapi(Port *port) /* * Use the configured keytab, if there is one. As we now require MIT - * Kerberos, we might consider using the credential store extensions in the - * future instead of the environment variable. + * Kerberos, we might consider using the credential store extensions in + * the future instead of the environment variable. */ if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0') { diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index dc4153a2f2..05276ab95c 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -1104,8 +1104,8 @@ prepare_cert_name(char *name) if (namelen > MAXLEN) { /* - * Keep the end of the name, not the beginning, since the most specific - * field is likely to give users the most information. + * Keep the end of the name, not the beginning, since the most + * specific field is likely to give users the most information. 
*/ truncated = name + namelen - MAXLEN; truncated[0] = truncated[1] = truncated[2] = '.'; @@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx) /* * Get the Subject and Issuer for logging, but don't let maliciously - * huge certs flood the logs, and don't reflect non-ASCII bytes into it - * either. + * huge certs flood the logs, and don't reflect non-ASCII bytes into + * it either. */ subject = X509_NAME_to_cstring(X509_get_subject_name(cert)); sub_prepared = prepare_cert_name(subject); diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index d786a01835..1ef113649f 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -2693,8 +2693,9 @@ load_hba(void) if (!ok) { /* - * File contained one or more errors, so bail out. MemoryContextDelete - * is enough to clean up everything, including regexes. + * File contained one or more errors, so bail out. + * MemoryContextDelete is enough to clean up everything, including + * regexes. */ MemoryContextDelete(hbacxt); return false; @@ -3056,8 +3057,9 @@ load_ident(void) if (!ok) { /* - * File contained one or more errors, so bail out. MemoryContextDelete - * is enough to clean up everything, including regexes. + * File contained one or more errors, so bail out. + * MemoryContextDelete is enough to clean up everything, including + * regexes. */ MemoryContextDelete(ident_context); return false; diff --git a/src/backend/nodes/gen_node_support.pl b/src/backend/nodes/gen_node_support.pl index ecbcadb8bf..b89b491d35 100644 --- a/src/backend/nodes/gen_node_support.pl +++ b/src/backend/nodes/gen_node_support.pl @@ -106,7 +106,7 @@ my @nodetag_only_files = qw( # In HEAD, these variables should be left undef, since we don't promise # ABI stability during development. -my $last_nodetag = undef; +my $last_nodetag = undef; my $last_nodetag_no = undef; # output file names @@ -161,9 +161,9 @@ push @node_types, qw(List); # (Ideally we'd mark List as "special copy/equal" not "no copy/equal". # But until there's other use-cases for that, just hot-wire the tests # that would need to distinguish.) -push @no_copy, qw(List); -push @no_equal, qw(List); -push @no_query_jumble, qw(List); +push @no_copy, qw(List); +push @no_equal, qw(List); +push @no_query_jumble, qw(List); push @special_read_write, qw(List); # Nodes with custom copy/equal implementations are skipped from @@ -230,7 +230,7 @@ foreach my $infile (@ARGV) } $file_content .= $raw_file_content; - my $lineno = 0; + my $lineno = 0; my $prevline = ''; foreach my $line (split /\n/, $file_content) { @@ -247,7 +247,7 @@ foreach my $infile (@ARGV) if ($line =~ /;$/) { # found the end, re-attach any previous line(s) - $line = $prevline . $line; + $line = $prevline . 
$line; $prevline = ''; } elsif ($prevline eq '' @@ -272,7 +272,7 @@ foreach my $infile (@ARGV) if ($subline == 1) { $is_node_struct = 0; - $supertype = undef; + $supertype = undef; next if $line eq '{'; die "$infile:$lineno: expected opening brace\n"; } @@ -280,7 +280,7 @@ foreach my $infile (@ARGV) elsif ($subline == 2 && $line =~ /^\s*pg_node_attr\(([\w(), ]*)\)$/) { - $node_attrs = $1; + $node_attrs = $1; $node_attrs_lineno = $lineno; # hack: don't count the line $subline--; @@ -296,8 +296,8 @@ foreach my $infile (@ARGV) } elsif ($line =~ /\s*(\w+)\s+(\w+);/ and elem $1, @node_types) { - $is_node_struct = 1; - $supertype = $1; + $is_node_struct = 1; + $supertype = $1; $supertype_field = $2; next; } @@ -339,7 +339,7 @@ foreach my $infile (@ARGV) } elsif ($attr eq 'no_copy_equal') { - push @no_copy, $in_struct; + push @no_copy, $in_struct; push @no_equal, $in_struct; } elsif ($attr eq 'no_query_jumble') @@ -373,7 +373,7 @@ foreach my $infile (@ARGV) push @node_types, $in_struct; # field names, types, attributes - my @f = @my_fields; + my @f = @my_fields; my %ft = %my_field_types; my %fa = %my_field_attrs; @@ -405,7 +405,7 @@ foreach my $infile (@ARGV) unshift @f, @superfields; } # save in global info structure - $node_type_info{$in_struct}->{fields} = \@f; + $node_type_info{$in_struct}->{fields} = \@f; $node_type_info{$in_struct}->{field_types} = \%ft; $node_type_info{$in_struct}->{field_attrs} = \%fa; @@ -428,9 +428,9 @@ foreach my $infile (@ARGV) } # start new cycle - $in_struct = undef; - $node_attrs = ''; - @my_fields = (); + $in_struct = undef; + $node_attrs = ''; + @my_fields = (); %my_field_types = (); %my_field_attrs = (); } @@ -441,10 +441,10 @@ foreach my $infile (@ARGV) { if ($is_node_struct) { - my $type = $1; - my $name = $2; + my $type = $1; + my $name = $2; my $array_size = $3; - my $attrs = $4; + my $attrs = $4; # strip "const" $type =~ s/^const\s*//; @@ -499,9 +499,9 @@ foreach my $infile (@ARGV) { if ($is_node_struct) { - my $type = $1; - my $name = $2; - my $args = $3; + my $type = $1; + my $name = $2; + my $args = $3; my $attrs = $4; my @attrs; @@ -540,20 +540,20 @@ foreach my $infile (@ARGV) if ($line =~ /^(?:typedef )?struct (\w+)$/ && $1 ne 'Node') { $in_struct = $1; - $subline = 0; + $subline = 0; } # one node type typedef'ed directly from another elsif ($line =~ /^typedef (\w+) (\w+);$/ and elem $1, @node_types) { my $alias_of = $1; - my $n = $2; + my $n = $2; # copy everything over push @node_types, $n; - my @f = @{ $node_type_info{$alias_of}->{fields} }; + my @f = @{ $node_type_info{$alias_of}->{fields} }; my %ft = %{ $node_type_info{$alias_of}->{field_types} }; my %fa = %{ $node_type_info{$alias_of}->{field_attrs} }; - $node_type_info{$n}->{fields} = \@f; + $node_type_info{$n}->{fields} = \@f; $node_type_info{$n}->{field_types} = \%ft; $node_type_info{$n}->{field_attrs} = \%fa; } @@ -608,7 +608,7 @@ open my $nt, '>', "$output_path/nodetags.h$tmpext" printf $nt $header_comment, 'nodetags.h'; -my $tagno = 0; +my $tagno = 0; my $last_tag = undef; foreach my $n (@node_types, @extra_tags) { @@ -669,7 +669,7 @@ foreach my $n (@node_types) { next if elem $n, @abstract_types; next if elem $n, @nodetag_only; - my $struct_no_copy = (elem $n, @no_copy); + my $struct_no_copy = (elem $n, @no_copy); my $struct_no_equal = (elem $n, @no_equal); next if $struct_no_copy && $struct_no_equal; @@ -705,15 +705,15 @@ _equal${n}(const $n *a, const $n *b) # print instructions for each field foreach my $f (@{ $node_type_info{$n}->{fields} }) { - my $t = 
$node_type_info{$n}->{field_types}{$f}; - my @a = @{ $node_type_info{$n}->{field_attrs}{$f} }; - my $copy_ignore = $struct_no_copy; + my $t = $node_type_info{$n}->{field_types}{$f}; + my @a = @{ $node_type_info{$n}->{field_attrs}{$f} }; + my $copy_ignore = $struct_no_copy; my $equal_ignore = $struct_no_equal; # extract per-field attributes my $array_size_field; my $copy_as_field; - my $copy_as_scalar = 0; + my $copy_as_scalar = 0; my $equal_as_scalar = 0; foreach my $a (@a) { @@ -768,7 +768,7 @@ _equal${n}(const $n *a, const $n *b) # select instructions by field type if ($t eq 'char*') { - print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore; + print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore; print $eff "\tCOMPARE_STRING_FIELD($f);\n" unless $equal_ignore; } elsif ($t eq 'Bitmapset*' || $t eq 'Relids') @@ -779,7 +779,7 @@ _equal${n}(const $n *a, const $n *b) } elsif ($t eq 'int' && $f =~ 'location$') { - print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore; + print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore; print $eff "\tCOMPARE_LOCATION_FIELD($f);\n" unless $equal_ignore; } elsif (elem $t, @scalar_types or elem $t, @enum_types) @@ -828,7 +828,7 @@ _equal${n}(const $n *a, const $n *b) elsif ($t eq 'function pointer') { # we can copy and compare as a scalar - print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore; + print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore; print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore; } # node type @@ -846,13 +846,13 @@ _equal${n}(const $n *a, const $n *b) and $1 ne 'List' and !$equal_ignore; - print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore; + print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore; print $eff "\tCOMPARE_NODE_FIELD($f);\n" unless $equal_ignore; } # array (inline) elsif ($t =~ /^\w+\[\w+\]$/) { - print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore; + print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore; print $eff "\tCOMPARE_ARRAY_FIELD($f);\n" unless $equal_ignore; } elsif ($t eq 'struct CustomPathMethods*' @@ -861,7 +861,7 @@ _equal${n}(const $n *a, const $n *b) # Fields of these types are required to be a pointer to a # static table of callback functions. So we don't copy # the table itself, just reference the original one. 
- print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore; + print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore; print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore; } else @@ -1073,7 +1073,7 @@ _read${n}(void) { print $off "\tWRITE_FLOAT_FIELD($f.startup);\n"; print $off "\tWRITE_FLOAT_FIELD($f.per_tuple);\n"; - print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read; + print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read; print $rff "\tREAD_FLOAT_FIELD($f.per_tuple);\n" unless $no_read; } elsif ($t eq 'Selectivity') @@ -1278,8 +1278,8 @@ _jumble${n}(JumbleState *jstate, Node *node) # print instructions for each field foreach my $f (@{ $node_type_info{$n}->{fields} }) { - my $t = $node_type_info{$n}->{field_types}{$f}; - my @a = @{ $node_type_info{$n}->{field_attrs}{$f} }; + my $t = $node_type_info{$n}->{field_types}{$f}; + my @a = @{ $node_type_info{$n}->{field_attrs}{$f} }; my $query_jumble_ignore = $struct_no_query_jumble; my $query_jumble_location = 0; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 0b271dae84..ef475d95a1 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -2011,7 +2011,7 @@ cost_incremental_sort(Path *path, { PathKey *key = (PathKey *) lfirst(l); EquivalenceMember *member = (EquivalenceMember *) - linitial(key->pk_eclass->ec_members); + linitial(key->pk_eclass->ec_members); /* * Check if the expression contains Var with "varno 0" so that we diff --git a/src/backend/optimizer/util/appendinfo.c b/src/backend/optimizer/util/appendinfo.c index c1b1557570..f456b3b0a4 100644 --- a/src/backend/optimizer/util/appendinfo.c +++ b/src/backend/optimizer/util/appendinfo.c @@ -370,7 +370,7 @@ adjust_appendrel_attrs_mutator(Node *node, if (leaf_relid) { RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) - list_nth(context->root->row_identity_vars, var->varattno - 1); + list_nth(context->root->row_identity_vars, var->varattno - 1); if (bms_is_member(leaf_relid, ridinfo->rowidrels)) { diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 04ea04b5b6..32a407f54b 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -1158,7 +1158,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, { /* UPDATE/DELETE/MERGE row identity vars are always needed */ RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *) - list_nth(root->row_identity_vars, var->varattno - 1); + list_nth(root->row_identity_vars, var->varattno - 1); /* Update reltarget width estimate from RowIdentityVarInfo */ joinrel->reltarget->width += ridinfo->rowidwidth; diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl index ddfdf20d33..e9b6f40eaa 100644 --- a/src/backend/parser/check_keywords.pl +++ b/src/backend/parser/check_keywords.pl @@ -9,7 +9,7 @@ use strict; use warnings; -my $gram_filename = $ARGV[0]; +my $gram_filename = $ARGV[0]; my $kwlist_filename = $ARGV[1]; my $errors = 0; @@ -47,10 +47,10 @@ $, = ' '; # set output field separator $\ = "\n"; # set output record separator my %keyword_categories; -$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD'; -$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD'; +$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD'; +$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD'; $keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD'; 
-$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD'; +$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD'; open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename"); @@ -183,7 +183,7 @@ kwlist_line: while (<$kwlist>) if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/) { my ($kwstring) = $1; - my ($kwname) = $2; + my ($kwname) = $2; my ($kwcat_id) = $3; my ($collabel) = $4; diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 0b3632735b..346fd272b6 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -3357,7 +3357,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format, if (format->format_type == JS_FORMAT_JSON) { JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ? - format->encoding : JS_ENC_UTF8; + format->encoding : JS_ENC_UTF8; if (targettype != BYTEAOID && format->encoding != JS_ENC_DEFAULT) diff --git a/src/backend/parser/parse_merge.c b/src/backend/parser/parse_merge.c index d8866373b8..91b1156d99 100644 --- a/src/backend/parser/parse_merge.c +++ b/src/backend/parser/parse_merge.c @@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt) /* * Set up the MERGE target table. The target table is added to the - * namespace below and to joinlist in transform_MERGE_to_join, so don't - * do it here. + * namespace below and to joinlist in transform_MERGE_to_join, so don't do + * it here. */ qry->resultRelation = setTargetTable(pstate, stmt->relation, stmt->relation->inh, diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index b1255e3b70..d67580fc77 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -993,7 +993,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) { aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TYPE, RelationGetRelationName(relation)); @@ -2355,7 +2355,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) * mentioned above. */ Datum attoptions = - get_attoptions(RelationGetRelid(index_rel), i + 1); + get_attoptions(RelationGetRelid(index_rel), i + 1); defopclass = GetDefaultOpClass(attform->atttypid, index_rel->rd_rel->relam); diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index c685621416..7c5d9110fb 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -2340,9 +2340,9 @@ merge_default_partitions(PartitionMap *outer_map, /* * The default partitions have to be joined with each other, so merge * them. Note that each of the default partitions isn't merged yet - * (see, process_outer_partition()/process_inner_partition()), so - * they should be merged successfully. The merged partition will act - * as the default partition of the join relation. + * (see, process_outer_partition()/process_inner_partition()), so they + * should be merged successfully. The merged partition will act as + * the default partition of the join relation. */ Assert(outer_merged_index == -1); Assert(inner_merged_index == -1); @@ -3193,7 +3193,7 @@ check_new_partition_bound(char *relname, Relation parent, * datums list. 
*/ PartitionRangeDatum *datum = - list_nth(spec->upperdatums, abs(cmpval) - 1); + list_nth(spec->upperdatums, abs(cmpval) - 1); /* * The new partition overlaps with the diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c index 509587636e..6f9c2765d6 100644 --- a/src/backend/postmaster/fork_process.c +++ b/src/backend/postmaster/fork_process.c @@ -58,8 +58,8 @@ fork_process(void) /* * We start postmaster children with signals blocked. This allows them to * install their own handlers before unblocking, to avoid races where they - * might run the postmaster's handler and miss an important control signal. - * With more analysis this could potentially be relaxed. + * might run the postmaster's handler and miss an important control + * signal. With more analysis this could potentially be relaxed. */ sigprocmask(SIG_SETMASK, &BlockSig, &save_mask); result = fork(); diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c index 38c09b1123..9087ef95af 100644 --- a/src/backend/regex/regc_lex.c +++ b/src/backend/regex/regc_lex.c @@ -759,6 +759,7 @@ lexescape(struct vars *v) RETV(PLAIN, c); break; default: + /* * Throw an error for unrecognized ASCII alpha escape sequences, * which reserves them for future use if needed. diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 052505e46f..dc9c5c82d9 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password) if (must_use_password) { - bool uses_password = false; + bool uses_password = false; for (opt = opts; opt->keyword != NULL; ++opt) { diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index beef399b42..d91055a440 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_PARAMETER_CHANGE: { xl_parameter_change *xlrec = - (xl_parameter_change *) XLogRecGetData(buf->record); + (xl_parameter_change *) XLogRecGetData(buf->record); /* * If wal_level on the primary is reduced to less than @@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) * invalidated when this WAL record is replayed; and further, * slot creation fails when wal_level is not sufficient; but * all these operations are not synchronized, so a logical - * slot may creep in while the wal_level is being - * reduced. Hence this extra check. + * slot may creep in while the wal_level is being reduced. + * Hence this extra check. 
*/ if (xlrec->wal_level < WAL_LEVEL_LOGICAL) { @@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, SnapBuild *builder = ctx->snapshot_builder; XLogRecPtr origin_lsn = parsed->origin_lsn; TimestampTz prepare_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + RepOriginId origin_id = XLogRecGetOrigin(buf->record); int i; TransactionId xid = parsed->twophase_xid; @@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, int i; XLogRecPtr origin_lsn = InvalidXLogRecPtr; TimestampTz abort_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + RepOriginId origin_id = XLogRecGetOrigin(buf->record); bool skip_xact; if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 7e1f677f7a..41243d0187 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin, MemoryContext old_context; /* - * On a standby, this check is also required while creating the - * slot. Check the comments in the function. + * On a standby, this check is also required while creating the slot. + * Check the comments in the function. */ CheckLogicalDecodingRequirements(); diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 2c04c8707d..b0255ffd25 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record) case XLOG_REPLORIGIN_SET: { xl_replorigin_set *xlrec = - (xl_replorigin_set *) XLogRecGetData(record); + (xl_replorigin_set *) XLogRecGetData(record); replorigin_advance(xlrec->node_id, xlrec->remote_lsn, record->EndRecPtr, diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index b85b890010..26d252bd87 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state) { dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node); ReorderBufferChange *next_change = - dlist_container(ReorderBufferChange, node, next); + dlist_container(ReorderBufferChange, node, next); /* txn stays the same */ state->entries[off].lsn = next_change->lsn; @@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state) { /* successfully restored changes from disk */ ReorderBufferChange *next_change = - dlist_head_element(ReorderBufferChange, node, - &entry->txn->changes); + dlist_head_element(ReorderBufferChange, node, + &entry->txn->changes); elog(DEBUG2, "restored %u/%u changes from disk", (uint32) entry->txn->nentries_mem, @@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) dclist_delete_from(&rb->catchange_txns, &txn->catchange_node); /* now remove reference from buffer */ - hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found); + hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found); Assert(found); /* remove entries spilled to disk */ @@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb) ReorderBufferTXN *txn; /* - * Bail out if logical_replication_mode is buffered and we haven't exceeded - * the memory limit. 
+ * Bail out if logical_replication_mode is buffered and we haven't + * exceeded the memory limit. */ if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED && rb->size < logical_decoding_work_mem * 1024L) @@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, { char *data; Size inval_size = sizeof(SharedInvalidationMessage) * - change->data.inval.ninvalidations; + change->data.inval.ninvalidations; sz += inval_size; @@ -4010,10 +4010,10 @@ ReorderBufferStreamTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) * After that we need to reuse the snapshot from the previous run. * * Unlike DecodeCommit which adds xids of all the subtransactions in - * snapshot's xip array via SnapBuildCommitTxn, we can't do that here - * but we do add them to subxip array instead via ReorderBufferCopySnap. - * This allows the catalog changes made in subtransactions decoded till - * now to be visible. + * snapshot's xip array via SnapBuildCommitTxn, we can't do that here but + * we do add them to subxip array instead via ReorderBufferCopySnap. This + * allows the catalog changes made in subtransactions decoded till now to + * be visible. */ if (txn->snapshot_now == NULL) { @@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, dlist_foreach_modify(cleanup_iter, &txn->changes) { ReorderBufferChange *cleanup = - dlist_container(ReorderBufferChange, node, cleanup_iter.cur); + dlist_container(ReorderBufferChange, node, cleanup_iter.cur); dlist_delete(&cleanup->node); ReorderBufferReturnChange(rb, cleanup, true); @@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_INVALIDATION: { Size inval_size = sizeof(SharedInvalidationMessage) * - change->data.inval.ninvalidations; + change->data.inval.ninvalidations; change->data.inval.invalidations = MemoryContextAlloc(rb->context, inval_size); @@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn) dlist_foreach_modify(it, &ent->chunks) { ReorderBufferChange *change = - dlist_container(ReorderBufferChange, node, it.cur); + dlist_container(ReorderBufferChange, node, it.cur); dlist_delete(&change->node); ReorderBufferReturnChange(rb, change, true); diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 62542827e4..0786bb0ab7 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder) Assert(builder->building_full_snapshot); /* don't allow older snapshots */ - InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */ + InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */ if (HaveRegisteredOrActiveSnapshot()) elog(ERROR, "cannot build an initial slot snapshot when snapshots exist"); Assert(!HistoricSnapshotActive()); @@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn */ /* - * xl_running_xacts record is older than what we can use, we might not have - * all necessary catalog rows anymore. + * xl_running_xacts record is older than what we can use, we might not + * have all necessary catalog rows anymore. 
*/ if (TransactionIdIsNormal(builder->initial_xmin_horizon) && NormalTransactionIdPrecedes(running->oldestRunningXid, diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 0c71ae9ba7..c56d42dcd2 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * the lock. */ int nsyncworkers = - logicalrep_sync_worker_count(MyLogicalRepWorker->subid); + logicalrep_sync_worker_count(MyLogicalRepWorker->subid); /* Now safe to release the LWLock */ LWLockRelease(LogicalRepWorkerLock); diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 4b67098814..78926f8647 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -2399,7 +2399,7 @@ apply_handle_insert(StringInfo s) LogicalRepRelMapEntry *rel; LogicalRepTupleData newtup; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; TupleTableSlot *remoteslot; @@ -2547,7 +2547,7 @@ apply_handle_update(StringInfo s) { LogicalRepRelMapEntry *rel; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; LogicalRepTupleData oldtup; @@ -2732,7 +2732,7 @@ apply_handle_delete(StringInfo s) LogicalRepRelMapEntry *rel; LogicalRepTupleData oldtup; LogicalRepRelId relid; - UserContext ucxt; + UserContext ucxt; ApplyExecutionData *edata; EState *estate; TupleTableSlot *remoteslot; @@ -3079,8 +3079,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata, if (map) { TupleConversionMap *PartitionToRootMap = - convert_tuples_by_name(RelationGetDescr(partrel), - RelationGetDescr(parentrel)); + convert_tuples_by_name(RelationGetDescr(partrel), + RelationGetDescr(parentrel)); remoteslot = execute_attr_map_slot(PartitionToRootMap->attrMap, @@ -3414,7 +3414,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush, dlist_foreach_modify(iter, &lsn_mapping) { FlushPosition *pos = - dlist_container(FlushPosition, node, iter.cur); + dlist_container(FlushPosition, node, iter.cur); *write = pos->remote_end; @@ -4702,11 +4702,11 @@ ApplyWorkerMain(Datum main_arg) ereport(DEBUG1, (errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s", - MySubscription->name, - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" : - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" : - MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" : - "?"))); + MySubscription->name, + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" : + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" : + MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" : + "?"))); } else { @@ -5080,10 +5080,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo) } /* - * If we are processing this transaction using a parallel apply worker then - * either we send the changes to the parallel worker or if the worker is busy - * then serialize the changes to the file which will later be processed by - * the parallel worker. 
+ * If we are processing this transaction using a parallel apply worker + * then either we send the changes to the parallel worker or if the worker + * is busy then serialize the changes to the file which will later be + * processed by the parallel worker. */ *winfo = pa_find_worker(xid); @@ -5097,9 +5097,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo) } /* - * If there is no parallel worker involved to process this transaction then - * we either directly apply the change or serialize it to a file which will - * later be applied when the transaction finish message is processed. + * If there is no parallel worker involved to process this transaction + * then we either directly apply the change or serialize it to a file + * which will later be applied when the transaction finish message is + * processed. */ else if (in_streamed_transaction) { diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index f88389de84..b08ca55041 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications, * are multiple lists (one for each operation) to which row filters will * be appended. * - * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row - * filter expression" so it takes precedence. + * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter + * expression" so it takes precedence. */ foreach(lc, publications) { diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index c263a59690..0ea71b5c43 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -330,7 +330,7 @@ static void SyncRepQueueInsert(int mode) { dlist_head *queue; - dlist_iter iter; + dlist_iter iter; Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE); queue = &WalSndCtl->SyncRepQueue[mode]; @@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode) dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode]) { - PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur); + PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur); /* * Assume the queue is ordered by LSN diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 980dc1816f..0e4f76efa8 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -3548,7 +3548,7 @@ rewriteTargetView(Query *parsetree, Relation view) if (parsetree->withCheckOptions != NIL) { WithCheckOption *parent_wco = - (WithCheckOption *) linitial(parsetree->withCheckOptions); + (WithCheckOption *) linitial(parsetree->withCheckOptions); if (parent_wco->cascaded) { diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index 569c1c9467..5c3fe4eda2 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -581,7 +581,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_restrictive) { List *hook_policies = - (*row_security_policy_hook_restrictive) (cmd, relation); + (*row_security_policy_hook_restrictive) (cmd, relation); /* * As with built-in restrictive policies, we sort any hook-provided @@ -603,7 +603,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_permissive) { List *hook_policies = - (*row_security_policy_hook_permissive) (cmd, relation); + 
(*row_security_policy_hook_permissive) (cmd, relation); foreach(item, hook_policies) { diff --git a/src/backend/snowball/snowball_create.pl b/src/backend/snowball/snowball_create.pl index f4b58ada1c..35d1cd9621 100644 --- a/src/backend/snowball/snowball_create.pl +++ b/src/backend/snowball/snowball_create.pl @@ -10,34 +10,34 @@ my $input_path = ''; my $depfile; our @languages = qw( - arabic - armenian - basque - catalan - danish - dutch - english - finnish - french - german - greek - hindi - hungarian - indonesian - irish - italian - lithuanian - nepali - norwegian - portuguese - romanian - russian - serbian - spanish - swedish - tamil - turkish - yiddish + arabic + armenian + basque + catalan + danish + dutch + english + finnish + french + german + greek + hindi + hungarian + indonesian + irish + italian + lithuanian + nepali + norwegian + portuguese + romanian + russian + serbian + spanish + swedish + tamil + turkish + yiddish ); # Names of alternative dictionaries for all-ASCII words. If not @@ -48,13 +48,12 @@ our @languages = qw( our %ascii_languages = ( 'hindi' => 'english', - 'russian' => 'english', -); + 'russian' => 'english',); GetOptions( - 'depfile' => \$depfile, - 'outdir:s' => \$outdir_path, - 'input:s' => \$input_path) || usage(); + 'depfile' => \$depfile, + 'outdir:s' => \$outdir_path, + 'input:s' => \$input_path) || usage(); # Make sure input_path ends in a slash if needed. if ($input_path ne '' && substr($input_path, -1) ne '/') @@ -110,8 +109,8 @@ sub GenerateTsearchFiles foreach my $lang (@languages) { my $asclang = $ascii_languages{$lang} || $lang; - my $txt = $tmpl; - my $stop = ''; + my $txt = $tmpl; + my $stop = ''; my $stopword_path = "$input_path/stopwords/$lang.stop"; if (-s "$stopword_path") diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index 54e3bb4aa2..28b52d8aa1 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -2237,8 +2237,8 @@ compute_expr_stats(Relation onerel, double totalrows, if (tcnt > 0) { AttributeOpts *aopt = - get_attribute_options(stats->attr->attrelid, - stats->attr->attnum); + get_attribute_options(stats->attr->attrelid, + stats->attr->attnum); stats->exprvals = exprvals; stats->exprnulls = exprnulls; diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 0bbf09564a..aafec4a09d 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2667,7 +2667,7 @@ BufferSync(int flags) { BufferDesc *bufHdr = NULL; CkptTsStatus *ts_stat = (CkptTsStatus *) - DatumGetPointer(binaryheap_first(ts_heap)); + DatumGetPointer(binaryheap_first(ts_heap)); buf_id = CkptBufferIds[ts_stat->index].buf_id; Assert(buf_id != -1); diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index 84ead85942..41ab64100e 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -98,8 +98,7 @@ struct BufFile /* * XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid - * wasting per-file alignment padding when some users create many - * files. + * wasting per-file alignment padding when some users create many files. 
*/ PGAlignedBlock buffer; }; diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c index f0965c3481..6399fa2ad5 100644 --- a/src/backend/storage/ipc/dsm_impl.c +++ b/src/backend/storage/ipc/dsm_impl.c @@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size) /* * Block all blockable signals, except SIGQUIT. posix_fallocate() can run * for quite a long time, and is an all-or-nothing operation. If we - * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery - * conflicts), the retry loop might never succeed. + * allowed SIGUSR1 to interrupt us repeatedly (for example, due to + * recovery conflicts), the retry loop might never succeed. */ if (IsUnderPostmaster) sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask); pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE); #if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__) + /* * On Linux, a shm_open fd is backed by a tmpfs file. If we were to use * ftruncate, the file would contain a hole. Accessing memory backed by a @@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size) * SIGBUS later. * * We still use a traditional EINTR retry loop to handle SIGCONT. - * posix_fallocate() doesn't restart automatically, and we don't want - * this to fail if you attach a debugger. + * posix_fallocate() doesn't restart automatically, and we don't want this + * to fail if you attach a debugger. */ do { @@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size) } while (rc == EINTR); /* - * The caller expects errno to be set, but posix_fallocate() doesn't - * set it. Instead it returns error numbers directly. So set errno, - * even though we'll also return rc to indicate success or failure. + * The caller expects errno to be set, but posix_fallocate() doesn't set + * it. Instead it returns error numbers directly. So set errno, even + * though we'll also return rc to indicate success or failure. 
*/ errno = rc; #else diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl index c124f49d80..863c88252b 100644 --- a/src/backend/storage/lmgr/generate-lwlocknames.pl +++ b/src/backend/storage/lmgr/generate-lwlocknames.pl @@ -10,10 +10,9 @@ use Getopt::Long; my $output_path = '.'; my $lastlockidx = -1; -my $continue = "\n"; +my $continue = "\n"; -GetOptions( - 'outdir:s' => \$output_path); +GetOptions('outdir:s' => \$output_path); open my $lwlocknames, '<', $ARGV[0] or die; @@ -48,7 +47,7 @@ while (<$lwlocknames>) $trimmedlockname =~ s/Lock$//; die "lock names must end with 'Lock'" if $trimmedlockname eq $lockname; - die "lwlocknames.txt not in order" if $lockidx < $lastlockidx; + die "lwlocknames.txt not in order" if $lockidx < $lastlockidx; die "lwlocknames.txt has duplicates" if $lockidx == $lastlockidx; while ($lastlockidx < $lockidx - 1) @@ -59,7 +58,7 @@ while (<$lwlocknames>) } printf $c "%s \"%s\"", $continue, $trimmedlockname; $lastlockidx = $lockidx; - $continue = ",\n"; + $continue = ",\n"; print $h "#define $lockname (&MainLWLockArray[$lockidx].lock)\n"; } @@ -71,7 +70,8 @@ printf $h "#define NUM_INDIVIDUAL_LWLOCKS %s\n", $lastlockidx + 1; close $h; close $c; -rename($htmp, "$output_path/lwlocknames.h") || die "rename: $htmp to $output_path/lwlocknames.h: $!"; +rename($htmp, "$output_path/lwlocknames.h") + || die "rename: $htmp to $output_path/lwlocknames.h: $!"; rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!"; close $lwlocknames; diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 42595b38b2..193f50fc0f 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data) dclist_foreach(proc_iter, waitQueue) { PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur); + if (queued_proc == blocked_proc) break; data->waiter_pids[data->npids++] = queued_proc->pid; diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 59347ab951..01d738f306 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock) LWLockWaitListLock(lock); /* - * Remove ourselves from the waitlist, unless we've already been - * removed. The removal happens with the wait list lock held, so there's - * no race in this check. + * Remove ourselves from the waitlist, unless we've already been removed. + * The removal happens with the wait list lock held, so there's no race in + * this check. 
*/ on_waitlist = MyProc->lwWaiting == LW_WS_WAITING; if (on_waitlist) diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 203b189559..533f616541 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -625,7 +625,7 @@ RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer) dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (conflict->sxactIn == writer) return true; @@ -708,7 +708,7 @@ FlagSxactUnsafe(SERIALIZABLEXACT *sxact) dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); Assert(!SxactIsReadOnly(conflict->sxactOut)); Assert(sxact == conflict->sxactIn); @@ -1587,7 +1587,7 @@ GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size) dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); output[num_written++] = possibleUnsafeConflict->sxactOut->pid; @@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot, /* * If we didn't find any possibly unsafe conflicts because every * uncommitted writable transaction turned out to be doomed, then we - * can "opt out" immediately. See comments above the earlier check for - * PredXact->WritableSxactCount == 0. + * can "opt out" immediately. See comments above the earlier check + * for PredXact->WritableSxactCount == 0. */ if (dlist_is_empty(&sxact->possibleUnsafeConflicts)) { @@ -2613,7 +2613,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); bool found; dlist_delete(&(predlock->xactLink)); @@ -2754,7 +2754,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, dlist_foreach_modify(iter, &oldtarget->predicateLocks) { PREDICATELOCK *oldpredlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); PREDICATELOCK *newpredlock; SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo; @@ -2976,7 +2976,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) dlist_foreach_modify(iter, &oldtarget->predicateLocks) { PREDICATELOCK *oldpredlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); PREDICATELOCK *newpredlock; SerCommitSeqNo oldCommitSeqNo; SERIALIZABLEXACT *oldXact; @@ -3194,7 +3194,7 @@ SetNewSxactGlobalXmin(void) dlist_foreach(iter, &PredXact->activeList) { SERIALIZABLEXACT *sxact = - dlist_container(SERIALIZABLEXACT, xactLink, iter.cur); + dlist_container(SERIALIZABLEXACT, xactLink, iter.cur); if (!SxactIsRolledBack(sxact) && !SxactIsCommitted(sxact) @@ -3440,7 +3440,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); 
Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut)); Assert(MySerializableXact == possibleUnsafeConflict->sxactIn); @@ -3471,7 +3471,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (isCommit && !SxactIsReadOnly(MySerializableXact) @@ -3496,7 +3496,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->inConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); if (!isCommit || SxactIsCommitted(conflict->sxactOut) @@ -3515,7 +3515,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts) { RWConflict possibleUnsafeConflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); roXact = possibleUnsafeConflict->sxactIn; Assert(MySerializableXact == possibleUnsafeConflict->sxactOut); @@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe) * xmin and purge any transactions which finished before this transaction * was launched. * - * For parallel queries in read-only transactions, it might run twice. - * We only release the reference on the first call. + * For parallel queries in read-only transactions, it might run twice. We + * only release the reference on the first call. */ needToClear = false; if ((partiallyReleasing || @@ -3641,7 +3641,7 @@ ClearOldPredicateLocks(void) dlist_foreach_modify(iter, FinishedSerializableTransactions) { SERIALIZABLEXACT *finishedSxact = - dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur); + dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur); if (!TransactionIdIsValid(PredXact->SxactGlobalXmin) || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore, @@ -3700,7 +3700,7 @@ ClearOldPredicateLocks(void) dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); bool canDoPartialCleanup; LWLockAcquire(SerializableXactHashLock, LW_SHARED); @@ -3787,7 +3787,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); PREDICATELOCKTAG tag; PREDICATELOCKTARGET *target; PREDICATELOCKTARGETTAG targettag; @@ -3864,7 +3864,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); if (summarize) conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN; @@ -3876,7 +3876,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, dlist_foreach_modify(iter, &sxact->inConflicts) { RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); if (summarize) conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT; @@ -4134,7 +4134,7 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag) dlist_foreach_modify(iter, &target->predicateLocks) { 
PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); SERIALIZABLEXACT *sxact = predlock->tag.myXact; if (sxact == MySerializableXact) @@ -4407,7 +4407,7 @@ CheckTableForSerializableConflictIn(Relation relation) dlist_foreach_modify(iter, &target->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, targetLink, iter.cur); + dlist_container(PREDICATELOCK, targetLink, iter.cur); if (predlock->tag.myXact != MySerializableXact && !RWConflictExists(predlock->tag.myXact, MySerializableXact)) @@ -4519,7 +4519,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, dlist_foreach(iter, &writer->outConflicts) { RWConflict conflict = - dlist_container(RWConflictData, outLink, iter.cur); + dlist_container(RWConflictData, outLink, iter.cur); SERIALIZABLEXACT *t2 = conflict->sxactIn; if (SxactIsPrepared(t2) @@ -4566,7 +4566,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts) { const RWConflict conflict = - dlist_container(RWConflictData, inLink, iter.cur); + dlist_container(RWConflictData, inLink, iter.cur); const SERIALIZABLEXACT *t0 = conflict->sxactOut; if (!SxactIsDoomed(t0) @@ -4664,7 +4664,7 @@ PreCommit_CheckForSerializationFailure(void) dlist_foreach(near_iter, &MySerializableXact->inConflicts) { RWConflict nearConflict = - dlist_container(RWConflictData, inLink, near_iter.cur); + dlist_container(RWConflictData, inLink, near_iter.cur); if (!SxactIsCommitted(nearConflict->sxactOut) && !SxactIsDoomed(nearConflict->sxactOut)) @@ -4674,7 +4674,7 @@ PreCommit_CheckForSerializationFailure(void) dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts) { RWConflict farConflict = - dlist_container(RWConflictData, inLink, far_iter.cur); + dlist_container(RWConflictData, inLink, far_iter.cur); if (farConflict->sxactOut == MySerializableXact || (!SxactIsCommitted(farConflict->sxactOut) @@ -4770,7 +4770,7 @@ AtPrepare_PredicateLocks(void) dlist_foreach(iter, &sxact->predicateLocks) { PREDICATELOCK *predlock = - dlist_container(PREDICATELOCK, xactLink, iter.cur); + dlist_container(PREDICATELOCK, xactLink, iter.cur); record.type = TWOPHASEPREDICATERECORD_LOCK; lockRecord->target = predlock->tag.myTarget->tag; diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 22b4278610..dac921219f 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -101,7 +101,7 @@ ProcGlobalShmemSize(void) { Size size = 0; Size TotalProcs = - add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); + add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts)); /* ProcGlobal */ size = add_size(size, sizeof(PROC_HDR)); @@ -331,7 +331,7 @@ InitProcess(void) if (!dlist_is_empty(procgloballist)) { - MyProc = (PGPROC*) dlist_pop_head_node(procgloballist); + MyProc = (PGPROC *) dlist_pop_head_node(procgloballist); SpinLockRelease(ProcStructLock); } else @@ -1009,7 +1009,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) uint32 hashcode = locallock->hashcode; LWLock *partitionLock = LockHashPartitionLock(hashcode); dclist_head *waitQueue = &lock->waitProcs; - PGPROC *insert_before = NULL; + PGPROC *insert_before = NULL; LOCKMASK myHeldLocks = MyProc->heldLocks; TimestampTz standbyWaitStart = 0; bool early_deadlock = false; @@ -1244,7 +1244,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) if 
(InHotStandby) { bool maybe_log_conflict = - (standbyWaitStart != 0 && !logged_recovery_conflict); + (standbyWaitStart != 0 && !logged_recovery_conflict); /* Set a timer and wait for that or for the lock to be granted */ ResolveRecoveryConflictWithLock(locallock->tag.lock, diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index 42e3501255..65bb22541c 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -549,7 +549,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, while (remblocks > 0) { - BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); + BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); off_t seekpos = (off_t) BLCKSZ * segstartblock; int numblocks; @@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, /* * Even if we don't want to use fallocate, we can still extend a * bit more efficiently than writing each 8kB block individually. - * pg_pwrite_zeros() (via FileZero()) uses - * pg_pwritev_with_retry() to avoid multiple writes or needing a - * zeroed buffer for the whole length of the extension. + * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry() + * to avoid multiple writes or needing a zeroed buffer for the + * whole length of the extension. */ ret = FileZero(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * numblocks, diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index fe4fd3a929..8a2cb55876 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -2256,7 +2256,7 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag) { /* prefix success */ char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? - VoidString : prefix->aff[j]->flag; + VoidString : prefix->aff[j]->flag; if (FindWord(Conf, pnewword, ff, flag)) cur += addToResult(forms, cur, pnewword); diff --git a/src/backend/utils/Gen_dummy_probes.pl b/src/backend/utils/Gen_dummy_probes.pl index 668b915a4d..f289b19344 100644 --- a/src/backend/utils/Gen_dummy_probes.pl +++ b/src/backend/utils/Gen_dummy_probes.pl @@ -44,7 +44,7 @@ sub Run() # Initialize. openARGV(); - $Hold = ''; + $Hold = ''; $CondReg = 0; $doPrint = $doAutoPrint; CYCLE: diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl index 2792373fed..764216c56d 100644 --- a/src/backend/utils/Gen_fmgrtab.pl +++ b/src/backend/utils/Gen_fmgrtab.pl @@ -24,7 +24,7 @@ my $output_path = ''; my $include_path; GetOptions( - 'output:s' => \$output_path, + 'output:s' => \$output_path, 'include-path:s' => \$include_path) || usage(); # Make sure output_path ends in a slash. @@ -34,7 +34,7 @@ if ($output_path ne '' && substr($output_path, -1) ne '/') } # Sanity check arguments. -die "No input files.\n" unless @ARGV; +die "No input files.\n" unless @ARGV; die "--include-path must be specified.\n" unless $include_path; # Read all the input files into internal data structures. 
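The md.c hunk above only re-wraps a comment, but the comment is worth unpacking: FileZero() bottoms out in pg_pwritev_with_retry(), which lets the server zero-extend a segment without allocating a zero buffer as large as the whole extension, by pointing many iovecs at one small zeroed block. Here is a rough standalone illustration of that trick against the Linux/BSD pwritev() call; the buffer sizes, names, and retry policy are invented for the sketch and are not the PostgreSQL implementation.

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

#define ZBUFSZ 8192             /* one zeroed block, reused for every iovec */
#define NIOVEC 8                /* keep well under IOV_MAX */

static int
zero_extend(int fd, off_t offset, size_t len)
{
    static const char zeros[ZBUFSZ];    /* static => all zero bytes */

    while (len > 0)
    {
        struct iovec iov[NIOVEC];
        int         iovcnt = 0;
        size_t      queued = 0;

        /* point several iovecs at the same zero block */
        while (queued < len && iovcnt < NIOVEC)
        {
            size_t  n = len - queued < ZBUFSZ ? len - queued : ZBUFSZ;

            iov[iovcnt].iov_base = (void *) zeros;
            iov[iovcnt].iov_len = n;
            iovcnt++;
            queued += n;
        }

        ssize_t written = pwritev(fd, iov, iovcnt, offset);

        if (written < 0)
        {
            perror("pwritev");
            return -1;
        }
        offset += written;      /* a short write just loops and retries */
        len -= (size_t) written;
    }
    return 0;
}

int
main(void)
{
    int     fd = open("zeroed.tmp", O_CREAT | O_RDWR | O_TRUNC, 0644);

    if (fd < 0 || zero_extend(fd, 0, 5 * ZBUFSZ + 123) != 0)
        return 1;
    close(fd);
    return 0;
}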
@@ -56,7 +56,7 @@ foreach my $datfile (@ARGV) my $catalog = Catalog::ParseHeader($header); my $catname = $catalog->{catname}; - my $schema = $catalog->{columns}; + my $schema = $catalog->{columns}; $catalogs{$catname} = $catalog; $catalog_data{$catname} = Catalog::ParseData($datfile, $schema, 0); @@ -72,14 +72,14 @@ foreach my $row (@{ $catalog_data{pg_proc} }) push @fmgr, { - oid => $bki_values{oid}, - name => $bki_values{proname}, - lang => $bki_values{prolang}, - kind => $bki_values{prokind}, + oid => $bki_values{oid}, + name => $bki_values{proname}, + lang => $bki_values{prolang}, + kind => $bki_values{prokind}, strict => $bki_values{proisstrict}, retset => $bki_values{proretset}, - nargs => $bki_values{pronargs}, - args => $bki_values{proargtypes}, + nargs => $bki_values{pronargs}, + args => $bki_values{proargtypes}, prosrc => $bki_values{prosrc}, }; @@ -88,10 +88,10 @@ foreach my $row (@{ $catalog_data{pg_proc} }) } # Emit headers for both files -my $tmpext = ".tmp$$"; -my $oidsfile = $output_path . 'fmgroids.h'; +my $tmpext = ".tmp$$"; +my $oidsfile = $output_path . 'fmgroids.h'; my $protosfile = $output_path . 'fmgrprotos.h'; -my $tabfile = $output_path . 'fmgrtab.c'; +my $tabfile = $output_path . 'fmgrtab.c'; open my $ofh, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!"; @@ -213,7 +213,8 @@ $bmap{'t'} = 'true'; $bmap{'f'} = 'false'; my @fmgr_builtin_oid_index; my $last_builtin_oid = 0; -my $fmgr_count = 0; +my $fmgr_count = 0; + foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr) { next if $s->{lang} ne 'internal'; @@ -273,9 +274,9 @@ close($pfh); close($tfh); # Finally, rename the completed files into place. -Catalog::RenameTempFile($oidsfile, $tmpext); +Catalog::RenameTempFile($oidsfile, $tmpext); Catalog::RenameTempFile($protosfile, $tmpext); -Catalog::RenameTempFile($tabfile, $tmpext); +Catalog::RenameTempFile($tabfile, $tmpext); sub usage { diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index f6edfc76ac..0cdb552631 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -1186,7 +1186,7 @@ pgstat_flush_pending_entries(bool nowait) while (cur) { PgStat_EntryRef *entry_ref = - dlist_container(PgStat_EntryRef, pending_node, cur); + dlist_container(PgStat_EntryRef, pending_node, cur); PgStat_HashKey key = entry_ref->shared_entry->key; PgStat_Kind kind = key.kind; const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c index 09fffd0e82..d1149adf70 100644 --- a/src/backend/utils/activity/pgstat_shmem.c +++ b/src/backend/utils/activity/pgstat_shmem.c @@ -865,7 +865,7 @@ pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid) if (pgStatEntryRefHash) { PgStat_EntryRefHashEntry *lohashent = - pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key); + pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key); if (lohashent) pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref, diff --git a/src/backend/utils/activity/pgstat_xact.c b/src/backend/utils/activity/pgstat_xact.c index 91cdd9222e..369239d501 100644 --- a/src/backend/utils/activity/pgstat_xact.c +++ b/src/backend/utils/activity/pgstat_xact.c @@ -76,7 +76,7 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit) dclist_foreach_modify(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + 
dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); xl_xact_stats_item *it = &pending->item; if (isCommit && !pending->is_create) @@ -148,7 +148,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, dclist_foreach_modify(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); xl_xact_stats_item *it = &pending->item; dclist_delete_from(&xact_state->pending_drops, &pending->node); @@ -290,7 +290,7 @@ pgstat_get_transactional_drops(bool isCommit, xl_xact_stats_item **items) dclist_foreach(iter, &xact_state->pending_drops) { PgStat_PendingDroppedStatsItem *pending = - dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); + dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur); if (isCommit && pending->is_create) continue; @@ -335,7 +335,7 @@ create_drop_transactional_internal(PgStat_Kind kind, Oid dboid, Oid objoid, bool int nest_level = GetCurrentTransactionNestLevel(); PgStat_SubXactStatus *xact_state; PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *) - MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem)); + MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem)); xact_state = pgstat_get_xact_stack_level(nest_level); diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index be2e55bb29..5d8d583ddc 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -4482,17 +4482,17 @@ EncodeInterval(struct pg_itm *itm, int style, char *str) case INTSTYLE_SQL_STANDARD: { bool has_negative = year < 0 || mon < 0 || - mday < 0 || hour < 0 || - min < 0 || sec < 0 || fsec < 0; + mday < 0 || hour < 0 || + min < 0 || sec < 0 || fsec < 0; bool has_positive = year > 0 || mon > 0 || - mday > 0 || hour > 0 || - min > 0 || sec > 0 || fsec > 0; + mday > 0 || hour > 0 || + min > 0 || sec > 0 || fsec > 0; bool has_year_month = year != 0 || mon != 0; bool has_day_time = mday != 0 || hour != 0 || - min != 0 || sec != 0 || fsec != 0; + min != 0 || sec != 0 || fsec != 0; bool has_day = mday != 0; bool sql_standard_value = !(has_negative && has_positive) && - !(has_year_month && has_day_time); + !(has_year_month && has_day_time); /* * SQL Standard wants only 1 "" preceding the whole diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index 9b51da2382..dfa90a04fb 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p, /* * endptr points to the first character _after_ the sequence we recognized * as a valid floating point number. orig_string points to the original - * input - * string. + * input string. 
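The datetime.c hunk re-wraps the chain of initializers that decides whether an interval is representable in SQL standard output form: no mixed signs, and no mixing of year-month with day-time fields. Pulled out of EncodeInterval() into a freestanding predicate, the boolean structure of the patched lines reads as follows; the wrapper function and the example values are invented.

#include <stdbool.h>
#include <stdio.h>

static bool
sql_standard_value(int year, int mon, int mday,
                   int hour, int min, int sec, int fsec)
{
    bool        has_negative = year < 0 || mon < 0 ||
        mday < 0 || hour < 0 ||
        min < 0 || sec < 0 || fsec < 0;
    bool        has_positive = year > 0 || mon > 0 ||
        mday > 0 || hour > 0 ||
        min > 0 || sec > 0 || fsec > 0;
    bool        has_year_month = year != 0 || mon != 0;
    bool        has_day_time = mday != 0 || hour != 0 ||
        min != 0 || sec != 0 || fsec != 0;

    /* one overall sign, and only one of the two field groups */
    return !(has_negative && has_positive) &&
        !(has_year_month && has_day_time);
}

int
main(void)
{
    /* '1 year 2 mons' qualifies; '1 year -1 day' fails both tests */
    printf("%d\n", sql_standard_value(1, 2, 0, 0, 0, 0, 0));    /* 1 */
    printf("%d\n", sql_standard_value(1, 0, -1, 0, 0, 0, 0));   /* 0 */
    return 0;
}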
*/ /* skip leading whitespace */ diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 4c5abaff25..70cb922e6b 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3219,9 +3219,9 @@ static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns) { RecordIOData *data = (RecordIOData *) - MemoryContextAlloc(mcxt, - offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + MemoryContextAlloc(mcxt, + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); data->record_type = InvalidOid; data->record_typmod = 0; diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c index 0021b01830..7891fde310 100644 --- a/src/backend/utils/adt/jsonpath.c +++ b/src/backend/utils/adt/jsonpath.c @@ -76,7 +76,7 @@ static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext); static char *jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len); -static bool flattenJsonPathParseItem(StringInfo buf, int *result, +static bool flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript); @@ -234,7 +234,7 @@ jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len) * children into a binary representation. */ static bool -flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, +flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, JsonPathParseItem *item, int nestingLevel, bool insideArraySubscript) { @@ -306,19 +306,19 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (!item->value.args.left) chld = pos; - else if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.args.left, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.args.left, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + left) = chld - pos; if (!item->value.args.right) chld = pos; - else if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.args.right, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.args.right, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + right) = chld - pos; } @@ -338,10 +338,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, item->value.like_regex.patternlen); appendStringInfoChar(buf, '\0'); - if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->value.like_regex.expr, - nestingLevel, - insideArraySubscript)) + if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.like_regex.expr, + nestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + offs) = chld - pos; } @@ -360,10 +360,10 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (!item->value.arg) chld = pos; - else if (! 
flattenJsonPathParseItem(buf, &chld, escontext, - item->value.arg, - nestingLevel + argNestingLevel, - insideArraySubscript)) + else if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->value.arg, + nestingLevel + argNestingLevel, + insideArraySubscript)) return false; *(int32 *) (buf->data + arg) = chld - pos; } @@ -405,17 +405,17 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, int32 topos; int32 frompos; - if (! flattenJsonPathParseItem(buf, &frompos, escontext, - item->value.array.elems[i].from, - nestingLevel, true)) + if (!flattenJsonPathParseItem(buf, &frompos, escontext, + item->value.array.elems[i].from, + nestingLevel, true)) return false; frompos -= pos; if (item->value.array.elems[i].to) { - if (! flattenJsonPathParseItem(buf, &topos, escontext, - item->value.array.elems[i].to, - nestingLevel, true)) + if (!flattenJsonPathParseItem(buf, &topos, escontext, + item->value.array.elems[i].to, + nestingLevel, true)) return false; topos -= pos; } @@ -451,9 +451,9 @@ flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext, if (item->next) { - if (! flattenJsonPathParseItem(buf, &chld, escontext, - item->next, nestingLevel, - insideArraySubscript)) + if (!flattenJsonPathParseItem(buf, &chld, escontext, + item->next, nestingLevel, + insideArraySubscript)) return false; chld -= pos; *(int32 *) (buf->data + next) = chld; diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index b561f0e7e8..41430bab7e 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -1326,8 +1326,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp, */ JsonValueList vals = {0}; JsonPathExecResult res = - executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, - false, &vals); + executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, + false, &vals); if (jperIsError(res)) return jpbUnknown; @@ -1337,8 +1337,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp, else { JsonPathExecResult res = - executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, - false, NULL); + executeItemOptUnwrapResultNoThrow(cxt, &larg, jb, + false, NULL); if (jperIsError(res)) return jpbUnknown; @@ -1869,7 +1869,7 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, if (!fmt_txt[i]) { MemoryContext oldcxt = - MemoryContextSwitchTo(TopMemoryContext); + MemoryContextSwitchTo(TopMemoryContext); fmt_txt[i] = cstring_to_text(fmt_str[i]); MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/utils/adt/jsonpath_internal.h b/src/backend/utils/adt/jsonpath_internal.h index 2e12de038c..90eea6e961 100644 --- a/src/backend/utils/adt/jsonpath_internal.h +++ b/src/backend/utils/adt/jsonpath_internal.h @@ -20,7 +20,7 @@ typedef struct JsonPathString char *val; int len; int total; -} JsonPathString; +} JsonPathString; #include "utils/jsonpath.h" #include "jsonpath_gram.h" @@ -29,8 +29,8 @@ typedef struct JsonPathString JsonPathParseResult **result, \ struct Node *escontext) YY_DECL; -extern int jsonpath_yyparse(JsonPathParseResult **result, - struct Node *escontext); +extern int jsonpath_yyparse(JsonPathParseResult **result, + struct Node *escontext); extern void jsonpath_yyerror(JsonPathParseResult **result, struct Node *escontext, const char *message); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index eea1d1ae0f..31e3b16ae0 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1794,8 +1794,7 @@ 
pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2, else #endif result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p); - if (result == 2147483647) /* _NLSCMPERROR; missing from mingw - * headers */ + if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */ ereport(ERROR, (errmsg("could not compare Unicode strings: %m"))); @@ -1818,14 +1817,15 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2, static int pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale) { - int result; + int result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); #ifdef WIN32 if (GetDatabaseEncoding() == PG_UTF8) { - size_t len1 = strlen(arg1); - size_t len2 = strlen(arg2); + size_t len1 = strlen(arg1); + size_t len2 = strlen(arg2); + result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale); } else @@ -1854,13 +1854,13 @@ static int pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - size_t bufsize1 = len1 + 1; - size_t bufsize2 = len2 + 1; - char *arg1n; - char *arg2n; - int result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + size_t bufsize1 = len1 + 1; + size_t bufsize2 = len2 + 1; + char *arg1n; + char *arg2n; + int result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); @@ -1906,15 +1906,15 @@ static int pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1, const char *arg2, int32_t len2, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - int32_t ulen1; - int32_t ulen2; - size_t bufsize1; - size_t bufsize2; - UChar *uchar1, - *uchar2; - int result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + int32_t ulen1; + int32_t ulen2; + size_t bufsize1; + size_t bufsize2; + UChar *uchar1, + *uchar2; + int result; Assert(locale->provider == COLLPROVIDER_ICU); #ifdef HAVE_UCOL_STRCOLLUTF8 @@ -1961,7 +1961,7 @@ static int pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2, pg_locale_t locale) { - int result; + int result; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2042,7 +2042,7 @@ int pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2, pg_locale_t locale) { - int result; + int result; if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strncoll_libc(arg1, len1, arg2, len2, locale); @@ -2074,7 +2074,7 @@ pg_strxfrm_libc(char *dest, const char *src, size_t destsize, #else /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return 0; /* keep compiler quiet */ + return 0; /* keep compiler quiet */ #endif } @@ -2082,10 +2082,10 @@ static size_t pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - size_t bufsize = srclen + 1; - size_t result; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + size_t bufsize = srclen + 1; + size_t result; Assert(!locale || locale->provider == COLLPROVIDER_LIBC); @@ -2114,12 +2114,12 @@ static size_t pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - UChar *uchar; - int32_t ulen; - size_t uchar_bsize; - Size result_bsize; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + UChar *uchar; + int32_t ulen; + size_t uchar_bsize; + Size result_bsize; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2161,15 +2161,15 @@ static size_t pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, 
int32_t srclen, int32_t destsize, pg_locale_t locale) { - char sbuf[TEXTBUFLEN]; - char *buf = sbuf; - UCharIterator iter; - uint32_t state[2]; - UErrorCode status; - int32_t ulen = -1; - UChar *uchar = NULL; - size_t uchar_bsize; - Size result_bsize; + char sbuf[TEXTBUFLEN]; + char *buf = sbuf; + UCharIterator iter; + uint32_t state[2]; + UErrorCode status; + int32_t ulen = -1; + UChar *uchar = NULL; + size_t uchar_bsize; + Size result_bsize; Assert(locale->provider == COLLPROVIDER_ICU); Assert(GetDatabaseEncoding() != PG_UTF8); @@ -2209,7 +2209,7 @@ static size_t pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen, int32_t destsize, pg_locale_t locale) { - size_t result; + size_t result; Assert(locale->provider == COLLPROVIDER_ICU); @@ -2271,7 +2271,7 @@ pg_strxfrm_enabled(pg_locale_t locale) /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } /* @@ -2291,7 +2291,7 @@ pg_strxfrm_enabled(pg_locale_t locale) size_t pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strxfrm_libc(dest, src, destsize, locale); @@ -2328,7 +2328,7 @@ size_t pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale); @@ -2358,7 +2358,7 @@ pg_strxfrm_prefix_enabled(pg_locale_t locale) /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", locale->provider); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } /* @@ -2378,7 +2378,7 @@ size_t pg_strxfrm_prefix(char *dest, const char *src, size_t destsize, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()", @@ -2415,7 +2415,7 @@ size_t pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src, size_t srclen, pg_locale_t locale) { - size_t result = 0; /* keep compiler quiet */ + size_t result = 0; /* keep compiler quiet */ if (!locale || locale->provider == COLLPROVIDER_LIBC) elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()", @@ -2491,7 +2491,7 @@ pg_ucol_open(const char *loc_str) collator = ucol_open(loc_str, &status); if (U_FAILURE(status)) ereport(ERROR, - /* use original string for error report */ + /* use original string for error report */ (errmsg("could not open collator for locale \"%s\": %s", orig_str, u_errorName(status)))); @@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len) { UErrorCode status = U_ZERO_ERROR; int32_t ulen; + ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status); if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR) ereport(ERROR, @@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen, { UErrorCode status = U_ZERO_ERROR; int32_t ulen; + status = U_ZERO_ERROR; ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status); if (U_FAILURE(status)) @@ -2594,7 +2596,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen, int32_t 
icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes) { - int32_t len_uchar; + int32_t len_uchar; init_icu_converter(); @@ -2781,11 +2783,11 @@ char * icu_language_tag(const char *loc_str, int elevel) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - char *langtag; - size_t buflen = 32; /* arbitrary starting buffer size */ - const bool strict = true; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + char *langtag; + size_t buflen = 32; /* arbitrary starting buffer size */ + const bool strict = true; status = U_ZERO_ERROR; uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status); @@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel) return pstrdup("en-US-u-va-posix"); /* - * A BCP47 language tag doesn't have a clearly-defined upper limit - * (cf. RFC5646 section 4.4). Additionally, in older ICU versions, + * A BCP47 language tag doesn't have a clearly-defined upper limit (cf. + * RFC5646 section 4.4). Additionally, in older ICU versions, * uloc_toLanguageTag() doesn't always return the ultimate length on the * first call, necessitating a loop. */ @@ -2843,7 +2845,7 @@ icu_language_tag(const char *loc_str, int elevel) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ICU is not supported in this build"))); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ #endif /* not USE_ICU */ } @@ -2854,11 +2856,11 @@ void icu_validate_locale(const char *loc_str) { #ifdef USE_ICU - UCollator *collator; - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - bool found = false; - int elevel = icu_validation_level; + UCollator *collator; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + bool found = false; + int elevel = icu_validation_level; /* no validation */ if (elevel < 0) @@ -2889,8 +2891,8 @@ icu_validate_locale(const char *loc_str) /* search for matching language within ICU */ for (int32_t i = 0; !found && i < uloc_countAvailable(); i++) { - const char *otherloc = uloc_getAvailable(i); - char otherlang[ULOC_LANG_CAPACITY]; + const char *otherloc = uloc_getAvailable(i); + char otherlang[ULOC_LANG_CAPACITY]; status = U_ZERO_ERROR; uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 6d673493cb..d3a973d86b 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -12587,7 +12587,7 @@ get_range_partbound_string(List *bound_datums) foreach(cell, bound_datums) { PartitionRangeDatum *datum = - lfirst_node(PartitionRangeDatum, cell); + lfirst_node(PartitionRangeDatum, cell); appendStringInfoString(buf, sep); if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c index 7e3bd51c1f..2bc4ec904f 100644 --- a/src/backend/utils/adt/tsquery_op.c +++ b/src/backend/utils/adt/tsquery_op.c @@ -150,9 +150,9 @@ Datum tsquery_phrase(PG_FUNCTION_ARGS) { PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1), - Int32GetDatum(1))); + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1), + Int32GetDatum(1))); } Datum diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index a38db4697d..4457c5d4f9 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -525,7 +525,7 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, if (arrin[i].haspos) { int len = POSDATALEN(tsv, arrin + i) 
* sizeof(WordEntryPos) - + sizeof(uint16); + + sizeof(uint16); curoff = SHORTALIGN(curoff); memcpy(dataout + curoff, diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 592afc18ec..b92ff4d266 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); @@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any((uint8_t *) buf, bsize + 1); @@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS) } else { - Size bsize, rsize; + Size bsize, + rsize; char *buf; bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale); @@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS) /* * In principle, there's no reason to include the terminating NUL - * character in the hash, but it was done before and the behavior - * must be preserved. + * character in the hash, but it was done before and the behavior must + * be preserved. */ result = hash_any_extended((uint8_t *) buf, bsize + 1, PG_GETARG_INT64(1)); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index b571876468..884bfbc8ce 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) memcpy(sss->buf1, authoritative_data, len); /* - * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated - * strings. + * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings. 
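The hashbpchar() hunks above rely on the pg_strnxfrm() convention of calling once with no destination to learn the required size, then filling a right-sized buffer (the adjacent comment also notes the deliberate quirk that the terminating NUL stays in the hashed bytes for compatibility). Plain C's strxfrm() supports the same size-query protocol; a minimal standalone use of it, with error handling kept deliberately thin:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    const char *src = "hello";

    /* first call asks for the required size (destsize 0 is allowed) */
    size_t      needed = strxfrm(NULL, src, 0);
    char       *dest = malloc(needed + 1);  /* +1 for the NUL */

    if (dest == NULL)
        return 1;

    /* second call must be given at least needed + 1 bytes */
    size_t      result = strxfrm(dest, src, needed + 1);

    if (result != needed)       /* would suggest the locale changed */
        fprintf(stderr, "unexpected strxfrm result\n");

    /* memcmp() on transformed strings now matches strcoll() order */
    printf("transformed length: %zu\n", result);
    free(dest);
    return 0;
}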
*/ sss->buf1[len] = '\0'; sss->last_len1 = len; @@ -4523,7 +4522,7 @@ text_to_array(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID)); PG_RETURN_DATUM(makeArrayResult(tstate.astate, - CurrentMemoryContext)); + CurrentMemoryContext)); } /* diff --git a/src/backend/utils/adt/xid8funcs.c b/src/backend/utils/adt/xid8funcs.c index 24271dfff7..06ae940df6 100644 --- a/src/backend/utils/adt/xid8funcs.c +++ b/src/backend/utils/adt/xid8funcs.c @@ -519,7 +519,7 @@ pg_snapshot_recv(PG_FUNCTION_ARGS) for (i = 0; i < nxip; i++) { FullTransactionId cur = - FullTransactionIdFromU64((uint64) pq_getmsgint64(buf)); + FullTransactionIdFromU64((uint64) pq_getmsgint64(buf)); if (FullTransactionIdPrecedes(cur, last) || FullTransactionIdPrecedes(cur, xmin) || diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 15adbd6a01..866d0d649a 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -630,7 +630,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent) XmlOptionType parsed_xmloptiontype; xmlNodePtr content_nodes; volatile xmlBufferPtr buf = NULL; - volatile xmlSaveCtxtPtr ctxt = NULL; + volatile xmlSaveCtxtPtr ctxt = NULL; ErrorSaveContext escontext = {T_ErrorSaveContext}; PgXmlErrorContext *xmlerrcxt; #endif diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index c7607895cd..60978f9415 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -3603,7 +3603,7 @@ char * get_publication_name(Oid pubid, bool missing_ok) { HeapTuple tup; - char *pubname; + char *pubname; Form_pg_publication pubform; tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); @@ -3630,16 +3630,16 @@ get_publication_name(Oid pubid, bool missing_ok) * return InvalidOid. 
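pg_snapshot_recv() above distrusts its input: each xid pulled off the wire must not precede the previous one and must lie within the snapshot's bounds. The same validation restated over bare uint64 values looks like this; the comparison helper is a trivial stand-in for FullTransactionIdPrecedes(), and the duplicate handling is simplified relative to the real function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t FullTransactionId;

/* trivial stand-in for FullTransactionIdPrecedes() */
static bool
precedes(FullTransactionId a, FullTransactionId b)
{
    return a < b;
}

/* xip must be ascending, duplicate-free, and within [xmin, xmax] */
static bool
snapshot_xip_valid(const FullTransactionId *xip, int nxip,
                   FullTransactionId xmin, FullTransactionId xmax)
{
    FullTransactionId last = 0;

    for (int i = 0; i < nxip; i++)
    {
        FullTransactionId cur = xip[i];

        if (precedes(cur, last) ||
            precedes(cur, xmin) ||
            precedes(xmax, cur) ||
            (i > 0 && cur == last))
            return false;
        last = cur;
    }
    return true;
}

int
main(void)
{
    FullTransactionId good[] = {10, 11, 15};
    FullTransactionId bad[] = {10, 9, 15};  /* not ascending */

    printf("%d %d\n",
           snapshot_xip_valid(good, 3, 10, 20),
           snapshot_xip_valid(bad, 3, 10, 20));     /* prints: 1 0 */
    return 0;
}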
*/ Oid -get_subscription_oid(const char* subname, bool missing_ok) +get_subscription_oid(const char *subname, bool missing_ok) { Oid oid; oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid, - MyDatabaseId, CStringGetDatum(subname)); + MyDatabaseId, CStringGetDatum(subname)); if (!OidIsValid(oid) && !missing_ok) ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("subscription \"%s\" does not exist", subname))); + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("subscription \"%s\" does not exist", subname))); return oid; } @@ -3653,7 +3653,7 @@ char * get_subscription_name(Oid subid, bool missing_ok) { HeapTuple tup; - char* subname; + char *subname; Form_pg_subscription subform; tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 40140de958..8a08463c2b 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -3084,10 +3084,10 @@ static void AssertPendingSyncConsistency(Relation relation) { bool relcache_verdict = - RelationIsPermanent(relation) && - ((relation->rd_createSubid != InvalidSubTransactionId && - RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) || - relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId); + RelationIsPermanent(relation) && + ((relation->rd_createSubid != InvalidSubTransactionId && + RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) || + relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId); Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator)); @@ -3765,12 +3765,12 @@ RelationSetNewRelfilenumber(Relation relation, char persistence) */ if (IsBinaryUpgrade) { - SMgrRelation srel; + SMgrRelation srel; /* * During a binary upgrade, we use this code path to ensure that - * pg_largeobject and its index have the same relfilenumbers as in - * the old cluster. This is necessary because pg_upgrade treats + * pg_largeobject and its index have the same relfilenumbers as in the + * old cluster. This is necessary because pg_upgrade treats * pg_largeobject like a user table, not a system table. It is however * possible that a table or index may need to end up with the same * relfilenumber in the new cluster as what it had in the old cluster. @@ -5171,8 +5171,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind) Bitmapset *uindexattrs; /* columns in unique indexes */ Bitmapset *pkindexattrs; /* columns in the primary index */ Bitmapset *idindexattrs; /* columns in the replica identity */ - Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ - Bitmapset *summarizedattrs; /* columns with summarizing indexes */ + Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ + Bitmapset *summarizedattrs; /* columns with summarizing indexes */ List *indexoidlist; List *newindexoidlist; Oid relpkindex; @@ -5314,8 +5314,8 @@ restart: * when the column value changes, thus require a separate * attribute bitmapset. * - * Obviously, non-key columns couldn't be referenced by - * foreign key or identity key. Hence we do not include them into + * Obviously, non-key columns couldn't be referenced by foreign + * key or identity key. Hence we do not include them into * uindexattrs, pkindexattrs and idindexattrs bitmaps. 
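get_subscription_oid() above (like its name-fetching siblings) follows the usual catalog lookup convention: a missing_ok flag selects between quietly returning InvalidOid and raising an error. The convention in miniature, with the syscache and ereport() machinery replaced by toys and the subscription names and OIDs invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned int Oid;

#define InvalidOid ((Oid) 0)
#define OidIsValid(oid) ((oid) != InvalidOid)

/* toy "syscache": a couple of hard-coded subscriptions */
static const struct
{
    const char *subname;
    Oid         oid;
}           subs[] = {{"sub_a", 16401}, {"sub_b", 16402}};

static Oid
get_subscription_oid(const char *subname, bool missing_ok)
{
    Oid         oid = InvalidOid;

    for (size_t i = 0; i < sizeof(subs) / sizeof(subs[0]); i++)
        if (strcmp(subs[i].subname, subname) == 0)
            oid = subs[i].oid;

    if (!OidIsValid(oid) && !missing_ok)
    {
        /* stands in for ereport(ERROR, ...) */
        fprintf(stderr, "subscription \"%s\" does not exist\n", subname);
        exit(1);
    }
    return oid;
}

int
main(void)
{
    printf("%u\n", get_subscription_oid("sub_a", false));   /* 16401 */
    printf("%u\n", get_subscription_oid("nope", true));     /* 0 */
    return 0;
}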
*/ if (attrnum != 0) diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 4c21129707..26575cae6c 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel) /* * Open the target file. * - * Because Windows isn't happy about the idea of renaming over a file - * that someone has open, we only open this file after acquiring the lock, - * and for the same reason, we close it before releasing the lock. That - * way, by the time write_relmap_file() acquires an exclusive lock, no - * one else will have it open. + * Because Windows isn't happy about the idea of renaming over a file that + * someone has open, we only open this file after acquiring the lock, and + * for the same reason, we close it before releasing the lock. That way, + * by the time write_relmap_file() acquires an exclusive lock, no one else + * will have it open. */ snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath, RELMAPPER_FILENAME); diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index 7458ef5c90..9208c31fe0 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -2150,7 +2150,7 @@ CheckFunctionValidatorAccess(Oid validatorOid, Oid functionOid) /* first validate that we have permissions to use the language */ aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(), - ACL_USAGE); + ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(langStruct->lanname)); diff --git a/src/backend/utils/generate-errcodes.pl b/src/backend/utils/generate-errcodes.pl index dd8ac6d56d..34d0f25c23 100644 --- a/src/backend/utils/generate-errcodes.pl +++ b/src/backend/utils/generate-errcodes.pl @@ -7,10 +7,9 @@ use strict; use warnings; use Getopt::Long; -my $outfile = ''; +my $outfile = ''; -GetOptions( - 'outfile=s' => \$outfile) or die "$0: wrong arguments"; +GetOptions('outfile=s' => \$outfile) or die "$0: wrong arguments"; open my $errcodes, '<', $ARGV[0] or die "$0: could not open input file '$ARGV[0]': $!\n"; diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 53420f4974..88434c3e5d 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -362,7 +362,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect */ if (!am_superuser && object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(), - ACL_CONNECT) != ACLCHECK_OK) + ACL_CONNECT) != ACLCHECK_OK) ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied for database \"%s\"", name), @@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid, } /* - * The last few connection slots are reserved for superusers and roles with - * privileges of pg_use_reserved_connections. Replication connections are - * drawn from slots reserved with max_wal_senders and are not limited by - * max_connections, superuser_reserved_connections, or + * The last few connection slots are reserved for superusers and roles + * with privileges of pg_use_reserved_connections. Replication + * connections are drawn from slots reserved with max_wal_senders and are + * not limited by max_connections, superuser_reserved_connections, or * reserved_connections. 
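The relmapper.c comment above encodes an ordering rule rather than a data structure: readers open the map file only after taking the lock and close it before releasing, so a writer that later holds the exclusive lock can be certain nobody has the file open when it renames a new version into place (which Windows would otherwise refuse). A pthread-flavored restatement of that bracket follows; the lock type, file names, and demo flow are invented for the sketch (compile with -lpthread), and error handling is minimal.

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t relmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* reader: open/close strictly inside the shared-lock window */
static void
read_map_file(const char *path, char *buf, size_t len)
{
    pthread_rwlock_rdlock(&relmap_lock);

    int         fd = open(path, O_RDONLY);

    if (fd >= 0)
    {
        (void) read(fd, buf, len);
        close(fd);              /* closed before the lock is dropped */
    }
    pthread_rwlock_unlock(&relmap_lock);
}

/* writer: with the exclusive lock held, nobody can have the file open */
static void
replace_map_file(const char *path, const char *tmppath)
{
    pthread_rwlock_wrlock(&relmap_lock);
    (void) rename(tmppath, path);   /* safe even where rename-over-open fails */
    pthread_rwlock_unlock(&relmap_lock);
}

int
main(void)
{
    char        buf[16] = {0};
    FILE       *f = fopen("map.tmp", "w");

    if (f == NULL)
        return 1;
    fputs("v2", f);
    fclose(f);
    replace_map_file("map.dat", "map.tmp");
    read_map_file("map.dat", buf, sizeof(buf) - 1);
    printf("%s\n", buf);        /* v2 */
    return 0;
}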
* * Note: At this point, the new backend has already claimed a proc struct, diff --git a/src/backend/utils/init/usercontext.c b/src/backend/utils/init/usercontext.c index 38bcfa60df..dd9a0dd6a8 100644 --- a/src/backend/utils/init/usercontext.c +++ b/src/backend/utils/init/usercontext.c @@ -61,15 +61,15 @@ SwitchToUntrustedUser(Oid userid, UserContext *context) } else { - int sec_context = context->save_sec_context; + int sec_context = context->save_sec_context; /* * This user can SET ROLE to the target user, but not the other way * around, so protect ourselves against the target user by setting * SECURITY_RESTRICTED_OPERATION to prevent certain changes to the - * session state. Also set up a new GUC nest level, so that we can roll - * back any GUC changes that may be made by code running as the target - * user, inasmuch as they could be malicious. + * session state. Also set up a new GUC nest level, so that we can + * roll back any GUC changes that may be made by code running as the + * target user, inasmuch as they could be malicious. */ sec_context |= SECURITY_RESTRICTED_OPERATION; SetUserIdAndSecContext(userid, sec_context); diff --git a/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl b/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl index 40b3fb6db6..4c5724b8b7 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_BIG5.pl @@ -40,7 +40,7 @@ my $cp950txt = &read_source("CP950.TXT"); foreach my $i (@$cp950txt) { my $code = $i->{code}; - my $ucs = $i->{ucs}; + my $ucs = $i->{ucs}; # Pick only the ETEN extended characters in the range 0xf9d6 - 0xf9dc # from CP950.TXT @@ -51,12 +51,12 @@ foreach my $i (@$cp950txt) { push @$all, { - code => $code, - ucs => $ucs, - comment => $i->{comment}, + code => $code, + ucs => $ucs, + comment => $i->{comment}, direction => BOTH, - f => $i->{f}, - l => $i->{l} + f => $i->{f}, + l => $i->{l} }; } } @@ -64,7 +64,7 @@ foreach my $i (@$cp950txt) foreach my $i (@$all) { my $code = $i->{code}; - my $ucs = $i->{ucs}; + my $ucs = $i->{ucs}; # BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can # contain only one of them. XXX: Doesn't really make sense to include any of them, diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl index adfdca24f7..f9ff2bd3d2 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl @@ -33,7 +33,7 @@ while (<$in>) next if (!m/) push @mapping, { - ucs => $ucs, - code => $code, + ucs => $ucs, + code => $code, direction => BOTH, - f => $in_file, - l => $. + f => $in_file, + l => $. }; } close($in); diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl index b7715ed419..2d0e05fb79 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl @@ -37,13 +37,13 @@ while (my $line = <$in>) push @all, { - direction => BOTH, - ucs => $ucs1, + direction => BOTH, + ucs => $ucs1, ucs_second => $ucs2, - code => $code, - comment => $rest, - f => $in_file, - l => $. + code => $code, + comment => $rest, + f => $in_file, + l => $. }; } elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/) @@ -51,7 +51,7 @@ while (my $line = <$in>) # non-combined characters my ($c, $u, $rest) = ($1, $2, "U+" . $2 . 
$3); - my $ucs = hex($u); + my $ucs = hex($u); my $code = hex($c); next if ($code < 0x80 && $ucs < 0x80); @@ -59,11 +59,11 @@ while (my $line = <$in>) push @all, { direction => BOTH, - ucs => $ucs, - code => $code, - comment => $rest, - f => $in_file, - l => $. + ucs => $ucs, + code => $code, + comment => $rest, + f => $in_file, + l => $. }; } } diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl index 9c949f95b1..4073578027 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl @@ -120,521 +120,521 @@ foreach my $i (grep defined $_->{sjis}, @mapping) push @mapping, ( { direction => BOTH, - ucs => 0x4efc, - code => 0x8ff4af, - comment => '# CJK(4EFC)' + ucs => 0x4efc, + code => 0x8ff4af, + comment => '# CJK(4EFC)' }, { direction => BOTH, - ucs => 0x50f4, - code => 0x8ff4b0, - comment => '# CJK(50F4)' + ucs => 0x50f4, + code => 0x8ff4b0, + comment => '# CJK(50F4)' }, { direction => BOTH, - ucs => 0x51EC, - code => 0x8ff4b1, - comment => '# CJK(51EC)' + ucs => 0x51EC, + code => 0x8ff4b1, + comment => '# CJK(51EC)' }, { direction => BOTH, - ucs => 0x5307, - code => 0x8ff4b2, - comment => '# CJK(5307)' + ucs => 0x5307, + code => 0x8ff4b2, + comment => '# CJK(5307)' }, { direction => BOTH, - ucs => 0x5324, - code => 0x8ff4b3, - comment => '# CJK(5324)' + ucs => 0x5324, + code => 0x8ff4b3, + comment => '# CJK(5324)' }, { direction => BOTH, - ucs => 0x548A, - code => 0x8ff4b5, - comment => '# CJK(548A)' + ucs => 0x548A, + code => 0x8ff4b5, + comment => '# CJK(548A)' }, { direction => BOTH, - ucs => 0x5759, - code => 0x8ff4b6, - comment => '# CJK(5759)' + ucs => 0x5759, + code => 0x8ff4b6, + comment => '# CJK(5759)' }, { direction => BOTH, - ucs => 0x589E, - code => 0x8ff4b9, - comment => '# CJK(589E)' + ucs => 0x589E, + code => 0x8ff4b9, + comment => '# CJK(589E)' }, { direction => BOTH, - ucs => 0x5BEC, - code => 0x8ff4ba, - comment => '# CJK(5BEC)' + ucs => 0x5BEC, + code => 0x8ff4ba, + comment => '# CJK(5BEC)' }, { direction => BOTH, - ucs => 0x5CF5, - code => 0x8ff4bb, - comment => '# CJK(5CF5)' + ucs => 0x5CF5, + code => 0x8ff4bb, + comment => '# CJK(5CF5)' }, { direction => BOTH, - ucs => 0x5D53, - code => 0x8ff4bc, - comment => '# CJK(5D53)' + ucs => 0x5D53, + code => 0x8ff4bc, + comment => '# CJK(5D53)' }, { direction => BOTH, - ucs => 0x5FB7, - code => 0x8ff4be, - comment => '# CJK(5FB7)' + ucs => 0x5FB7, + code => 0x8ff4be, + comment => '# CJK(5FB7)' }, { direction => BOTH, - ucs => 0x6085, - code => 0x8ff4bf, - comment => '# CJK(6085)' + ucs => 0x6085, + code => 0x8ff4bf, + comment => '# CJK(6085)' }, { direction => BOTH, - ucs => 0x6120, - code => 0x8ff4c0, - comment => '# CJK(6120)' + ucs => 0x6120, + code => 0x8ff4c0, + comment => '# CJK(6120)' }, { direction => BOTH, - ucs => 0x654E, - code => 0x8ff4c1, - comment => '# CJK(654E)' + ucs => 0x654E, + code => 0x8ff4c1, + comment => '# CJK(654E)' }, { direction => BOTH, - ucs => 0x663B, - code => 0x8ff4c2, - comment => '# CJK(663B)' + ucs => 0x663B, + code => 0x8ff4c2, + comment => '# CJK(663B)' }, { direction => BOTH, - ucs => 0x6665, - code => 0x8ff4c3, - comment => '# CJK(6665)' + ucs => 0x6665, + code => 0x8ff4c3, + comment => '# CJK(6665)' }, { direction => BOTH, - ucs => 0x6801, - code => 0x8ff4c6, - comment => '# CJK(6801)' + ucs => 0x6801, + code => 0x8ff4c6, + comment => '# CJK(6801)' }, { direction => BOTH, - ucs => 0x6A6B, - code => 0x8ff4c9, - comment => '# CJK(6A6B)' + ucs => 0x6A6B, + code => 0x8ff4c9, + comment 
=> '# CJK(6A6B)' }, { direction => BOTH, - ucs => 0x6AE2, - code => 0x8ff4ca, - comment => '# CJK(6AE2)' + ucs => 0x6AE2, + code => 0x8ff4ca, + comment => '# CJK(6AE2)' }, { direction => BOTH, - ucs => 0x6DF2, - code => 0x8ff4cc, - comment => '# CJK(6DF2)' + ucs => 0x6DF2, + code => 0x8ff4cc, + comment => '# CJK(6DF2)' }, { direction => BOTH, - ucs => 0x6DF8, - code => 0x8ff4cb, - comment => '# CJK(6DF8)' + ucs => 0x6DF8, + code => 0x8ff4cb, + comment => '# CJK(6DF8)' }, { direction => BOTH, - ucs => 0x7028, - code => 0x8ff4cd, - comment => '# CJK(7028)' + ucs => 0x7028, + code => 0x8ff4cd, + comment => '# CJK(7028)' }, { direction => BOTH, - ucs => 0x70BB, - code => 0x8ff4ae, - comment => '# CJK(70BB)' + ucs => 0x70BB, + code => 0x8ff4ae, + comment => '# CJK(70BB)' }, { direction => BOTH, - ucs => 0x7501, - code => 0x8ff4d0, - comment => '# CJK(7501)' + ucs => 0x7501, + code => 0x8ff4d0, + comment => '# CJK(7501)' }, { direction => BOTH, - ucs => 0x7682, - code => 0x8ff4d1, - comment => '# CJK(7682)' + ucs => 0x7682, + code => 0x8ff4d1, + comment => '# CJK(7682)' }, { direction => BOTH, - ucs => 0x769E, - code => 0x8ff4d2, - comment => '# CJK(769E)' + ucs => 0x769E, + code => 0x8ff4d2, + comment => '# CJK(769E)' }, { direction => BOTH, - ucs => 0x7930, - code => 0x8ff4d4, - comment => '# CJK(7930)' + ucs => 0x7930, + code => 0x8ff4d4, + comment => '# CJK(7930)' }, { direction => BOTH, - ucs => 0x7AE7, - code => 0x8ff4d9, - comment => '# CJK(7AE7)' + ucs => 0x7AE7, + code => 0x8ff4d9, + comment => '# CJK(7AE7)' }, { direction => BOTH, - ucs => 0x7DA0, - code => 0x8ff4dc, - comment => '# CJK(7DA0)' + ucs => 0x7DA0, + code => 0x8ff4dc, + comment => '# CJK(7DA0)' }, { direction => BOTH, - ucs => 0x7DD6, - code => 0x8ff4dd, - comment => '# CJK(7DD6)' + ucs => 0x7DD6, + code => 0x8ff4dd, + comment => '# CJK(7DD6)' }, { direction => BOTH, - ucs => 0x8362, - code => 0x8ff4df, - comment => '# CJK(8362)' + ucs => 0x8362, + code => 0x8ff4df, + comment => '# CJK(8362)' }, { direction => BOTH, - ucs => 0x85B0, - code => 0x8ff4e1, - comment => '# CJK(85B0)' + ucs => 0x85B0, + code => 0x8ff4e1, + comment => '# CJK(85B0)' }, { direction => BOTH, - ucs => 0x8807, - code => 0x8ff4e4, - comment => '# CJK(8807)' + ucs => 0x8807, + code => 0x8ff4e4, + comment => '# CJK(8807)' }, { direction => BOTH, - ucs => 0x8B7F, - code => 0x8ff4e6, - comment => '# CJK(8B7F)' + ucs => 0x8B7F, + code => 0x8ff4e6, + comment => '# CJK(8B7F)' }, { direction => BOTH, - ucs => 0x8CF4, - code => 0x8ff4e7, - comment => '# CJK(8CF4)' + ucs => 0x8CF4, + code => 0x8ff4e7, + comment => '# CJK(8CF4)' }, { direction => BOTH, - ucs => 0x8D76, - code => 0x8ff4e8, - comment => '# CJK(8D76)' + ucs => 0x8D76, + code => 0x8ff4e8, + comment => '# CJK(8D76)' }, { direction => BOTH, - ucs => 0x90DE, - code => 0x8ff4ec, - comment => '# CJK(90DE)' + ucs => 0x90DE, + code => 0x8ff4ec, + comment => '# CJK(90DE)' }, { direction => BOTH, - ucs => 0x9115, - code => 0x8ff4ee, - comment => '# CJK(9115)' + ucs => 0x9115, + code => 0x8ff4ee, + comment => '# CJK(9115)' }, { direction => BOTH, - ucs => 0x9592, - code => 0x8ff4f1, - comment => '# CJK(9592)' + ucs => 0x9592, + code => 0x8ff4f1, + comment => '# CJK(9592)' }, { direction => BOTH, - ucs => 0x973B, - code => 0x8ff4f4, - comment => '# CJK(973B)' + ucs => 0x973B, + code => 0x8ff4f4, + comment => '# CJK(973B)' }, { direction => BOTH, - ucs => 0x974D, - code => 0x8ff4f5, - comment => '# CJK(974D)' + ucs => 0x974D, + code => 0x8ff4f5, + comment => '# CJK(974D)' }, { direction => BOTH, - ucs => 0x9751, 
- code => 0x8ff4f6, - comment => '# CJK(9751)' + ucs => 0x9751, + code => 0x8ff4f6, + comment => '# CJK(9751)' }, { direction => BOTH, - ucs => 0x999E, - code => 0x8ff4fa, - comment => '# CJK(999E)' + ucs => 0x999E, + code => 0x8ff4fa, + comment => '# CJK(999E)' }, { direction => BOTH, - ucs => 0x9AD9, - code => 0x8ff4fb, - comment => '# CJK(9AD9)' + ucs => 0x9AD9, + code => 0x8ff4fb, + comment => '# CJK(9AD9)' }, { direction => BOTH, - ucs => 0x9B72, - code => 0x8ff4fc, - comment => '# CJK(9B72)' + ucs => 0x9B72, + code => 0x8ff4fc, + comment => '# CJK(9B72)' }, { direction => BOTH, - ucs => 0x9ED1, - code => 0x8ff4fe, - comment => '# CJK(9ED1)' + ucs => 0x9ED1, + code => 0x8ff4fe, + comment => '# CJK(9ED1)' }, { direction => BOTH, - ucs => 0xF929, - code => 0x8ff4c5, - comment => '# CJK COMPATIBILITY IDEOGRAPH-F929' + ucs => 0xF929, + code => 0x8ff4c5, + comment => '# CJK COMPATIBILITY IDEOGRAPH-F929' }, { direction => BOTH, - ucs => 0xF9DC, - code => 0x8ff4f2, - comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC' + ucs => 0xF9DC, + code => 0x8ff4f2, + comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC' }, { direction => BOTH, - ucs => 0xFA0E, - code => 0x8ff4b4, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E' + ucs => 0xFA0E, + code => 0x8ff4b4, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E' }, { direction => BOTH, - ucs => 0xFA0F, - code => 0x8ff4b7, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F' + ucs => 0xFA0F, + code => 0x8ff4b7, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F' }, { direction => BOTH, - ucs => 0xFA10, - code => 0x8ff4b8, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10' + ucs => 0xFA10, + code => 0x8ff4b8, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10' }, { direction => BOTH, - ucs => 0xFA11, - code => 0x8ff4bd, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11' + ucs => 0xFA11, + code => 0x8ff4bd, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11' }, { direction => BOTH, - ucs => 0xFA12, - code => 0x8ff4c4, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12' + ucs => 0xFA12, + code => 0x8ff4c4, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12' }, { direction => BOTH, - ucs => 0xFA13, - code => 0x8ff4c7, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13' + ucs => 0xFA13, + code => 0x8ff4c7, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13' }, { direction => BOTH, - ucs => 0xFA14, - code => 0x8ff4c8, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14' + ucs => 0xFA14, + code => 0x8ff4c8, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14' }, { direction => BOTH, - ucs => 0xFA15, - code => 0x8ff4ce, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15' + ucs => 0xFA15, + code => 0x8ff4ce, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15' }, { direction => BOTH, - ucs => 0xFA16, - code => 0x8ff4cf, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16' + ucs => 0xFA16, + code => 0x8ff4cf, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16' }, { direction => BOTH, - ucs => 0xFA17, - code => 0x8ff4d3, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17' + ucs => 0xFA17, + code => 0x8ff4d3, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17' }, { direction => BOTH, - ucs => 0xFA18, - code => 0x8ff4d5, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18' + ucs => 0xFA18, + code => 0x8ff4d5, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18' }, { direction => BOTH, - ucs => 0xFA19, - code => 0x8ff4d6, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19' + ucs => 0xFA19, + code => 0x8ff4d6, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19' }, { direction => BOTH, - ucs => 0xFA1A, - code => 0x8ff4d7, - 
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A' + ucs => 0xFA1A, + code => 0x8ff4d7, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A' }, { direction => BOTH, - ucs => 0xFA1B, - code => 0x8ff4d8, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B' + ucs => 0xFA1B, + code => 0x8ff4d8, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B' }, { direction => BOTH, - ucs => 0xFA1C, - code => 0x8ff4da, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C' + ucs => 0xFA1C, + code => 0x8ff4da, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C' }, { direction => BOTH, - ucs => 0xFA1D, - code => 0x8ff4db, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D' + ucs => 0xFA1D, + code => 0x8ff4db, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D' }, { direction => BOTH, - ucs => 0xFA1E, - code => 0x8ff4de, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E' + ucs => 0xFA1E, + code => 0x8ff4de, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E' }, { direction => BOTH, - ucs => 0xFA1F, - code => 0x8ff4e0, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F' + ucs => 0xFA1F, + code => 0x8ff4e0, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F' }, { direction => BOTH, - ucs => 0xFA20, - code => 0x8ff4e2, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20' + ucs => 0xFA20, + code => 0x8ff4e2, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20' }, { direction => BOTH, - ucs => 0xFA21, - code => 0x8ff4e3, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21' + ucs => 0xFA21, + code => 0x8ff4e3, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21' }, { direction => BOTH, - ucs => 0xFA22, - code => 0x8ff4e5, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22' + ucs => 0xFA22, + code => 0x8ff4e5, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22' }, { direction => BOTH, - ucs => 0xFA23, - code => 0x8ff4e9, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23' + ucs => 0xFA23, + code => 0x8ff4e9, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23' }, { direction => BOTH, - ucs => 0xFA24, - code => 0x8ff4ea, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24' + ucs => 0xFA24, + code => 0x8ff4ea, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24' }, { direction => BOTH, - ucs => 0xFA25, - code => 0x8ff4eb, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25' + ucs => 0xFA25, + code => 0x8ff4eb, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25' }, { direction => BOTH, - ucs => 0xFA26, - code => 0x8ff4ed, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26' + ucs => 0xFA26, + code => 0x8ff4ed, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26' }, { direction => BOTH, - ucs => 0xFA27, - code => 0x8ff4ef, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27' + ucs => 0xFA27, + code => 0x8ff4ef, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27' }, { direction => BOTH, - ucs => 0xFA28, - code => 0x8ff4f0, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28' + ucs => 0xFA28, + code => 0x8ff4f0, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28' }, { direction => BOTH, - ucs => 0xFA29, - code => 0x8ff4f3, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29' + ucs => 0xFA29, + code => 0x8ff4f3, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29' }, { direction => BOTH, - ucs => 0xFA2A, - code => 0x8ff4f7, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A' + ucs => 0xFA2A, + code => 0x8ff4f7, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A' }, { direction => BOTH, - ucs => 0xFA2B, - code => 0x8ff4f8, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B' + ucs => 0xFA2B, + code => 0x8ff4f8, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B' }, { direction => BOTH, - ucs => 0xFA2C, - code => 
0x8ff4f9, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C' + ucs => 0xFA2C, + code => 0x8ff4f9, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C' }, { direction => BOTH, - ucs => 0xFA2D, - code => 0x8ff4fd, - comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D' + ucs => 0xFA2D, + code => 0x8ff4fd, + comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D' }, { direction => BOTH, - ucs => 0xFF07, - code => 0x8ff4a9, - comment => '# FULLWIDTH APOSTROPHE' + ucs => 0xFF07, + code => 0x8ff4a9, + comment => '# FULLWIDTH APOSTROPHE' }, { direction => BOTH, - ucs => 0xFFE4, - code => 0x8fa2c3, - comment => '# FULLWIDTH BROKEN BAR' + ucs => 0xFFE4, + code => 0x8fa2c3, + comment => '# FULLWIDTH BROKEN BAR' }, # additional conversions for EUC_JP -> UTF-8 conversion { direction => TO_UNICODE, - ucs => 0x2116, - code => 0x8ff4ac, - comment => '# NUMERO SIGN' + ucs => 0x2116, + code => 0x8ff4ac, + comment => '# NUMERO SIGN' }, { direction => TO_UNICODE, - ucs => 0x2121, - code => 0x8ff4ad, - comment => '# TELEPHONE SIGN' + ucs => 0x2121, + code => 0x8ff4ad, + comment => '# TELEPHONE SIGN' }, { direction => TO_UNICODE, - ucs => 0x3231, - code => 0x8ff4ab, - comment => '# PARENTHESIZED IDEOGRAPH STOCK' + ucs => 0x3231, + code => 0x8ff4ab, + comment => '# PARENTHESIZED IDEOGRAPH STOCK' }); print_conversion_tables($this_script, "EUC_JP", \@mapping); diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl index 4c3989d2c5..9112e1cfe9 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl @@ -36,27 +36,27 @@ foreach my $i (@$mapping) push @$mapping, ( { direction => BOTH, - ucs => 0x20AC, - code => 0xa2e6, - comment => '# EURO SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x20AC, + code => 0xa2e6, + comment => '# EURO SIGN', + f => $this_script, + l => __LINE__ }, { direction => BOTH, - ucs => 0x00AE, - code => 0xa2e7, - comment => '# REGISTERED SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x00AE, + code => 0xa2e7, + comment => '# REGISTERED SIGN', + f => $this_script, + l => __LINE__ }, { direction => BOTH, - ucs => 0x327E, - code => 0xa2e8, - comment => '# CIRCLED HANGUL IEUNG U', - f => $this_script, - l => __LINE__ + ucs => 0x327E, + code => 0xa2e8, + comment => '# CIRCLED HANGUL IEUNG U', + f => $this_script, + l => __LINE__ }); print_conversion_tables($this_script, "EUC_KR", $mapping); diff --git a/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl b/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl index ecc175528e..4ad17064ab 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl @@ -30,8 +30,8 @@ my @extras; foreach my $i (@$mapping) { - my $ucs = $i->{ucs}; - my $code = $i->{code}; + my $ucs = $i->{ucs}; + my $code = $i->{code}; my $origcode = $i->{code}; my $plane = ($code & 0x1f0000) >> 16; @@ -56,12 +56,12 @@ foreach my $i (@$mapping) { push @extras, { - ucs => $i->{ucs}, - code => ($i->{code} + 0x8ea10000), - rest => $i->{rest}, + ucs => $i->{ucs}, + code => ($i->{code} + 0x8ea10000), + rest => $i->{rest}, direction => TO_UNICODE, - f => $i->{f}, - l => $i->{l} + f => $i->{f}, + l => $i->{l} }; } } diff --git a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl index fb401e6099..9c8a983bf7 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl @@ -33,17 +33,17 @@ while (<$in>) next if (!m/= 0x80 && $ucs >= 0x0080) { push @mapping, { - ucs => 
$ucs, - code => $code, + ucs => $ucs, + code => $code, direction => BOTH, - f => $in_file, - l => $. + f => $in_file, + l => $. }; } } diff --git a/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl b/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl index 370c5b801c..f50baa8f1f 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl @@ -30,27 +30,27 @@ my $mapping = &read_source("JOHAB.TXT"); push @$mapping, ( { direction => BOTH, - ucs => 0x20AC, - code => 0xd9e6, - comment => '# EURO SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x20AC, + code => 0xd9e6, + comment => '# EURO SIGN', + f => $this_script, + l => __LINE__ }, { direction => BOTH, - ucs => 0x00AE, - code => 0xd9e7, - comment => '# REGISTERED SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x00AE, + code => 0xd9e7, + comment => '# REGISTERED SIGN', + f => $this_script, + l => __LINE__ }, { direction => BOTH, - ucs => 0x327E, - code => 0xd9e8, - comment => '# CIRCLED HANGUL IEUNG U', - f => $this_script, - l => __LINE__ + ucs => 0x327E, + code => 0xd9e8, + comment => '# CIRCLED HANGUL IEUNG U', + f => $this_script, + l => __LINE__ }); print_conversion_tables($this_script, "JOHAB", $mapping); diff --git a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl index 6431aba555..ed010a58fa 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl @@ -37,13 +37,13 @@ while (my $line = <$in>) push @mapping, { - code => $code, - ucs => $ucs1, + code => $code, + ucs => $ucs1, ucs_second => $ucs2, - comment => $rest, - direction => BOTH, - f => $in_file, - l => $. + comment => $rest, + direction => BOTH, + f => $in_file, + l => $. }; } elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/) @@ -51,7 +51,7 @@ while (my $line = <$in>) # non-combined characters my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3); - my $ucs = hex($u); + my $ucs = hex($u); my $code = hex($c); my $direction; @@ -74,12 +74,12 @@ while (my $line = <$in>) push @mapping, { - code => $code, - ucs => $ucs, - comment => $rest, + code => $code, + ucs => $ucs, + comment => $rest, direction => $direction, - f => $in_file, - l => $. + f => $in_file, + l => $. }; } } diff --git a/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl b/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl index 6426cf4794..0808c6836b 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_SJIS.pl @@ -22,13 +22,13 @@ my $mapping = read_source("CP932.TXT"); # Drop these SJIS codes from the source for UTF8=>SJIS conversion my @reject_sjis = ( 0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782, - 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792, + 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792, 0x8795 .. 0x8797, 0x879a .. 
0x879c); foreach my $i (@$mapping) { my $code = $i->{code}; - my $ucs = $i->{ucs}; + my $ucs = $i->{ucs}; if (grep { $code == $_ } @reject_sjis) { @@ -40,67 +40,67 @@ foreach my $i (@$mapping) push @$mapping, ( { direction => FROM_UNICODE, - ucs => 0x00a2, - code => 0x8191, - comment => '# CENT SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x00a2, + code => 0x8191, + comment => '# CENT SIGN', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x00a3, - code => 0x8192, - comment => '# POUND SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x00a3, + code => 0x8192, + comment => '# POUND SIGN', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x00a5, - code => 0x5c, - comment => '# YEN SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x00a5, + code => 0x5c, + comment => '# YEN SIGN', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x00ac, - code => 0x81ca, - comment => '# NOT SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x00ac, + code => 0x81ca, + comment => '# NOT SIGN', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x2016, - code => 0x8161, - comment => '# DOUBLE VERTICAL LINE', - f => $this_script, - l => __LINE__ + ucs => 0x2016, + code => 0x8161, + comment => '# DOUBLE VERTICAL LINE', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x203e, - code => 0x7e, - comment => '# OVERLINE', - f => $this_script, - l => __LINE__ + ucs => 0x203e, + code => 0x7e, + comment => '# OVERLINE', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x2212, - code => 0x817c, - comment => '# MINUS SIGN', - f => $this_script, - l => __LINE__ + ucs => 0x2212, + code => 0x817c, + comment => '# MINUS SIGN', + f => $this_script, + l => __LINE__ }, { direction => FROM_UNICODE, - ucs => 0x301c, - code => 0x8160, - comment => '# WAVE DASH', - f => $this_script, - l => __LINE__ + ucs => 0x301c, + code => 0x8160, + comment => '# WAVE DASH', + f => $this_script, + l => __LINE__ }); print_conversion_tables($this_script, "SJIS", $mapping); diff --git a/src/backend/utils/mb/Unicode/UCS_to_UHC.pl b/src/backend/utils/mb/Unicode/UCS_to_UHC.pl index 5ec9c069b7..207677d76d 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_UHC.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_UHC.pl @@ -33,7 +33,7 @@ while (<$in>) next if (!m/) { push @mapping, { - ucs => $ucs, - code => $code, + ucs => $ucs, + code => $code, direction => BOTH, - f => $in_file, - l => $. + f => $in_file, + l => $. 
}; } } @@ -56,11 +56,11 @@ close($in); push @mapping, { direction => BOTH, - code => 0xa2e8, - ucs => 0x327e, - comment => 'CIRCLED HANGUL IEUNG U', - f => $this_script, - l => __LINE__ + code => 0xa2e8, + ucs => 0x327e, + comment => 'CIRCLED HANGUL IEUNG U', + f => $this_script, + l => __LINE__ }; print_conversion_tables($this_script, "UHC", \@mapping); diff --git a/src/backend/utils/mb/Unicode/UCS_to_most.pl b/src/backend/utils/mb/Unicode/UCS_to_most.pl index 1917f86f0a..a1947308ff 100755 --- a/src/backend/utils/mb/Unicode/UCS_to_most.pl +++ b/src/backend/utils/mb/Unicode/UCS_to_most.pl @@ -23,33 +23,33 @@ use convutils; my $this_script = 'src/backend/utils/mb/Unicode/UCS_to_most.pl'; my %filename = ( - 'WIN866' => 'CP866.TXT', - 'WIN874' => 'CP874.TXT', - 'WIN1250' => 'CP1250.TXT', - 'WIN1251' => 'CP1251.TXT', - 'WIN1252' => 'CP1252.TXT', - 'WIN1253' => 'CP1253.TXT', - 'WIN1254' => 'CP1254.TXT', - 'WIN1255' => 'CP1255.TXT', - 'WIN1256' => 'CP1256.TXT', - 'WIN1257' => 'CP1257.TXT', - 'WIN1258' => 'CP1258.TXT', - 'ISO8859_2' => '8859-2.TXT', - 'ISO8859_3' => '8859-3.TXT', - 'ISO8859_4' => '8859-4.TXT', - 'ISO8859_5' => '8859-5.TXT', - 'ISO8859_6' => '8859-6.TXT', - 'ISO8859_7' => '8859-7.TXT', - 'ISO8859_8' => '8859-8.TXT', - 'ISO8859_9' => '8859-9.TXT', + 'WIN866' => 'CP866.TXT', + 'WIN874' => 'CP874.TXT', + 'WIN1250' => 'CP1250.TXT', + 'WIN1251' => 'CP1251.TXT', + 'WIN1252' => 'CP1252.TXT', + 'WIN1253' => 'CP1253.TXT', + 'WIN1254' => 'CP1254.TXT', + 'WIN1255' => 'CP1255.TXT', + 'WIN1256' => 'CP1256.TXT', + 'WIN1257' => 'CP1257.TXT', + 'WIN1258' => 'CP1258.TXT', + 'ISO8859_2' => '8859-2.TXT', + 'ISO8859_3' => '8859-3.TXT', + 'ISO8859_4' => '8859-4.TXT', + 'ISO8859_5' => '8859-5.TXT', + 'ISO8859_6' => '8859-6.TXT', + 'ISO8859_7' => '8859-7.TXT', + 'ISO8859_8' => '8859-8.TXT', + 'ISO8859_9' => '8859-9.TXT', 'ISO8859_10' => '8859-10.TXT', 'ISO8859_13' => '8859-13.TXT', 'ISO8859_14' => '8859-14.TXT', 'ISO8859_15' => '8859-15.TXT', 'ISO8859_16' => '8859-16.TXT', - 'KOI8R' => 'KOI8-R.TXT', - 'KOI8U' => 'KOI8-U.TXT', - 'GBK' => 'CP936.TXT'); + 'KOI8R' => 'KOI8-R.TXT', + 'KOI8U' => 'KOI8-U.TXT', + 'GBK' => 'CP936.TXT'); # make maps for all encodings if not specified my @charsets = (scalar(@ARGV) > 0) ? @ARGV : sort keys(%filename); diff --git a/src/backend/utils/mb/Unicode/convutils.pm b/src/backend/utils/mb/Unicode/convutils.pm index fd019424fd..77de7b1a4d 100644 --- a/src/backend/utils/mb/Unicode/convutils.pm +++ b/src/backend/utils/mb/Unicode/convutils.pm @@ -16,10 +16,10 @@ our @EXPORT = # Constants used in the 'direction' field of the character maps use constant { - NONE => 0, - TO_UNICODE => 1, + NONE => 0, + TO_UNICODE => 1, FROM_UNICODE => 2, - BOTH => 3 + BOTH => 3 }; ####################################################################### @@ -53,12 +53,12 @@ sub read_source exit; } my $out = { - code => hex($1), - ucs => hex($2), - comment => $4, + code => hex($1), + ucs => hex($2), + comment => $4, direction => BOTH, - f => $fname, - l => $. + f => $fname, + l => $. }; # Ignore pure ASCII mappings. 
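Everything in these Unicode scripts funnels through the same record shape: each mapping is a hash whose direction field takes one of the NONE/TO_UNICODE/FROM_UNICODE/BOTH constants above, alongside ucs, code, an optional comment, and the f/l pair recording the file and line that produced the entry. A minimal sketch of constructing one entry and applying the pure-ASCII filter that read_source's comment refers to (the entry values come from the EURO SIGN hunks above; treat the exact filter condition as an assumption):

use strict;
use warnings;
use constant { NONE => 0, TO_UNICODE => 1, FROM_UNICODE => 2, BOTH => 3 };

my @mapping;
my ($ucs, $code) = (0x20AC, 0xa2e6);    # EURO SIGN, as in the EUC_KR hunk

# Pure ASCII mappings are identity entries below 0x80 on both sides;
# they carry no information, so the real script drops them.
unless ($ucs < 0x80 && $code < 0x80)
{
	push @mapping,
	  {
		direction => BOTH,
		ucs => $ucs,
		code => $code,
		comment => '# EURO SIGN',
		f => __FILE__,    # provenance fields let the generated map
		l => __LINE__     # files cite where each entry came from
	  };
}
printf "mapped U+%04X to 0x%04x\n", $mapping[0]{ucs}, $mapping[0]{code};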
PostgreSQL character conversion code @@ -124,14 +124,14 @@ sub print_conversion_tables_direction my $tblname; if ($direction == TO_UNICODE) { - $fname = lc("${csname}_to_utf8.map"); + $fname = lc("${csname}_to_utf8.map"); $tblname = lc("${csname}_to_unicode_tree"); print "- Writing ${csname}=>UTF8 conversion table: $fname\n"; } else { - $fname = lc("utf8_to_${csname}.map"); + $fname = lc("utf8_to_${csname}.map"); $tblname = lc("${csname}_from_unicode_tree"); print "- Writing UTF8=>${csname} conversion table: $fname\n"; @@ -378,10 +378,10 @@ sub print_radix_table unshift @segments, { - header => "Dummy map, for invalid values", + header => "Dummy map, for invalid values", min_idx => 0, max_idx => $widest_range, - label => "dummy map" + label => "dummy map" }; ### @@ -397,7 +397,7 @@ sub print_radix_table ### for (my $j = 0; $j < $#segments - 1; $j++) { - my $seg = $segments[$j]; + my $seg = $segments[$j]; my $nextseg = $segments[ $j + 1 ]; # Count the number of zero values at the end of this segment. @@ -527,17 +527,17 @@ sub print_radix_table if ($max_val <= 0xffff) { $vals_per_line = 8; - $colwidth = 4; + $colwidth = 4; } elsif ($max_val <= 0xffffff) { $vals_per_line = 4; - $colwidth = 6; + $colwidth = 6; } else { $vals_per_line = 4; - $colwidth = 8; + $colwidth = 8; } ### @@ -607,8 +607,10 @@ sub print_radix_table # Print the next line's worth of values. # XXX pad to begin at a nice boundary printf $out " /* %02x */ ", $i; - for (my $j = 0; - $j < $vals_per_line && $i <= $seg->{max_idx}; $j++) + for ( + my $j = 0; + $j < $vals_per_line && $i <= $seg->{max_idx}; + $j++) { # missing values represent zero. my $val = $seg->{values}->{$i} || 0; @@ -671,10 +673,10 @@ sub build_segments_recurse push @segments, { header => $header . ", leaf: ${path}xx", - label => $label, - level => $level, - depth => $depth, - path => $path, + label => $label, + level => $level, + depth => $depth, + path => $path, values => $map }; } @@ -696,10 +698,10 @@ sub build_segments_recurse push @segments, { header => $header . ", byte #$level: ${path}xx", - label => $label, - level => $level, - depth => $depth, - path => $path, + label => $label, + level => $level, + depth => $depth, + path => $path, values => \%children }; } @@ -789,12 +791,12 @@ sub make_charmap_combined if (defined $c->{ucs_second}) { my $entry = { - utf8 => ucs2utf($c->{ucs}), + utf8 => ucs2utf($c->{ucs}), utf8_second => ucs2utf($c->{ucs_second}), - code => $c->{code}, - comment => $c->{comment}, - f => $c->{f}, - l => $c->{l} + code => $c->{code}, + comment => $c->{comment}, + f => $c->{f}, + l => $c->{l} }; push @combined, $entry; } diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 67c37c49cb..a9033b7a54 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -1470,8 +1470,8 @@ check_GUC_init(struct config_generic *gconf) /* Flag combinations */ /* - * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part - * of SHOW ALL should not be hidden in postgresql.conf.sample. + * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of + * SHOW ALL should not be hidden in postgresql.conf.sample. 
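The rewrapped comment states an implication between two flags: any GUC hidden from SHOW ALL must also be kept out of postgresql.conf.sample, and check_GUC_init treats GUC_NO_SHOW_ALL without GUC_NOT_IN_SAMPLE as a broken combination. The implication fits in a one-line predicate; this sketch uses placeholder bit values, not the real guc.h constants:

use strict;
use warnings;

# Placeholder bits; the real values are defined in guc.h.
use constant { GUC_NO_SHOW_ALL => 0x01, GUC_NOT_IN_SAMPLE => 0x02 };

# "A implies B": the only inconsistent state is NO_SHOW_ALL set while
# NOT_IN_SAMPLE is clear.
sub flags_consistent
{
	my ($flags) = @_;
	return !(($flags & GUC_NO_SHOW_ALL) && !($flags & GUC_NOT_IN_SAMPLE));
}

print flags_consistent(GUC_NO_SHOW_ALL) ? "ok\n" : "inconsistent\n";
print flags_consistent(GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE)
  ? "ok\n"
  : "inconsistent\n";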
*/ if ((gconf->flags & GUC_NO_SHOW_ALL) && !(gconf->flags & GUC_NOT_IN_SAMPLE)) diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index 844781a7f5..c27eb36758 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -4685,8 +4685,8 @@ struct config_enum ConfigureNamesEnum[] = { {"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE, - gettext_noop("Log level for reporting invalid ICU locale strings."), - NULL + gettext_noop("Log level for reporting invalid ICU locale strings."), + NULL }, &icu_validation_level, WARNING, icu_validation_level_options, diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c index f5a62061a3..7a3781466e 100644 --- a/src/backend/utils/mmgr/dsa.c +++ b/src/backend/utils/mmgr/dsa.c @@ -1369,7 +1369,7 @@ init_span(dsa_area *area, if (DsaPointerIsValid(pool->spans[1])) { dsa_area_span *head = (dsa_area_span *) - dsa_get_address(area, pool->spans[1]); + dsa_get_address(area, pool->spans[1]); head->prevspan = span_pointer; } @@ -2215,7 +2215,7 @@ make_new_segment(dsa_area *area, size_t requested_pages) if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE) { dsa_segment_map *next = - get_segment_by_index(area, segment_map->header->next); + get_segment_by_index(area, segment_map->header->next); Assert(next->header->bin == segment_map->header->bin); next->header->prev = new_index; diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c index 722a2e34db..8f9ea090fa 100644 --- a/src/backend/utils/mmgr/freepage.c +++ b/src/backend/utils/mmgr/freepage.c @@ -285,7 +285,7 @@ sum_free_pages(FreePageManager *fpm) if (!relptr_is_null(fpm->freelist[list])) { FreePageSpanLeader *candidate = - relptr_access(base, fpm->freelist[list]); + relptr_access(base, fpm->freelist[list]); do { diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 42b90e4d4f..9fc83f11f6 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children, * * We don't buffer the information about all memory contexts in a * backend into StringInfo and log it as one message. That would - * require the buffer to be enlarged, risking an OOM as there could - * be a large number of memory contexts in a backend. Instead, we - * log one message per memory context. + * require the buffer to be enlarged, risking an OOM as there could be + * a large number of memory contexts in a backend. Instead, we log + * one message per memory context. 
*/ ereport(LOG_SERVER_ONLY, (errhidestmt(true), diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c index 7dec652106..f926f1faad 100644 --- a/src/backend/utils/resowner/resowner.c +++ b/src/backend/utils/resowner/resowner.c @@ -587,7 +587,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner, while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres)) { pg_cryptohash_ctx *context = - (pg_cryptohash_ctx *) DatumGetPointer(foundres); + (pg_cryptohash_ctx *) DatumGetPointer(foundres); if (isCommit) PrintCryptoHashLeakWarning(foundres); diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 95c3970437..e5a4e5b371 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state) /* * We were able to accumulate all the tuples required for output * in memory, using a heap to eliminate excess tuples. Now we - * have to transform the heap to a properly-sorted array. - * Note that sort_bounded_heap sets the correct state->status. + * have to transform the heap to a properly-sorted array. Note + * that sort_bounded_heap sets the correct state->status. */ sort_bounded_heap(state); state->current = 0; diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index c9ca44d8b7..3a419e348f 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -1990,7 +1990,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) int bucket = (oldSnapshotControl->head_offset + ((ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE)) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin)) oldSnapshotControl->xid_by_minute[bucket] = xmin; @@ -2057,7 +2057,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) /* Extend map to unused entry. */ int new_tail = (oldSnapshotControl->head_offset + oldSnapshotControl->count_used) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; oldSnapshotControl->count_used++; oldSnapshotControl->xid_by_minute[new_tail] = xmin; @@ -2188,7 +2188,7 @@ SerializeSnapshot(Snapshot snapshot, char *start_address) if (serialized_snapshot.subxcnt > 0) { Size subxipoff = sizeof(SerializedSnapshotData) + - snapshot->xcnt * sizeof(TransactionId); + snapshot->xcnt * sizeof(TransactionId); memcpy((TransactionId *) (start_address + subxipoff), snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId)); diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 30b576932f..31156e863b 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1565,8 +1565,8 @@ static void setup_auth(FILE *cmdfd) { /* - * The authid table shouldn't be readable except through views, to - * ensure passwords are not publicly visible. + * The authid table shouldn't be readable except through views, to ensure + * passwords are not publicly visible. */ PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n"); @@ -1957,9 +1957,9 @@ make_template0(FILE *cmdfd) " STRATEGY = file_copy;\n\n"); /* - * template0 shouldn't have any collation-dependent objects, so unset - * the collation version. This disables collation version checks when - * making a new database from it. + * template0 shouldn't have any collation-dependent objects, so unset the + * collation version. 
This disables collation version checks when making + * a new database from it. */ PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n"); @@ -1969,9 +1969,8 @@ make_template0(FILE *cmdfd) PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n"); /* - * Explicitly revoke public create-schema and create-temp-table - * privileges in template1 and template0; else the latter would be on - * by default + * Explicitly revoke public create-schema and create-temp-table privileges + * in template1 and template0; else the latter would be on by default */ PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n"); PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n"); @@ -2244,11 +2243,11 @@ static char * icu_language_tag(const char *loc_str) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - char *langtag; - size_t buflen = 32; /* arbitrary starting buffer size */ - const bool strict = true; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + char *langtag; + size_t buflen = 32; /* arbitrary starting buffer size */ + const bool strict = true; status = U_ZERO_ERROR; uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status); @@ -2264,8 +2263,8 @@ icu_language_tag(const char *loc_str) return pstrdup("en-US-u-va-posix"); /* - * A BCP47 language tag doesn't have a clearly-defined upper limit - * (cf. RFC5646 section 4.4). Additionally, in older ICU versions, + * A BCP47 language tag doesn't have a clearly-defined upper limit (cf. + * RFC5646 section 4.4). Additionally, in older ICU versions, * uloc_toLanguageTag() doesn't always return the ultimate length on the * first call, necessitating a loop. 
*/ @@ -2298,7 +2297,7 @@ icu_language_tag(const char *loc_str) return langtag; #else pg_fatal("ICU is not supported in this build"); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ #endif } @@ -2311,9 +2310,9 @@ static void icu_validate_locale(const char *loc_str) { #ifdef USE_ICU - UErrorCode status; - char lang[ULOC_LANG_CAPACITY]; - bool found = false; + UErrorCode status; + char lang[ULOC_LANG_CAPACITY]; + bool found = false; /* validate that we can extract the language */ status = U_ZERO_ERROR; @@ -2334,8 +2333,8 @@ icu_validate_locale(const char *loc_str) /* search for matching language within ICU */ for (int32_t i = 0; !found && i < uloc_countAvailable(); i++) { - const char *otherloc = uloc_getAvailable(i); - char otherlang[ULOC_LANG_CAPACITY]; + const char *otherloc = uloc_getAvailable(i); + char otherlang[ULOC_LANG_CAPACITY]; status = U_ZERO_ERROR; uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status); @@ -2366,10 +2365,10 @@ static char * default_icu_locale(void) { #ifdef USE_ICU - UCollator *collator; - UErrorCode status; - const char *valid_locale; - char *default_locale; + UCollator *collator; + UErrorCode status; + const char *valid_locale; + char *default_locale; status = U_ZERO_ERROR; collator = ucol_open(NULL, &status); @@ -2449,7 +2448,7 @@ setlocales(void) if (locale_provider == COLLPROVIDER_ICU) { - char *langtag; + char *langtag; /* acquire default locale from the environment, if not specified */ if (icu_locale == NULL) diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl index 17a444d80c..fa00bb3dab 100644 --- a/src/bin/initdb/t/001_initdb.pl +++ b/src/bin/initdb/t/001_initdb.pl @@ -105,7 +105,7 @@ if ($ENV{with_icu} eq 'yes') { command_ok( [ - 'initdb', '--no-sync', + 'initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=en', "$tempdir/data3" ], @@ -113,7 +113,7 @@ if ($ENV{with_icu} eq 'yes') command_fails_like( [ - 'initdb', '--no-sync', + 'initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', "$tempdir/dataX" ], @@ -122,7 +122,7 @@ if ($ENV{with_icu} eq 'yes') command_fails_like( [ - 'initdb', '--no-sync', + 'initdb', '--no-sync', '--locale-provider=icu', '--encoding=SQL_ASCII', '--icu-locale=en', "$tempdir/dataX" ], @@ -131,18 +131,18 @@ if ($ENV{with_icu} eq 'yes') command_fails_like( [ - 'initdb', '--no-sync', - '--locale-provider=icu', - '--icu-locale=nonsense-nowhere', "$tempdir/dataX" + 'initdb', '--no-sync', + '--locale-provider=icu', '--icu-locale=nonsense-nowhere', + "$tempdir/dataX" ], qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/, 'fails for nonsense language'); command_fails_like( [ - 'initdb', '--no-sync', - '--locale-provider=icu', - '--icu-locale=@colNumeric=lower', "$tempdir/dataX" + 'initdb', '--no-sync', + '--locale-provider=icu', '--icu-locale=@colNumeric=lower', + "$tempdir/dataX" ], qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/, 'fails for invalid collation argument'); @@ -160,7 +160,7 @@ command_fails( command_fails( [ - 'initdb', '--no-sync', + 'initdb', '--no-sync', '--locale-provider=libc', '--icu-locale=en', "$tempdir/dataX" ], diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl index e3cfae9cd4..cf2438717e 100644 --- a/src/bin/pg_amcheck/t/002_nonesuch.pl +++ b/src/bin/pg_amcheck/t/002_nonesuch.pl @@ -183,7 +183,7 @@ $node->command_checks_all( $node->command_checks_all( [ 'pg_amcheck', '--no-strict-names', - '-t', 
'this.is.a.really.long.dotted.string' + '-t', 'this.is.a.really.long.dotted.string' ], 2, [qr/^$/], @@ -252,20 +252,20 @@ $node->command_checks_all( $node->command_checks_all( [ 'pg_amcheck', '--no-strict-names', - '-t', 'no_such_table', - '-t', 'no*such*table', - '-i', 'no_such_index', - '-i', 'no*such*index', - '-r', 'no_such_relation', - '-r', 'no*such*relation', - '-d', 'no_such_database', - '-d', 'no*such*database', - '-r', 'none.none', - '-r', 'none.none.none', - '-r', 'postgres.none.none', - '-r', 'postgres.pg_catalog.none', - '-r', 'postgres.none.pg_class', - '-t', 'postgres.pg_catalog.pg_class', # This exists + '-t', 'no_such_table', + '-t', 'no*such*table', + '-i', 'no_such_index', + '-i', 'no*such*index', + '-r', 'no_such_relation', + '-r', 'no*such*relation', + '-d', 'no_such_database', + '-d', 'no*such*database', + '-r', 'none.none', + '-r', 'none.none.none', + '-r', 'postgres.none.none', + '-r', 'postgres.pg_catalog.none', + '-r', 'postgres.none.pg_class', + '-t', 'postgres.pg_catalog.pg_class', # This exists ], 0, [qr/^$/], @@ -304,13 +304,13 @@ $node->safe_psql('postgres', q(CREATE DATABASE another_db)); $node->command_checks_all( [ 'pg_amcheck', '-d', - 'postgres', '--no-strict-names', - '-t', 'template1.public.foo', - '-t', 'another_db.public.foo', - '-t', 'no_such_database.public.foo', - '-i', 'template1.public.foo_idx', - '-i', 'another_db.public.foo_idx', - '-i', 'no_such_database.public.foo_idx', + 'postgres', '--no-strict-names', + '-t', 'template1.public.foo', + '-t', 'another_db.public.foo', + '-t', 'no_such_database.public.foo', + '-i', 'template1.public.foo_idx', + '-i', 'another_db.public.foo_idx', + '-i', 'no_such_database.public.foo_idx', ], 1, [qr/^$/], @@ -334,8 +334,8 @@ $node->command_checks_all( $node->command_checks_all( [ 'pg_amcheck', '--all', '--no-strict-names', '-S', - 'public', '-S', 'pg_catalog', '-S', - 'pg_toast', '-S', 'information_schema', + 'public', '-S', 'pg_catalog', '-S', + 'pg_toast', '-S', 'information_schema', ], 1, [qr/^$/], @@ -348,9 +348,9 @@ $node->command_checks_all( # Check with schema exclusion patterns overriding relation and schema inclusion patterns $node->command_checks_all( [ - 'pg_amcheck', '--all', '--no-strict-names', '-s', - 'public', '-s', 'pg_catalog', '-s', - 'pg_toast', '-s', 'information_schema', '-t', + 'pg_amcheck', '--all', '--no-strict-names', '-s', + 'public', '-s', 'pg_catalog', '-s', + 'pg_toast', '-s', 'information_schema', '-t', 'pg_catalog.pg_class', '-S*' ], 1, diff --git a/src/bin/pg_amcheck/t/003_check.pl b/src/bin/pg_amcheck/t/003_check.pl index 359abe25a1..d577cffa30 100644 --- a/src/bin/pg_amcheck/t/003_check.pl +++ b/src/bin/pg_amcheck/t/003_check.pl @@ -319,7 +319,7 @@ plan_to_remove_relation_file('db2', 's1.t1_btree'); my @cmd = ('pg_amcheck', '-p', $port); # Regular expressions to match various expected output -my $no_output_re = qr/^$/; +my $no_output_re = qr/^$/; my $line_pointer_corruption_re = qr/line pointer/; my $missing_file_re = qr/could not open file ".*": No such file or directory/; my $index_missing_relation_fork_re = diff --git a/src/bin/pg_amcheck/t/004_verify_heapam.pl b/src/bin/pg_amcheck/t/004_verify_heapam.pl index aa62422316..1b5027c420 100644 --- a/src/bin/pg_amcheck/t/004_verify_heapam.pl +++ b/src/bin/pg_amcheck/t/004_verify_heapam.pl @@ -105,31 +105,31 @@ sub read_tuple @_ = unpack(HEAPTUPLE_PACK_CODE, $buffer); %tup = ( - t_xmin => shift, - t_xmax => shift, - t_field3 => shift, - bi_hi => shift, - bi_lo => shift, - ip_posid => shift, - t_infomask2 => shift, - t_infomask => 
shift, - t_hoff => shift, - t_bits => shift, - a_1 => shift, - a_2 => shift, - b_header => shift, - b_body1 => shift, - b_body2 => shift, - b_body3 => shift, - b_body4 => shift, - b_body5 => shift, - b_body6 => shift, - b_body7 => shift, - c_va_header => shift, - c_va_vartag => shift, - c_va_rawsize => shift, - c_va_extinfo => shift, - c_va_valueid => shift, + t_xmin => shift, + t_xmax => shift, + t_field3 => shift, + bi_hi => shift, + bi_lo => shift, + ip_posid => shift, + t_infomask2 => shift, + t_infomask => shift, + t_hoff => shift, + t_bits => shift, + a_1 => shift, + a_2 => shift, + b_header => shift, + b_body1 => shift, + b_body2 => shift, + b_body3 => shift, + b_body4 => shift, + b_body5 => shift, + b_body6 => shift, + b_body7 => shift, + c_va_header => shift, + c_va_vartag => shift, + c_va_rawsize => shift, + c_va_extinfo => shift, + c_va_valueid => shift, c_va_toastrelid => shift); # Stitch together the text for column 'b' $tup{b} = join('', map { chr($tup{"b_body$_"}) } (1 .. 7)); @@ -151,17 +151,17 @@ sub write_tuple my ($fh, $offset, $tup) = @_; my $buffer = pack( HEAPTUPLE_PACK_CODE, - $tup->{t_xmin}, $tup->{t_xmax}, - $tup->{t_field3}, $tup->{bi_hi}, - $tup->{bi_lo}, $tup->{ip_posid}, - $tup->{t_infomask2}, $tup->{t_infomask}, - $tup->{t_hoff}, $tup->{t_bits}, - $tup->{a_1}, $tup->{a_2}, - $tup->{b_header}, $tup->{b_body1}, - $tup->{b_body2}, $tup->{b_body3}, - $tup->{b_body4}, $tup->{b_body5}, - $tup->{b_body6}, $tup->{b_body7}, - $tup->{c_va_header}, $tup->{c_va_vartag}, + $tup->{t_xmin}, $tup->{t_xmax}, + $tup->{t_field3}, $tup->{bi_hi}, + $tup->{bi_lo}, $tup->{ip_posid}, + $tup->{t_infomask2}, $tup->{t_infomask}, + $tup->{t_hoff}, $tup->{t_bits}, + $tup->{a_1}, $tup->{a_2}, + $tup->{b_header}, $tup->{b_body1}, + $tup->{b_body2}, $tup->{b_body3}, + $tup->{b_body4}, $tup->{b_body5}, + $tup->{b_body6}, $tup->{b_body7}, + $tup->{c_va_header}, $tup->{c_va_vartag}, $tup->{c_va_rawsize}, $tup->{c_va_extinfo}, $tup->{c_va_valueid}, $tup->{c_va_toastrelid}); sysseek($fh, $offset, 0) @@ -188,7 +188,7 @@ $node->append_conf('postgresql.conf', 'max_prepared_transactions=10'); # Start the node and load the extensions. We depend on both # amcheck and pageinspect for this test. $node->start; -my $port = $node->port; +my $port = $node->port; my $pgdata = $node->data_dir; $node->safe_psql('postgres', "CREATE EXTENSION amcheck"); $node->safe_psql('postgres', "CREATE EXTENSION pageinspect"); @@ -354,23 +354,23 @@ binmode $file; my $ENDIANNESS; for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) { - my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based + my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based my $offset = $lp_off[$tupidx]; - next if $offset == -1; # ignore redirect line pointers + next if $offset == -1; # ignore redirect line pointers my $tup = read_tuple($file, $offset); # Sanity-check that the data appears on the page where we expect. 
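read_tuple and write_tuple are deliberate mirror images: a single unpack and a single pack sharing the HEAPTUPLE_PACK_CODE template, so the corruption tests can lift a raw heap tuple off disk, flip individual header bits, and write the identical byte layout back. A toy round-trip with a cut-down template (the four-field layout and template string here are illustrative stand-ins, not the real HEAPTUPLE_PACK_CODE):

use strict;
use warnings;

my $pack_code = "LLLS";    # toy layout: t_xmin, t_xmax, t_field3, t_infomask

my $buffer = pack($pack_code, 1234, 0, 0, 0x0800);

my %tup;
@tup{qw(t_xmin t_xmax t_field3 t_infomask)} = unpack($pack_code, $buffer);

$tup{t_infomask} |= 0x0001;    # set one flag bit, as the corruption tests do

# Repacking with the same template reproduces the original byte layout,
# modulo the bit we just flipped.
my $rewritten =
  pack($pack_code, @tup{qw(t_xmin t_xmax t_field3 t_infomask)});
printf "infomask now 0x%04x\n", (unpack($pack_code, $rewritten))[3];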
my $a_1 = $tup->{a_1}; my $a_2 = $tup->{a_2}; - my $b = $tup->{b}; + my $b = $tup->{b}; if ($a_1 != 0xDEADF9F9 || $a_2 != 0xDEADF9F9 || $b ne 'abcdefg') { close($file); # ignore errors on close; we're exiting anyway $node->clean_node; plan skip_all => sprintf( - "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", $tupidx, - 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b); + "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", + $tupidx, 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b); exit; } @@ -395,18 +395,18 @@ $node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ], $node->stop; # Some #define constants from access/htup_details.h for use while corrupting. -use constant HEAP_HASNULL => 0x0001; +use constant HEAP_HASNULL => 0x0001; use constant HEAP_XMAX_LOCK_ONLY => 0x0080; use constant HEAP_XMIN_COMMITTED => 0x0100; -use constant HEAP_XMIN_INVALID => 0x0200; +use constant HEAP_XMIN_INVALID => 0x0200; use constant HEAP_XMAX_COMMITTED => 0x0400; -use constant HEAP_XMAX_INVALID => 0x0800; -use constant HEAP_NATTS_MASK => 0x07FF; -use constant HEAP_XMAX_IS_MULTI => 0x1000; -use constant HEAP_KEYS_UPDATED => 0x2000; -use constant HEAP_HOT_UPDATED => 0x4000; -use constant HEAP_ONLY_TUPLE => 0x8000; -use constant HEAP_UPDATED => 0x2000; +use constant HEAP_XMAX_INVALID => 0x0800; +use constant HEAP_NATTS_MASK => 0x07FF; +use constant HEAP_XMAX_IS_MULTI => 0x1000; +use constant HEAP_KEYS_UPDATED => 0x2000; +use constant HEAP_HOT_UPDATED => 0x4000; +use constant HEAP_ONLY_TUPLE => 0x8000; +use constant HEAP_UPDATED => 0x2000; # Helper function to generate a regular expression matching the header we # expect verify_heapam() to return given which fields we expect to be non-null. @@ -436,7 +436,7 @@ binmode $file; for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) { - my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based + my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based my $offset = $lp_off[$tupidx]; my $header = header(0, $offnum, undef); @@ -534,7 +534,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) # Corrupt the tuple to look like it has lots of attributes, some of # them null. This falsely creates the impression that the t_bits # array is longer than just one byte, but t_hoff still says otherwise. - $tup->{t_infomask} |= HEAP_HASNULL; + $tup->{t_infomask} |= HEAP_HASNULL; $tup->{t_infomask2} |= HEAP_NATTS_MASK; $tup->{t_bits} = 0xAA; @@ -544,7 +544,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) elsif ($offnum == 11) { # Same as above, but this time t_hoff plays along - $tup->{t_infomask} |= HEAP_HASNULL; + $tup->{t_infomask} |= HEAP_HASNULL; $tup->{t_infomask2} |= (HEAP_NATTS_MASK & 0x40); $tup->{t_bits} = 0xAA; $tup->{t_hoff} = 32; @@ -568,9 +568,9 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) # bytes with 0xFF using 0x3FFFFFFF. # $tup->{b_header} = $ENDIANNESS eq 'little' ? 
0xFC : 0x3F; - $tup->{b_body1} = 0xFF; - $tup->{b_body2} = 0xFF; - $tup->{b_body3} = 0xFF; + $tup->{b_body1} = 0xFF; + $tup->{b_body2} = 0xFF; + $tup->{b_body3} = 0xFF; $header = header(0, $offnum, 1); push @expected, @@ -620,7 +620,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) # at offnum 19 we will unset HEAP_ONLY_TUPLE flag die "offnum $offnum should be a redirect" if defined $tup; push @expected, - qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/; + qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/; } elsif ($offnum == 18) { @@ -628,8 +628,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) die "offnum $offnum should be a redirect" if defined $tup; sysseek($file, 92, 0) or BAIL_OUT("sysseek failed: $!"); syswrite($file, - pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000)) - or BAIL_OUT("syswrite failed: $!"); + pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000)) + or BAIL_OUT("syswrite failed: $!"); push @expected, qr/${header}redirected line pointer points to another redirected line pointer at offset \d+/; } @@ -644,8 +644,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++) # rewrite line pointer with lp.off = 25, lp_flags = 2, lp_len = 0 sysseek($file, 108, 0) or BAIL_OUT("sysseek failed: $!"); syswrite($file, - pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000)) - or BAIL_OUT("syswrite failed: $!"); + pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000)) + or BAIL_OUT("syswrite failed: $!"); push @expected, qr/${header}redirect line pointer points to offset \d+, but offset \d+ also points there/; } @@ -756,7 +756,7 @@ $node->command_checks_all( [ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ], 2, [@expected], [], 'Expected corruption message output'); $node->safe_psql( - 'postgres', qq( + 'postgres', qq( COMMIT PREPARED 'in_progress_tx'; )); diff --git a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl index 76321d1284..cc3386d146 100644 --- a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl +++ b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl @@ -14,7 +14,7 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir; my @walfiles = ( '00000001000000370000000C.gz', '00000001000000370000000D', - '00000001000000370000000E', '00000001000000370000000F.partial',); + '00000001000000370000000E', '00000001000000370000000F.partial',); sub create_files { @@ -57,8 +57,10 @@ command_fails_like( { # like command_like but checking stderr my $stderr; - my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir, - $walfiles[2] ], '2>', \$stderr; + my $result = + IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir, + $walfiles[2] ], + '2>', \$stderr; ok($result, "pg_archivecleanup dry run: exit code 0"); like( $stderr, @@ -98,8 +100,8 @@ sub run_check return; } -run_check('', 'pg_archivecleanup'); -run_check('.partial', 'pg_archivecleanup with .partial file'); +run_check('', 'pg_archivecleanup'); +run_check('.partial', 'pg_archivecleanup with .partial file'); run_check('.00000020.backup', 'pg_archivecleanup with .backup file'); done_testing(); diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index ba471f898c..1dc8efe0cb 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -341,18 +341,18 @@ tablespace_list_append(const char *arg) /* * All tablespaces are created with absolute directories, so 
specifying a - * non-absolute path here would just never match, possibly confusing users. - * Since we don't know whether the remote side is Windows or not, and it - * might be different than the local side, permit any path that could be - * absolute under either set of rules. + * non-absolute path here would just never match, possibly confusing + * users. Since we don't know whether the remote side is Windows or not, + * and it might be different than the local side, permit any path that + * could be absolute under either set of rules. * * (There is little practical risk of confusion here, because someone * running entirely on Linux isn't likely to have a relative path that * begins with a backslash or something that looks like a drive - * specification. If they do, and they also incorrectly believe that - * a relative path is acceptable here, we'll silently fail to warn them - * of their mistake, and the -T option will just not get applied, same - * as if they'd specified -T for a nonexistent tablespace.) + * specification. If they do, and they also incorrectly believe that a + * relative path is acceptable here, we'll silently fail to warn them of + * their mistake, and the -T option will just not get applied, same as if + * they'd specified -T for a nonexistent tablespace.) */ if (!is_nonwindows_absolute_path(cell->old_dir) && !is_windows_absolute_path(cell->old_dir)) diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c index fb9e29682b..d0a4079d50 100644 --- a/src/bin/pg_basebackup/pg_receivewal.c +++ b/src/bin/pg_basebackup/pg_receivewal.c @@ -43,7 +43,7 @@ static char *basedir = NULL; static int verbose = 0; static int compresslevel = 0; -static bool noloop = false; +static bool noloop = false; static int standby_message_timeout = 10 * 1000; /* 10 sec = default */ static volatile sig_atomic_t time_to_stop = false; static bool do_create_slot = false; diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 4d130a7f94..793d64863c 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -4,7 +4,7 @@ use strict; use warnings; use File::Basename qw(basename dirname); -use File::Path qw(rmtree); +use File::Path qw(rmtree); use PostgreSQL::Test::Cluster; use PostgreSQL::Test::Utils; use Test::More; @@ -29,7 +29,7 @@ umask(0077); # Initialize node without replication settings $node->init( - extra => ['--data-checksums'], + extra => ['--data-checksums'], auth_extra => [ '--create-role', 'backupuser' ]); $node->start; my $pgdata = $node->data_dir; @@ -144,8 +144,7 @@ SKIP: 'gzip:long', 'invalid compression specification: compression algorithm "gzip" does not support long-distance mode', 'failure on long mode for gzip' - ], - ); + ],); for my $cft (@compression_failure_tests) { @@ -153,7 +152,7 @@ SKIP: my $sfail = quotemeta($server_fails . $cft->[1]); $node->command_fails_like( [ - 'pg_basebackup', '-D', + 'pg_basebackup', '-D', "$tempdir/backup", '--compress', $cft->[0] ], @@ -161,7 +160,7 @@ SKIP: 'client ' . $cft->[2]); $node->command_fails_like( [ - 'pg_basebackup', '-D', + 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'server-' . 
$cft->[0] ], @@ -193,7 +192,7 @@ my $baseUnloggedPath = $node->safe_psql('postgres', # Make sure main and init forks exist ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base'); -ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base'); +ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base'); # Create files that look like temporary relations to ensure they are ignored. my $postgresOid = $node->safe_psql('postgres', @@ -211,7 +210,7 @@ foreach my $filename (@tempRelationFiles) $node->command_ok( [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ], 'pg_basebackup runs'); -ok(-f "$tempdir/backup/PG_VERSION", 'backup was created'); +ok(-f "$tempdir/backup/PG_VERSION", 'backup was created'); ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included'); # Permissions on backup should be default @@ -274,13 +273,13 @@ unlink("$pgdata/backup_label") $node->command_ok( [ @pg_basebackup_defs, '-D', - "$tempdir/backup2", '--no-manifest', - '--waldir', "$tempdir/xlog2" + "$tempdir/backup2", '--no-manifest', + '--waldir', "$tempdir/xlog2" ], 'separate xlog directory'); -ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created'); +ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created'); ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed'); -ok(-d "$tempdir/xlog2/", 'xlog directory was created'); +ok(-d "$tempdir/xlog2/", 'xlog directory was created'); rmtree("$tempdir/backup2"); rmtree("$tempdir/xlog2"); @@ -346,7 +345,7 @@ $node->start; # to our physical temp location. That way we can use shorter names # for the tablespace directories, which hopefully won't run afoul of # the 99 character length limit. -my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short; +my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short; my $real_sys_tempdir = "$sys_tempdir/tempdir"; dir_symlink "$tempdir", $real_sys_tempdir; @@ -355,7 +354,7 @@ my $realTsDir = "$real_sys_tempdir/tblspc1"; $node->safe_psql('postgres', "CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';"); $node->safe_psql('postgres', - "CREATE TABLE test1 (a int) TABLESPACE tblspc1;" + "CREATE TABLE test1 (a int) TABLESPACE tblspc1;" . "INSERT INTO test1 VALUES (1234);"); $node->backup('tarbackup2', backup_options => ['-Ft']); # empty test1, just so that it's different from the to-be-restored data @@ -363,7 +362,7 @@ $node->safe_psql('postgres', "TRUNCATE TABLE test1;"); # basic checks on the output my $backupdir = $node->backup_dir . '/tarbackup2'; -ok(-f "$backupdir/base.tar", 'backup tar was created'); +ok(-f "$backupdir/base.tar", 'backup tar was created'); ok(-f "$backupdir/pg_wal.tar", 'WAL tar was created'); my @tblspc_tars = glob "$backupdir/[0-9]*.tar"; is(scalar(@tblspc_tars), 1, 'one tablespace tar was created'); @@ -385,7 +384,7 @@ SKIP: $node2->init_from_backup($node, 'tarbackup2', tar_program => $tar); # Recover tablespace into a new directory (not where it was!) - my $repTsDir = "$tempdir/tblspc1replica"; + my $repTsDir = "$tempdir/tblspc1replica"; my $realRepTsDir = "$real_sys_tempdir/tblspc1replica"; mkdir $repTsDir; PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0], @@ -394,7 +393,7 @@ SKIP: # Update tablespace map to point to new directory. # XXX Ideally pg_basebackup would handle this. $tblspc_tars[0] =~ m|/([0-9]*)\.tar$|; - my $tblspcoid = $1; + my $tblspcoid = $1; my $escapedRepTsDir = $realRepTsDir; $escapedRepTsDir =~ s/\\/\\\\/g; open my $mapfile, '>', $node2->data_dir . 
'/tablespace_map'; @@ -442,7 +441,7 @@ $node->command_fails( $node->command_ok( [ @pg_basebackup_defs, '-D', - "$tempdir/backup1", '-Fp', + "$tempdir/backup1", '-Fp', "-T$realTsDir=$tempdir/tbackup/tblspc1", ], 'plain format with tablespaces succeeds with tablespace mapping'); @@ -512,7 +511,7 @@ $realTsDir =~ s/=/\\=/; $node->command_ok( [ @pg_basebackup_defs, '-D', - "$tempdir/backup3", '-Fp', + "$tempdir/backup3", '-Fp', "-T$realTsDir=$tempdir/tbackup/tbl\\=spc2", ], 'mapping tablespace with = sign in path'); @@ -533,7 +532,7 @@ rmtree("$tempdir/tarbackup_l3"); $node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ], 'pg_basebackup -R runs'); ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists'); -ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created'); +ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created'); my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf"; rmtree("$tempdir/backupR"); @@ -572,9 +571,9 @@ ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created"); rmtree("$tempdir/backupxst"); $node->command_ok( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupnoslot", '-X', - 'stream', '--no-slot' + 'stream', '--no-slot' ], 'pg_basebackup -X stream runs with --no-slot'); rmtree("$tempdir/backupnoslot"); @@ -597,7 +596,7 @@ $node->command_fails_like( $node->command_fails_like( [ @pg_basebackup_defs, '--target', 'blackhole', '-X', - 'none', '-D', "$tempdir/blackhole" + 'none', '-D', "$tempdir/blackhole" ], qr/cannot specify both output directory and backup target/, 'backup target and output directory'); @@ -610,7 +609,7 @@ $node->command_ok( 'backup target blackhole'); $node->command_ok( [ - @pg_basebackup_defs, '--target', + @pg_basebackup_defs, '--target', "server:$tempdir/backuponserver", '-X', 'none' ], @@ -634,9 +633,9 @@ rmtree("$tempdir/backuponserver"); $node->command_fails( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_fail", '-X', - 'stream', '-S', + 'stream', '-S', 'slot0' ], 'pg_basebackup fails with nonexistent replication slot'); @@ -647,9 +646,9 @@ $node->command_fails( $node->command_fails( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', - '-S', 'slot0', + '-S', 'slot0', '--no-slot' ], 'pg_basebackup fails with -C -S --no-slot'); @@ -667,9 +666,9 @@ $node->command_ok( $node->command_fails( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_fail", '-X', - 'stream', '-S', + 'stream', '-S', 'slot0' ], 'pg_basebackup fails with nonexistent replication slot'); @@ -680,18 +679,18 @@ $node->command_fails( $node->command_fails( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', - '-S', 'slot0', + '-S', 'slot0', '--no-slot' ], 'pg_basebackup fails with -C -S --no-slot'); $node->command_ok( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', - '-S', 'slot0' + '-S', 'slot0' ], 'pg_basebackup -C runs'); rmtree("$tempdir/backupxs_slot"); @@ -712,9 +711,9 @@ isnt( $node->command_fails( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot1", '-C', - '-S', 'slot0' + '-S', 'slot0' ], 'pg_basebackup fails with -C -S and a previously existing slot'); @@ -727,13 +726,13 @@ is($lsn, '', 'restart LSN of new slot is null'); $node->command_fails( [ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S', - 'slot1', '-X', 'none' + 'slot1', '-X', 'none' ], 
'pg_basebackup with replication slot fails without WAL streaming'); $node->command_ok( [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X', - 'stream', '-S', 'slot1' + 'stream', '-S', 'slot1' ], 'pg_basebackup -X stream with replication slot runs'); $lsn = $node->safe_psql('postgres', @@ -745,7 +744,7 @@ rmtree("$tempdir/backupxs_sl"); $node->command_ok( [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X', - 'stream', '-S', 'slot1', '-R', + 'stream', '-S', 'slot1', '-R', ], 'pg_basebackup with replication slot and -R runs'); like( @@ -813,7 +812,7 @@ rmtree("$tempdir/backup_corrupt3"); # do not verify checksums, should return ok $node->command_ok( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt4", '--no-verify-checksums', ], 'pg_basebackup with -k does not report checksum mismatch'); @@ -832,24 +831,24 @@ SKIP: $node->command_ok( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backup_gzip", '--compress', - '1', '--format', + '1', '--format', 't' ], 'pg_basebackup with --compress'); $node->command_ok( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backup_gzip2", '--gzip', - '--format', 't' + '--format', 't' ], 'pg_basebackup with --gzip'); $node->command_ok( [ - @pg_basebackup_defs, '-D', + @pg_basebackup_defs, '-D', "$tempdir/backup_gzip3", '--compress', - 'gzip:1', '--format', + 'gzip:1', '--format', 't' ], 'pg_basebackup with --compress=gzip:1'); @@ -895,8 +894,8 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', ''); my $sigchld_bb = IPC::Run::start( [ @pg_basebackup_defs, '--wal-method=stream', - '-D', "$tempdir/sigchld", - '--max-rate=32', '-d', + '-D', "$tempdir/sigchld", + '--max-rate=32', '-d', $node->connstr('postgres') ], '<', @@ -916,16 +915,17 @@ is( $node->poll_query_until( "Walsender killed"); ok( pump_until( - $sigchld_bb, $sigchld_bb_timeout, + $sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr, qr/background process terminated unexpectedly/), 'background process exit message'); $sigchld_bb->finish(); # Test that we can back up an in-place tablespace $node->safe_psql('postgres', - "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"); + "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';" +); $node->safe_psql('postgres', - "CREATE TABLE test2 (a int) TABLESPACE tblspc2;" + "CREATE TABLE test2 (a int) TABLESPACE tblspc2;" . "INSERT INTO test2 VALUES (1234);"); my $tblspc_oid = $node->safe_psql('postgres', "SELECT oid FROM pg_tablespace WHERE spcname = 'tblspc2';"); diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl index 50ac4f94ec..374f090a8b 100644 --- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl +++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl @@ -66,8 +66,8 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (1);'); # compression involved. 
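The pg_receivewal tests being reindented here all share one TAP idiom: capture a stop LSN first, then run the client with --endpos so streaming terminates deterministically instead of looping forever, and assert the exit status through the node's command_ok wrapper. A condensed sketch of that idiom (cluster setup reduced to the essentials; the node and test names are invented for the example):

use strict;
use warnings;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;

my $primary = PostgreSQL::Test::Cluster->new('receivewal_sketch');
$primary->init(allows_streaming => 1);
$primary->start;

my $stream_dir = PostgreSQL::Test::Utils::tempdir;

# The stop point must exist before the client starts, or --endpos could
# name an LSN the server never reaches.
my $nextlsn =
  $primary->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn();');
chomp($nextlsn);

$primary->command_ok(
	[
		'pg_receivewal', '-D', $stream_dir, '--verbose',
		'--endpos', $nextlsn, '--no-loop'
	],
	'streams WAL up to the stop LSN, then exits');

done_testing();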
$primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--synchronous', '--no-loop' + 'pg_receivewal', '-D', $stream_dir, '--verbose', + '--endpos', $nextlsn, '--synchronous', '--no-loop' ], 'streaming some WAL with --synchronous'); @@ -92,8 +92,8 @@ SKIP: $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--compress', 'gzip:1', + 'pg_receivewal', '-D', $stream_dir, '--verbose', + '--endpos', $nextlsn, '--compress', 'gzip:1', '--no-loop' ], "streaming some WAL using ZLIB compression"); @@ -145,8 +145,8 @@ SKIP: # Stream up to the given position. $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--no-loop', '--compress', + 'pg_receivewal', '-D', $stream_dir, '--verbose', + '--endpos', $nextlsn, '--no-loop', '--compress', 'lz4' ], 'streaming some WAL using --compress=lz4'); @@ -191,8 +191,8 @@ chomp($nextlsn); $primary->psql('postgres', 'INSERT INTO test_table VALUES (4);'); $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--no-loop' + 'pg_receivewal', '-D', $stream_dir, '--verbose', + '--endpos', $nextlsn, '--no-loop' ], "streaming some WAL"); @@ -247,17 +247,17 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (6);'); # Check case where the slot does not exist. $primary->command_fails_like( [ - 'pg_receivewal', '-D', $slot_dir, '--slot', + 'pg_receivewal', '-D', $slot_dir, '--slot', 'nonexistentslot', '-n', '--no-sync', '--verbose', - '--endpos', $nextlsn + '--endpos', $nextlsn ], qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/, 'pg_receivewal fails with non-existing slot'); $primary->command_ok( [ - 'pg_receivewal', '-D', $slot_dir, '--slot', - $slot_name, '-n', '--no-sync', '--verbose', - '--endpos', $nextlsn + 'pg_receivewal', '-D', $slot_dir, '--slot', + $slot_name, '-n', '--no-sync', '--verbose', + '--endpos', $nextlsn ], "WAL streamed from the slot's restart_lsn"); ok(-e "$slot_dir/$walfile_streamed", @@ -281,7 +281,7 @@ $standby->psql( $primary->wait_for_catchup($standby); # Get a walfilename from before the promotion to make sure it is archived # after promotion -my $standby_slot = $standby->slot($archive_slot); +my $standby_slot = $standby->slot($archive_slot); my $replication_slot_lsn = $standby_slot->{'restart_lsn'}; # pg_walfile_name() is not supported while in recovery, so use the primary @@ -311,9 +311,9 @@ mkdir($timeline_dir); $standby->command_ok( [ - 'pg_receivewal', '-D', $timeline_dir, '--verbose', - '--endpos', $nextlsn, '--slot', $archive_slot, - '--no-sync', '-n' + 'pg_receivewal', '-D', $timeline_dir, '--verbose', + '--endpos', $nextlsn, '--slot', $archive_slot, + '--no-sync', '-n' ], "Stream some wal after promoting, resuming from the slot's position"); ok(-e "$timeline_dir/$walfile_before_promotion", diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl index 6947d12ca8..62dca5b67a 100644 --- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl +++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl @@ -34,16 +34,16 @@ $node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ], 'pg_recvlogical needs an action'); $node->command_fails( [ - 'pg_recvlogical', '-S', - 'test', '-d', + 'pg_recvlogical', '-S', + 'test', '-d', $node->connstr('postgres'), '--start' ], 'no destination file'); $node->command_ok( [ - 'pg_recvlogical', '-S', - 'test', '-d', + 'pg_recvlogical', '-S', + 
'test', '-d', $node->connstr('postgres'), '--create-slot' ], 'slot created'); @@ -67,8 +67,8 @@ $node->command_ok( $node->command_ok( [ - 'pg_recvlogical', '-S', - 'test', '-d', + 'pg_recvlogical', '-S', + 'test', '-d', $node->connstr('postgres'), '--drop-slot' ], 'slot dropped'); @@ -76,8 +76,8 @@ $node->command_ok( #test with two-phase option enabled $node->command_ok( [ - 'pg_recvlogical', '-S', - 'test', '-d', + 'pg_recvlogical', '-S', + 'test', '-d', $node->connstr('postgres'), '--create-slot', '--two-phase' ], @@ -94,12 +94,12 @@ chomp($nextlsn); $node->command_fails( [ - 'pg_recvlogical', '-S', - 'test', '-d', + 'pg_recvlogical', '-S', + 'test', '-d', $node->connstr('postgres'), '--start', - '--endpos', "$nextlsn", - '--two-phase', '--no-loop', - '-f', '-' + '--endpos', "$nextlsn", + '--two-phase', '--no-loop', + '-f', '-' ], 'incorrect usage'); diff --git a/src/bin/pg_basebackup/walmethods.c b/src/bin/pg_basebackup/walmethods.c index 1934b7dd46..376ddf72b7 100644 --- a/src/bin/pg_basebackup/walmethods.c +++ b/src/bin/pg_basebackup/walmethods.c @@ -44,14 +44,14 @@ static Walfile *dir_open_for_write(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); -static int dir_close(Walfile *f, WalCloseMethod method); +static int dir_close(Walfile *f, WalCloseMethod method); static bool dir_existsfile(WalWriteMethod *wwmethod, const char *pathname); static ssize_t dir_get_file_size(WalWriteMethod *wwmethod, const char *pathname); static char *dir_get_file_name(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix); static ssize_t dir_write(Walfile *f, const void *buf, size_t count); -static int dir_sync(Walfile *f); +static int dir_sync(Walfile *f); static bool dir_finish(WalWriteMethod *wwmethod); static void dir_free(WalWriteMethod *wwmethod); @@ -72,7 +72,7 @@ const WalWriteMethodOps WalDirectoryMethodOps = { */ typedef struct DirectoryMethodData { - WalWriteMethod base; + WalWriteMethod base; char *basedir; } DirectoryMethodData; @@ -660,14 +660,14 @@ static Walfile *tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); -static int tar_close(Walfile *f, WalCloseMethod method); +static int tar_close(Walfile *f, WalCloseMethod method); static bool tar_existsfile(WalWriteMethod *wwmethod, const char *pathname); static ssize_t tar_get_file_size(WalWriteMethod *wwmethod, const char *pathname); static char *tar_get_file_name(WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix); static ssize_t tar_write(Walfile *f, const void *buf, size_t count); -static int tar_sync(Walfile *f); +static int tar_sync(Walfile *f); static bool tar_finish(WalWriteMethod *wwmethod); static void tar_free(WalWriteMethod *wwmethod); @@ -693,7 +693,7 @@ typedef struct TarMethodFile typedef struct TarMethodData { - WalWriteMethod base; + WalWriteMethod base; char *tarfilename; int fd; TarMethodFile *currentfile; @@ -1353,7 +1353,7 @@ CreateWalTarMethod(const char *tarbase, { TarMethodData *wwmethod; const char *suffix = (compression_algorithm == PG_COMPRESSION_GZIP) ? 
- ".tar.gz" : ".tar"; + ".tar.gz" : ".tar"; wwmethod = pg_malloc0(sizeof(TarMethodData)); *((const WalWriteMethodOps **) &wwmethod->base.ops) = diff --git a/src/bin/pg_basebackup/walmethods.h b/src/bin/pg_basebackup/walmethods.h index d7284c08ce..54a22fe607 100644 --- a/src/bin/pg_basebackup/walmethods.h +++ b/src/bin/pg_basebackup/walmethods.h @@ -19,11 +19,12 @@ typedef struct WalWriteMethod *wwmethod; off_t currpos; char *pathname; + /* * MORE DATA FOLLOWS AT END OF STRUCT * - * Each WalWriteMethod is expected to embed this as the first member of - * a larger struct with method-specific fields following. + * Each WalWriteMethod is expected to embed this as the first member of a + * larger struct with method-specific fields following. */ } Walfile; @@ -45,7 +46,7 @@ typedef struct WalWriteMethodOps * automatically renamed in close(). If pad_to_size is specified, the file * will be padded with NUL up to that size, if supported by the Walmethod. */ - Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); + Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size); /* * Close an open Walfile, using one or more methods for handling automatic @@ -107,11 +108,12 @@ struct WalWriteMethod bool sync; const char *lasterrstring; /* if set, takes precedence over lasterrno */ int lasterrno; + /* * MORE DATA FOLLOWS AT END OF STRUCT * - * Each WalWriteMethod is expected to embed this as the first member of - * a larger struct with method-specific fields following. + * Each WalWriteMethod is expected to embed this as the first member of a + * larger struct with method-specific fields following. */ }; diff --git a/src/bin/pg_checksums/t/002_actions.pl b/src/bin/pg_checksums/t/002_actions.pl index 2316f611b2..2d63182d59 100644 --- a/src/bin/pg_checksums/t/002_actions.pl +++ b/src/bin/pg_checksums/t/002_actions.pl @@ -18,10 +18,10 @@ use Test::More; # at the end. sub check_relation_corruption { - my $node = shift; - my $table = shift; + my $node = shift; + my $table = shift; my $tablespace = shift; - my $pgdata = $node->data_dir; + my $pgdata = $node->data_dir; # Create table and discover its filesystem location. $node->safe_psql( @@ -44,8 +44,8 @@ sub check_relation_corruption command_ok( [ 'pg_checksums', '--check', - '-D', $pgdata, - '--filenode', $relfilenode_corrupted + '-D', $pgdata, + '--filenode', $relfilenode_corrupted ], "succeeds for single relfilenode on tablespace $tablespace with offline cluster" ); @@ -57,8 +57,8 @@ sub check_relation_corruption $node->command_checks_all( [ 'pg_checksums', '--check', - '-D', $pgdata, - '--filenode', $relfilenode_corrupted + '-D', $pgdata, + '--filenode', $relfilenode_corrupted ], 1, [qr/Bad checksums:.*1/], @@ -97,21 +97,21 @@ command_like( 'checksums disabled in control file'); # These are correct but empty files, so they should pass through. 
-append_to_file "$pgdata/global/99999", ""; -append_to_file "$pgdata/global/99999.123", ""; -append_to_file "$pgdata/global/99999_fsm", ""; -append_to_file "$pgdata/global/99999_init", ""; -append_to_file "$pgdata/global/99999_vm", ""; +append_to_file "$pgdata/global/99999", ""; +append_to_file "$pgdata/global/99999.123", ""; +append_to_file "$pgdata/global/99999_fsm", ""; +append_to_file "$pgdata/global/99999_init", ""; +append_to_file "$pgdata/global/99999_vm", ""; append_to_file "$pgdata/global/99999_init.123", ""; -append_to_file "$pgdata/global/99999_fsm.123", ""; -append_to_file "$pgdata/global/99999_vm.123", ""; +append_to_file "$pgdata/global/99999_fsm.123", ""; +append_to_file "$pgdata/global/99999_vm.123", ""; # These are temporary files and folders with dummy contents, which # should be ignored by the scan. append_to_file "$pgdata/global/pgsql_tmp_123", "foo"; mkdir "$pgdata/global/pgsql_tmp"; -append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo"; -append_to_file "$pgdata/global/pg_internal.init", "foo"; +append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo"; +append_to_file "$pgdata/global/pg_internal.init", "foo"; append_to_file "$pgdata/global/pg_internal.init.123", "foo"; # Enable checksums. @@ -197,7 +197,7 @@ command_fails([ 'pg_checksums', '--check', '-D', $pgdata ], check_relation_corruption($node, 'corrupt1', 'pg_default'); # Create tablespace to check corruptions in a non-default tablespace. -my $basedir = $node->basedir; +my $basedir = $node->basedir; my $tablespace_dir = "$basedir/ts_corrupt_dir"; mkdir($tablespace_dir); $node->safe_psql('postgres', @@ -208,8 +208,8 @@ check_relation_corruption($node, 'corrupt2', 'ts_corrupt'); # correctly-named relation files filled with some corrupted data. sub fail_corrupt { - my $node = shift; - my $file = shift; + my $node = shift; + my $file = shift; my $pgdata = $node->data_dir; # Create the file with some dummy data in it. diff --git a/src/bin/pg_controldata/t/001_pg_controldata.pl b/src/bin/pg_controldata/t/001_pg_controldata.pl index a502bce3c9..0c641036e9 100644 --- a/src/bin/pg_controldata/t/001_pg_controldata.pl +++ b/src/bin/pg_controldata/t/001_pg_controldata.pl @@ -24,7 +24,7 @@ command_like([ 'pg_controldata', $node->data_dir ], # check with a corrupted pg_control my $pg_control = $node->data_dir . 
'/global/pg_control'; -my $size = (stat($pg_control))[7]; +my $size = (stat($pg_control))[7]; open my $fh, '>', $pg_control or BAIL_OUT($!); binmode $fh; diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl index 11bc805354..f019fe1703 100644 --- a/src/bin/pg_ctl/t/001_start_stop.pl +++ b/src/bin/pg_ctl/t/001_start_stop.pl @@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster; use PostgreSQL::Test::Utils; use Test::More; -my $tempdir = PostgreSQL::Test::Utils::tempdir; +my $tempdir = PostgreSQL::Test::Utils::tempdir; my $tempdir_short = PostgreSQL::Test::Utils::tempdir_short; program_help_ok('pg_ctl'); diff --git a/src/bin/pg_ctl/t/004_logrotate.pl b/src/bin/pg_ctl/t/004_logrotate.pl index 10815a60d4..8d48e56ee9 100644 --- a/src/bin/pg_ctl/t/004_logrotate.pl +++ b/src/bin/pg_ctl/t/004_logrotate.pl @@ -14,8 +14,8 @@ use Time::HiRes qw(usleep); sub fetch_file_name { my $logfiles = shift; - my $format = shift; - my @lines = split(/\n/, $logfiles); + my $format = shift; + my @lines = split(/\n/, $logfiles); my $filename = undef; foreach my $line (@lines) { @@ -33,11 +33,11 @@ sub check_log_pattern { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $format = shift; + my $format = shift; my $logfiles = shift; - my $pattern = shift; - my $node = shift; - my $lfname = fetch_file_name($logfiles, $format); + my $pattern = shift; + my $node = shift; + my $lfname = fetch_file_name($logfiles, $format); my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; @@ -100,8 +100,8 @@ csvlog log/postgresql-.*csv jsonlog log/postgresql-.*json$|, 'current_logfiles is sane'); -check_log_pattern('stderr', $current_logfiles, 'division by zero', $node); -check_log_pattern('csvlog', $current_logfiles, 'division by zero', $node); +check_log_pattern('stderr', $current_logfiles, 'division by zero', $node); +check_log_pattern('csvlog', $current_logfiles, 'division by zero', $node); check_log_pattern('jsonlog', $current_logfiles, 'division by zero', $node); # Sleep 2 seconds and ask for log rotation; this should result in @@ -131,8 +131,8 @@ jsonlog log/postgresql-.*json$|, # Verify that log output gets to this file, too $node->psql('postgres', 'fee fi fo fum'); -check_log_pattern('stderr', $new_current_logfiles, 'syntax error', $node); -check_log_pattern('csvlog', $new_current_logfiles, 'syntax error', $node); +check_log_pattern('stderr', $new_current_logfiles, 'syntax error', $node); +check_log_pattern('csvlog', $new_current_logfiles, 'syntax error', $node); check_log_pattern('jsonlog', $new_current_logfiles, 'syntax error', $node); $node->stop(); diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c index f97fb1aaff..4fee6e2434 100644 --- a/src/bin/pg_dump/compress_io.c +++ b/src/bin/pg_dump/compress_io.c @@ -87,8 +87,8 @@ char * supports_compression(const pg_compress_specification compression_spec) { - const pg_compress_algorithm algorithm = compression_spec.algorithm; - bool supported = false; + const pg_compress_algorithm algorithm = compression_spec.algorithm; + bool supported = false; if (algorithm == PG_COMPRESSION_NONE) supported = true; diff --git a/src/bin/pg_dump/compress_lz4.c b/src/bin/pg_dump/compress_lz4.c index 8d7b28e510..52214b31ee 100644 --- a/src/bin/pg_dump/compress_lz4.c +++ b/src/bin/pg_dump/compress_lz4.c @@ -44,8 +44,8 @@ typedef struct LZ4State LZ4F_preferences_t prefs; - LZ4F_compressionContext_t ctx; - LZ4F_decompressionContext_t dtx; + LZ4F_compressionContext_t ctx; + LZ4F_decompressionContext_t dtx; /* * Used by the Stream 
API's lazy initialization. @@ -148,8 +148,8 @@ ReadDataFromArchiveLZ4(ArchiveHandle *AH, CompressorState *cs) char *outbuf; char *readbuf; LZ4F_decompressionContext_t ctx = NULL; - LZ4F_decompressOptions_t dec_opt; - LZ4F_errorCode_t status; + LZ4F_decompressOptions_t dec_opt; + LZ4F_errorCode_t status; memset(&dec_opt, 0, sizeof(dec_opt)); status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION); @@ -651,8 +651,8 @@ LZ4Stream_gets(char *ptr, int size, CompressFileHandle *CFH) return NULL; /* - * Our caller expects the return string to be NULL terminated - * and we know that ret is greater than zero. + * Our caller expects the return string to be NULL terminated and we know + * that ret is greater than zero. */ ptr[ret - 1] = '\0'; diff --git a/src/bin/pg_dump/compress_zstd.c b/src/bin/pg_dump/compress_zstd.c index 9fbdc0a87d..82e3310100 100644 --- a/src/bin/pg_dump/compress_zstd.c +++ b/src/bin/pg_dump/compress_zstd.c @@ -82,8 +82,8 @@ _ZstdCStreamParams(pg_compress_specification compress) if (compress.options & PG_COMPRESSION_OPTION_LONG_DISTANCE) _Zstd_CCtx_setParam_or_die(cstream, - ZSTD_c_enableLongDistanceMatching, - compress.long_distance, "long"); + ZSTD_c_enableLongDistanceMatching, + compress.long_distance, "long"); return cstream; } diff --git a/src/bin/pg_dump/compress_zstd.h b/src/bin/pg_dump/compress_zstd.h index 2aaa6b100b..d0ab1351fd 100644 --- a/src/bin/pg_dump/compress_zstd.h +++ b/src/bin/pg_dump/compress_zstd.h @@ -18,8 +18,8 @@ #include "compress_io.h" extern void InitCompressorZstd(CompressorState *cs, - const pg_compress_specification compression_spec); + const pg_compress_specification compression_spec); extern void InitCompressFileHandleZstd(CompressFileHandle *CFH, - const pg_compress_specification compression_spec); + const pg_compress_specification compression_spec); -#endif /* COMPRESS_ZSTD_H */ +#endif /* COMPRESS_ZSTD_H */ diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index d518349e10..39ebcfec32 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -386,10 +386,11 @@ RestoreArchive(Archive *AHX) { if (te->hadDumper && (te->reqs & REQ_DATA) != 0) { - char *errmsg = supports_compression(AH->compression_spec); + char *errmsg = supports_compression(AH->compression_spec); + if (errmsg) pg_fatal("cannot restore from compressed archive (%s)", - errmsg); + errmsg); else break; } @@ -2985,11 +2986,11 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH) if (!te->hadDumper) { /* - * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then - * it is considered a data entry. We don't need to check for the - * BLOBS entry or old-style BLOB COMMENTS, because they will have - * hadDumper = true ... but we do need to check new-style BLOB ACLs, - * comments, etc. + * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it + * is considered a data entry. We don't need to check for the BLOBS + * entry or old-style BLOB COMMENTS, because they will have hadDumper + * = true ... but we do need to check new-style BLOB ACLs, comments, + * etc. */ if (strcmp(te->desc, "SEQUENCE SET") == 0 || strcmp(te->desc, "BLOB") == 0 || @@ -3480,6 +3481,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te) { appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag); } + /* * These object types require additional decoration. Fortunately, the * information needed is exactly what's in the DROP command. 
@@ -3639,6 +3641,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData) initPQExpBuffer(&temp); _getObjectDescription(&temp, te); + /* * If _getObjectDescription() didn't fill the buffer, then there is no * owner. @@ -3802,7 +3805,7 @@ ReadHead(ArchiveHandle *AH) if (errmsg) { pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available", - errmsg); + errmsg); pg_free(errmsg); } diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index babd23b4eb..db5fb43bae 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -684,10 +684,10 @@ _LoadLOs(ArchiveHandle *AH) tarClose(AH, th); /* - * Once we have found the first LO, stop at the first non-LO - * entry (which will be 'blobs.toc'). This coding would eat all - * the rest of the archive if there are no LOs ... but this - * function shouldn't be called at all in that case. + * Once we have found the first LO, stop at the first non-LO entry + * (which will be 'blobs.toc'). This coding would eat all the + * rest of the archive if there are no LOs ... but this function + * shouldn't be called at all in that case. */ if (foundLO) break; diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index f325045f9f..3af97a6039 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -756,9 +756,9 @@ main(int argc, char **argv) pg_fatal("%s", error_detail); /* - * Disable support for zstd workers for now - these are based on threading, - * and it's unclear how it interacts with parallel dumps on platforms where - * that relies on threads too (e.g. Windows). + * Disable support for zstd workers for now - these are based on + * threading, and it's unclear how it interacts with parallel dumps on + * platforms where that relies on threads too (e.g. Windows). */ if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS) pg_log_warning("compression option \"%s\" is not currently supported by pg_dump", @@ -879,8 +879,8 @@ main(int argc, char **argv) /* * Dumping LOs is the default for dumps where an inclusion switch is not * used (an "include everything" dump). -B can be used to exclude LOs - * from those dumps. -b can be used to include LOs even when an - * inclusion switch is used. + * from those dumps. -b can be used to include LOs even when an inclusion + * switch is used. * * -s means "schema only" and LOs are data, not schema, so we never * include LOs when -s is used. @@ -915,8 +915,8 @@ main(int argc, char **argv) * data or the associated metadata that resides in the pg_largeobject and * pg_largeobject_metadata tables, respectively. * - * However, we do need to collect LO information as there may be - * comments or other information on LOs that we do need to dump out. + * However, we do need to collect LO information as there may be comments + * or other information on LOs that we do need to dump out. 
*/ if (dopt.outputLOs || dopt.binary_upgrade) getLOs(fout); @@ -3323,8 +3323,8 @@ dumpDatabase(Archive *fout) appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n"); for (int i = 0; i < PQntuples(lo_res); ++i) { - Oid oid; - RelFileNumber relfilenumber; + Oid oid; + RelFileNumber relfilenumber; appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n" "SET relfrozenxid = '%u', relminmxid = '%u'\n" @@ -3590,8 +3590,8 @@ getLOs(Archive *fout) loinfo[i].dobj.components |= DUMP_COMPONENT_ACL; /* - * In binary-upgrade mode for LOs, we do *not* dump out the LO - * data, as it will be copied by pg_upgrade, which simply copies the + * In binary-upgrade mode for LOs, we do *not* dump out the LO data, + * as it will be copied by pg_upgrade, which simply copies the * pg_largeobject table. We *do* however dump out anything but the * data, as pg_upgrade copies just pg_largeobject, but not * pg_largeobject_metadata, after the dump is restored. @@ -14828,7 +14828,10 @@ dumpSecLabel(Archive *fout, const char *type, const char *name, if (dopt->no_security_labels) return; - /* Security labels are schema not data ... except large object labels are data */ + /* + * Security labels are schema not data ... except large object labels are + * data + */ if (strcmp(type, "LARGE OBJECT") != 0) { if (dopt->dataOnly) @@ -15161,7 +15164,7 @@ dumpTable(Archive *fout, const TableInfo *tbinfo) if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL) { const char *objtype = - (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE"; + (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE"; tableAclDumpId = dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId, @@ -16632,10 +16635,12 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo) { appendPQExpBufferStr(q, coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE"); + /* * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT * indexes. Being able to create this was fixed, but we need to - * make the index distinct in order to be able to restore the dump. + * make the index distinct in order to be able to restore the + * dump. */ if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p') appendPQExpBufferStr(q, " NULLS NOT DISTINCT"); @@ -17857,7 +17862,7 @@ processExtensionTables(Archive *fout, ExtensionInfo extinfo[], TableInfo *configtbl; Oid configtbloid = atooid(extconfigarray[j]); bool dumpobj = - curext->dobj.dump & DUMP_COMPONENT_DEFINITION; + curext->dobj.dump & DUMP_COMPONENT_DEFINITION; configtbl = findTableByOid(configtbloid); if (configtbl == NULL) diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index c5647d059b..3627b69e2a 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -949,7 +949,7 @@ static void dumpRoleMembership(PGconn *conn) { PQExpBuffer buf = createPQExpBuffer(); - PQExpBuffer optbuf = createPQExpBuffer(); + PQExpBuffer optbuf = createPQExpBuffer(); PGresult *res; int start = 0, end, @@ -996,8 +996,8 @@ dumpRoleMembership(PGconn *conn) /* * We can't dump these GRANT commands in arbitrary order, because a role - * that is named as a grantor must already have ADMIN OPTION on the - * role for which it is granting permissions, except for the bootstrap + * that is named as a grantor must already have ADMIN OPTION on the role + * for which it is granting permissions, except for the bootstrap * superuser, who can always be named as the grantor. * * We handle this by considering these grants role by role. 
For each role, @@ -1005,8 +1005,8 @@ dumpRoleMembership(PGconn *conn) * superuser. Every time we grant ADMIN OPTION on the role to some user, * that user also becomes an allowable grantor. We make repeated passes * over the grants for the role, each time dumping those whose grantors - * are allowable and which we haven't done yet. Eventually this should - * let us dump all the grants. + * are allowable and which we haven't done yet. Eventually this should let + * us dump all the grants. */ total = PQntuples(res); while (start < total) @@ -1021,7 +1021,7 @@ dumpRoleMembership(PGconn *conn) /* All memberships for a single role should be adjacent. */ for (end = start; end < total; ++end) { - char *otherrole; + char *otherrole; otherrole = PQgetvalue(res, end, 0); if (strcmp(role, otherrole) != 0) @@ -1105,7 +1105,7 @@ dumpRoleMembership(PGconn *conn) appendPQExpBufferStr(optbuf, "ADMIN OPTION"); if (dump_grant_options) { - char *inherit_option; + char *inherit_option; if (optbuf->data[0] != '\0') appendPQExpBufferStr(optbuf, ", "); diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index d66f3b42ea..387c5d3afb 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -53,10 +53,10 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir; # database and then pg_dump *that* database (or something along # those lines) to validate that part of the process. -my $supports_icu = ($ENV{with_icu} eq 'yes'); +my $supports_icu = ($ENV{with_icu} eq 'yes'); my $supports_gzip = check_pg_config("#define HAVE_LIBZ 1"); -my $supports_lz4 = check_pg_config("#define USE_LZ4 1"); -my $supports_zstd = check_pg_config("#define USE_ZSTD 1"); +my $supports_lz4 = check_pg_config("#define USE_LZ4 1"); +my $supports_zstd = check_pg_config("#define USE_ZSTD 1"); my %pgdump_runs = ( binary_upgrade => { @@ -79,10 +79,10 @@ my %pgdump_runs = ( # Do not use --no-sync to give test coverage for data sync. compression_gzip_custom => { - test_key => 'compression', + test_key => 'compression', compile_option => 'gzip', - dump_cmd => [ - 'pg_dump', '--format=custom', + dump_cmd => [ + 'pg_dump', '--format=custom', '--compress=1', "--file=$tempdir/compression_gzip_custom.dump", 'postgres', ], @@ -96,24 +96,24 @@ my %pgdump_runs = ( 'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump", ], expected => qr/Compression: gzip/, - name => 'data content is gzip-compressed' + name => 'data content is gzip-compressed' }, }, # Do not use --no-sync to give test coverage for data sync. compression_gzip_dir => { - test_key => 'compression', + test_key => 'compression', compile_option => 'gzip', - dump_cmd => [ - 'pg_dump', '--jobs=2', - '--format=directory', '--compress=gzip:1', + dump_cmd => [ + 'pg_dump', '--jobs=2', + '--format=directory', '--compress=gzip:1', "--file=$tempdir/compression_gzip_dir", 'postgres', ], # Give coverage for manually compressed blob.toc files during # restore. compress_cmd => { program => $ENV{'GZIP_PROGRAM'}, - args => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ], + args => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ], }, # Verify that only data files were compressed glob_patterns => [ @@ -128,25 +128,25 @@ my %pgdump_runs = ( }, compression_gzip_plain => { - test_key => 'compression', + test_key => 'compression', compile_option => 'gzip', - dump_cmd => [ + dump_cmd => [ 'pg_dump', '--format=plain', '-Z1', "--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres', ], # Decompress the generated file to run through the tests. 
compress_cmd => { program => $ENV{'GZIP_PROGRAM'}, - args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ], + args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ], }, }, # Do not use --no-sync to give test coverage for data sync. compression_lz4_custom => { - test_key => 'compression', + test_key => 'compression', compile_option => 'lz4', - dump_cmd => [ - 'pg_dump', '--format=custom', + dump_cmd => [ + 'pg_dump', '--format=custom', '--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump", 'postgres', ], @@ -156,10 +156,8 @@ my %pgdump_runs = ( "$tempdir/compression_lz4_custom.dump", ], command_like => { - command => [ - 'pg_restore', - '-l', "$tempdir/compression_lz4_custom.dump", - ], + command => + [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ], expected => qr/Compression: lz4/, name => 'data content is lz4 compressed' }, @@ -167,18 +165,18 @@ my %pgdump_runs = ( # Do not use --no-sync to give test coverage for data sync. compression_lz4_dir => { - test_key => 'compression', + test_key => 'compression', compile_option => 'lz4', - dump_cmd => [ - 'pg_dump', '--jobs=2', - '--format=directory', '--compress=lz4:1', + dump_cmd => [ + 'pg_dump', '--jobs=2', + '--format=directory', '--compress=lz4:1', "--file=$tempdir/compression_lz4_dir", 'postgres', ], # Give coverage for manually compressed blob.toc files during # restore. compress_cmd => { program => $ENV{'LZ4'}, - args => [ + args => [ '-z', '-f', '--rm', "$tempdir/compression_lz4_dir/blobs.toc", "$tempdir/compression_lz4_dir/blobs.toc.lz4", @@ -187,7 +185,7 @@ my %pgdump_runs = ( # Verify that data files were compressed glob_patterns => [ "$tempdir/compression_lz4_dir/toc.dat", - "$tempdir/compression_lz4_dir/*.dat.lz4", + "$tempdir/compression_lz4_dir/*.dat.lz4", ], restore_cmd => [ 'pg_restore', '--jobs=2', @@ -197,16 +195,16 @@ my %pgdump_runs = ( }, compression_lz4_plain => { - test_key => 'compression', + test_key => 'compression', compile_option => 'lz4', - dump_cmd => [ + dump_cmd => [ 'pg_dump', '--format=plain', '--compress=lz4', "--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres', ], # Decompress the generated file to run through the tests. compress_cmd => { program => $ENV{'LZ4'}, - args => [ + args => [ '-d', '-f', "$tempdir/compression_lz4_plain.sql.lz4", "$tempdir/compression_lz4_plain.sql", @@ -215,10 +213,10 @@ my %pgdump_runs = ( }, compression_zstd_custom => { - test_key => 'compression', + test_key => 'compression', compile_option => 'zstd', - dump_cmd => [ - 'pg_dump', '--format=custom', + dump_cmd => [ + 'pg_dump', '--format=custom', '--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump", 'postgres', ], @@ -229,8 +227,7 @@ my %pgdump_runs = ( ], command_like => { command => [ - 'pg_restore', - '-l', "$tempdir/compression_zstd_custom.dump", + 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump", ], expected => qr/Compression: zstd/, name => 'data content is zstd compressed' @@ -238,27 +235,27 @@ my %pgdump_runs = ( }, compression_zstd_dir => { - test_key => 'compression', + test_key => 'compression', compile_option => 'zstd', - dump_cmd => [ - 'pg_dump', '--jobs=2', - '--format=directory', '--compress=zstd:1', + dump_cmd => [ + 'pg_dump', '--jobs=2', + '--format=directory', '--compress=zstd:1', "--file=$tempdir/compression_zstd_dir", 'postgres', ], # Give coverage for manually compressed blob.toc files during # restore. 
compress_cmd => { program => $ENV{'ZSTD'}, - args => [ - '-z', '-f', '--rm', - "$tempdir/compression_zstd_dir/blobs.toc", + args => [ + '-z', '-f', + '--rm', "$tempdir/compression_zstd_dir/blobs.toc", "-o", "$tempdir/compression_zstd_dir/blobs.toc.zst", ], }, # Verify that data files were compressed glob_patterns => [ - "$tempdir/compression_zstd_dir/toc.dat", - "$tempdir/compression_zstd_dir/*.dat.zst", + "$tempdir/compression_zstd_dir/toc.dat", + "$tempdir/compression_zstd_dir/*.dat.zst", ], restore_cmd => [ 'pg_restore', '--jobs=2', @@ -269,19 +266,19 @@ my %pgdump_runs = ( # Exercise long mode for test coverage compression_zstd_plain => { - test_key => 'compression', + test_key => 'compression', compile_option => 'zstd', - dump_cmd => [ + dump_cmd => [ 'pg_dump', '--format=plain', '--compress=zstd:long', "--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres', ], # Decompress the generated file to run through the tests. compress_cmd => { program => $ENV{'ZSTD'}, - args => [ + args => [ '-d', '-f', - "$tempdir/compression_zstd_plain.sql.zst", - "-o", "$tempdir/compression_zstd_plain.sql", + "$tempdir/compression_zstd_plain.sql.zst", "-o", + "$tempdir/compression_zstd_plain.sql", ], }, }, @@ -308,9 +305,9 @@ my %pgdump_runs = ( }, column_inserts => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/column_inserts.sql", '-a', - '--column-inserts', 'postgres', + '--column-inserts', 'postgres', ], }, createdb => { @@ -339,7 +336,7 @@ my %pgdump_runs = ( defaults => { dump_cmd => [ 'pg_dump', '--no-sync', - '-f', "$tempdir/defaults.sql", + '-f', "$tempdir/defaults.sql", 'postgres', ], }, @@ -385,9 +382,9 @@ my %pgdump_runs = ( command_like => { command => [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ], - expected => $supports_gzip ? - qr/Compression: gzip/ : - qr/Compression: none/, + expected => $supports_gzip + ? qr/Compression: gzip/ + : qr/Compression: none/, name => 'data content is gzip-compressed by default if available', }, }, @@ -399,7 +396,7 @@ my %pgdump_runs = ( defaults_dir_format => { test_key => 'defaults', dump_cmd => [ - 'pg_dump', '-Fd', + 'pg_dump', '-Fd', "--file=$tempdir/defaults_dir_format", 'postgres', ], restore_cmd => [ @@ -410,17 +407,15 @@ my %pgdump_runs = ( command_like => { command => [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ], - expected => $supports_gzip ? - qr/Compression: gzip/ : - qr/Compression: none/, + expected => $supports_gzip ? qr/Compression: gzip/ + : qr/Compression: none/, name => 'data content is gzip-compressed by default', }, glob_patterns => [ "$tempdir/defaults_dir_format/toc.dat", "$tempdir/defaults_dir_format/blobs.toc", - $supports_gzip ? - "$tempdir/defaults_dir_format/*.dat.gz" : - "$tempdir/defaults_dir_format/*.dat", + $supports_gzip ? 
"$tempdir/defaults_dir_format/*.dat.gz" + : "$tempdir/defaults_dir_format/*.dat", ], }, @@ -442,7 +437,7 @@ my %pgdump_runs = ( defaults_tar_format => { test_key => 'defaults', dump_cmd => [ - 'pg_dump', '-Ft', + 'pg_dump', '-Ft', "--file=$tempdir/defaults_tar_format.tar", 'postgres', ], restore_cmd => [ @@ -468,7 +463,8 @@ my %pgdump_runs = ( }, exclude_measurement => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', + '--no-sync', "--file=$tempdir/exclude_measurement.sql", '--exclude-table-and-children=dump_test.measurement', 'postgres', @@ -496,9 +492,9 @@ my %pgdump_runs = ( }, inserts => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/inserts.sql", '-a', - '--inserts', 'postgres', + '--inserts', 'postgres', ], }, pg_dumpall_globals => { @@ -534,21 +530,20 @@ my %pgdump_runs = ( }, no_large_objects => { dump_cmd => [ - 'pg_dump', '--no-sync', - "--file=$tempdir/no_large_objects.sql", '-B', - 'postgres', + 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql", + '-B', 'postgres', ], }, no_privs => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/no_privs.sql", '-x', 'postgres', ], }, no_owner => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/no_owner.sql", '-O', 'postgres', ], @@ -630,21 +625,21 @@ my %pgdump_runs = ( }, schema_only => { dump_cmd => [ - 'pg_dump', '--format=plain', + 'pg_dump', '--format=plain', "--file=$tempdir/schema_only.sql", '--no-sync', - '-s', 'postgres', + '-s', 'postgres', ], }, section_pre_data => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/section_pre_data.sql", + 'pg_dump', "--file=$tempdir/section_pre_data.sql", '--section=pre-data', '--no-sync', 'postgres', ], }, section_data => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/section_data.sql", + 'pg_dump', "--file=$tempdir/section_data.sql", '--section=data', '--no-sync', 'postgres', ], @@ -705,38 +700,38 @@ my %pgdump_runs = ( # Tests which target the 'dump_test' schema, specifically. my %dump_test_schema_runs = ( - only_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_test_schema => 1, + only_dump_measurement => 1, test_schema_plus_large_objects => 1,); # Tests which are considered 'full' dumps by pg_dump, but there # are flags used to exclude specific items (ACLs, LOs, etc). my %full_runs = ( - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - compression => 1, - createdb => 1, - defaults => 1, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + compression => 1, + createdb => 1, + defaults => 1, exclude_dump_test_schema => 1, - exclude_test_table => 1, - exclude_test_table_data => 1, - exclude_measurement => 1, + exclude_test_table => 1, + exclude_test_table_data => 1, + exclude_measurement => 1, exclude_measurement_data => 1, - no_toast_compression => 1, - no_large_objects => 1, - no_owner => 1, - no_privs => 1, - no_table_access_method => 1, - pg_dumpall_dbprivs => 1, - pg_dumpall_exclude => 1, - schema_only => 1,); + no_toast_compression => 1, + no_large_objects => 1, + no_owner => 1, + no_privs => 1, + no_table_access_method => 1, + pg_dumpall_dbprivs => 1, + pg_dumpall_exclude => 1, + schema_only => 1,); # This is where the actual tests are defined. 
my %tests = ( 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT' => { create_order => 14, - create_sql => 'ALTER DEFAULT PRIVILEGES + create_sql => 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role IN SCHEMA dump_test GRANT SELECT ON TABLES TO regress_dump_test_role;', regexp => qr/^ @@ -748,15 +743,15 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT EXECUTE ON FUNCTIONS' => { create_order => 15, - create_sql => 'ALTER DEFAULT PRIVILEGES + create_sql => 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role IN SCHEMA dump_test GRANT EXECUTE ON FUNCTIONS TO regress_dump_test_role;', regexp => qr/^ @@ -768,14 +763,14 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE' => { create_order => 55, - create_sql => 'ALTER DEFAULT PRIVILEGES + create_sql => 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC;', regexp => qr/^ @@ -790,7 +785,7 @@ my %tests = ( 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT' => { create_order => 56, - create_sql => 'ALTER DEFAULT PRIVILEGES + create_sql => 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT ON TABLES FROM regress_dump_test_role;', regexp => qr/^ @@ -812,29 +807,29 @@ my %tests = ( \QNOREPLICATION NOBYPASSRLS;\E /xm, like => { - pg_dumpall_dbprivs => 1, - pg_dumpall_globals => 1, + pg_dumpall_dbprivs => 1, + pg_dumpall_globals => 1, pg_dumpall_globals_clean => 1, - pg_dumpall_exclude => 1, + pg_dumpall_exclude => 1, }, }, 'ALTER COLLATION test0 OWNER TO' => { - regexp => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m, + regexp => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m, collation => 1, - like => { %full_runs, section_pre_data => 1, }, - unlike => { %dump_test_schema_runs, no_owner => 1, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { %dump_test_schema_runs, no_owner => 1, }, }, 'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => { regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .+;/m, - like => { %full_runs, section_pre_data => 1, }, + like => { %full_runs, section_pre_data => 1, }, unlike => { no_owner => 1, }, }, 'ALTER SERVER s1 OWNER TO' => { regexp => qr/^ALTER SERVER s1 OWNER TO .+;/m, - like => { %full_runs, section_pre_data => 1, }, + like => { %full_runs, section_pre_data => 1, }, unlike => { no_owner => 1, }, }, @@ -847,8 +842,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, @@ -861,8 +856,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, @@ -897,7 +892,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -910,37 +905,37 @@ my %tests = 
( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, 'ALTER PUBLICATION pub1 OWNER TO' => { regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .+;/m, - like => { %full_runs, section_post_data => 1, }, + like => { %full_runs, section_post_data => 1, }, unlike => { no_owner => 1, }, }, 'ALTER LARGE OBJECT ... OWNER TO' => { regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .+;/m, - like => { + like => { %full_runs, - column_inserts => 1, - data_only => 1, - inserts => 1, - section_pre_data => 1, + column_inserts => 1, + data_only => 1, + inserts => 1, + section_pre_data => 1, test_schema_plus_large_objects => 1, }, unlike => { no_large_objects => 1, - no_owner => 1, + no_owner => 1, schema_only => 1, }, }, 'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => { regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .+;/m, - like => { %full_runs, section_pre_data => 1, }, + like => { %full_runs, section_pre_data => 1, }, unlike => { no_owner => 1, }, }, @@ -950,16 +945,16 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, 'ALTER SCHEMA dump_test_second_schema OWNER TO' => { regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .+;/m, - like => { + like => { %full_runs, - role => 1, + role => 1, section_pre_data => 1, }, unlike => { no_owner => 1, }, @@ -970,14 +965,14 @@ my %tests = ( create_sql => 'ALTER SCHEMA public OWNER TO "regress_quoted \"" role";', regexp => qr/^ALTER SCHEMA public OWNER TO .+;/m, - like => { + like => { %full_runs, section_pre_data => 1, }, unlike => { no_owner => 1, }, }, 'ALTER SCHEMA public OWNER TO (w/o ACL changes)' => { - database => 'regress_public_owner', + database => 'regress_public_owner', create_order => 100, create_sql => 'ALTER SCHEMA public OWNER TO "regress_quoted \"" role";', @@ -993,12 +988,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1011,18 +1006,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'ALTER TABLE (partitioned) ADD CONSTRAINT ... 
FOREIGN KEY' => { create_order => 4, - create_sql => 'CREATE TABLE dump_test.test_table_fk ( + create_sql => 'CREATE TABLE dump_test.test_table_fk ( col1 int references dump_test.test_table) PARTITION BY RANGE (col1); CREATE TABLE dump_test.test_table_fk_1 @@ -1036,7 +1031,7 @@ my %tests = ( }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -1051,12 +1046,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1071,12 +1066,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1091,12 +1086,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1111,12 +1106,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1128,9 +1123,9 @@ my %tests = ( /xm, like => { %full_runs, - role => 1, + role => 1, section_pre_data => 1, - binary_upgrade => 1, + binary_upgrade => 1, only_dump_measurement => 1, }, unlike => { @@ -1149,12 +1144,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1178,29 +1173,29 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'ALTER TABLE test_table OWNER TO' => { regexp => qr/^\QALTER TABLE dump_test.test_table OWNER TO \E.+;/m, - like => { + like => { %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, - no_owner => 1, + exclude_test_table => 1, + only_dump_measurement => 1, + no_owner => 1, }, }, 'ALTER TABLE test_table ENABLE ROW LEVEL SECURITY' => { create_order => 23, - create_sql => 'ALTER TABLE dump_test.test_table + create_sql => 'ALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;', regexp => qr/^\QALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;\E/m, @@ -1208,12 +1203,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -1224,8 +1219,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, 
unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, @@ -1239,8 +1234,8 @@ my %tests = ( }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - exclude_measurement => 1, + no_owner => 1, + exclude_measurement => 1, }, }, @@ -1249,7 +1244,7 @@ my %tests = ( qr/^\QALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO \E.+;/m, like => { %full_runs, - role => 1, + role => 1, section_pre_data => 1, only_dump_measurement => 1, }, @@ -1266,8 +1261,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, @@ -1278,8 +1273,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, - only_dump_measurement => 1, + no_owner => 1, + only_dump_measurement => 1, }, }, @@ -1290,10 +1285,10 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_test_table => 1, - no_owner => 1, - role => 1, - only_dump_measurement => 1, + only_dump_test_table => 1, + no_owner => 1, + role => 1, + only_dump_measurement => 1, }, }, @@ -1302,12 +1297,12 @@ my %tests = ( create_sql => 'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');', regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m, - like => { + like => { %full_runs, - column_inserts => 1, - data_only => 1, - inserts => 1, - section_pre_data => 1, + column_inserts => 1, + data_only => 1, + inserts => 1, + section_pre_data => 1, test_schema_plus_large_objects => 1, }, unlike => { @@ -1325,39 +1320,38 @@ my %tests = ( /xm, like => { %full_runs, - column_inserts => 1, - data_only => 1, - inserts => 1, - section_data => 1, + column_inserts => 1, + data_only => 1, + inserts => 1, + section_data => 1, test_schema_plus_large_objects => 1, }, unlike => { binary_upgrade => 1, no_large_objects => 1, - schema_only => 1, + schema_only => 1, }, }, 'LO create (with no data)' => { - create_sql => - 'SELECT pg_catalog.lo_create(0);', + create_sql => 'SELECT pg_catalog.lo_create(0);', regexp => qr/^ \QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n \QSELECT pg_catalog.lo_close(0);\E /xm, - like => { + like => { %full_runs, - column_inserts => 1, - data_only => 1, - inserts => 1, - section_data => 1, + column_inserts => 1, + data_only => 1, + inserts => 1, + section_data => 1, test_schema_plus_large_objects => 1, }, unlike => { - binary_upgrade => 1, - no_large_objects => 1, - schema_only => 1, - section_pre_data => 1, + binary_upgrade => 1, + no_large_objects => 1, + schema_only => 1, + section_pre_data => 1, }, }, @@ -1385,16 +1379,16 @@ my %tests = ( }, 'COMMENT ON SCHEMA public IS NULL' => { - database => 'regress_public_owner', + database => 'regress_public_owner', create_order => 100, - create_sql => 'COMMENT ON SCHEMA public IS NULL;', - regexp => qr/^COMMENT ON SCHEMA public IS '';/m, - like => { defaults_public_owner => 1 }, + create_sql => 'COMMENT ON SCHEMA public IS NULL;', + regexp => qr/^COMMENT ON SCHEMA public IS '';/m, + like => { defaults_public_owner => 1 }, }, 'COMMENT ON TABLE dump_test.test_table' => { create_order => 36, - create_sql => 'COMMENT ON TABLE dump_test.test_table + create_sql => 'COMMENT ON TABLE dump_test.test_table IS \'comment on table\';', regexp => qr/^\QCOMMENT ON TABLE 
dump_test.test_table IS 'comment on table';\E/m, @@ -1402,18 +1396,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON COLUMN dump_test.test_table.col1' => { create_order => 36, - create_sql => 'COMMENT ON COLUMN dump_test.test_table.col1 + create_sql => 'COMMENT ON COLUMN dump_test.test_table.col1 IS \'comment on column\';', regexp => qr/^ \QCOMMENT ON COLUMN dump_test.test_table.col1 IS 'comment on column';\E @@ -1422,18 +1416,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON COLUMN dump_test.composite.f1' => { create_order => 44, - create_sql => 'COMMENT ON COLUMN dump_test.composite.f1 + create_sql => 'COMMENT ON COLUMN dump_test.composite.f1 IS \'comment on column of type\';', regexp => qr/^ \QCOMMENT ON COLUMN dump_test.composite.f1 IS 'comment on column of type';\E @@ -1442,13 +1436,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON COLUMN dump_test.test_second_table.col1' => { create_order => 63, - create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col1 + create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col1 IS \'comment on column col1\';', regexp => qr/^ \QCOMMENT ON COLUMN dump_test.test_second_table.col1 IS 'comment on column col1';\E @@ -1457,13 +1451,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON COLUMN dump_test.test_second_table.col2' => { create_order => 64, - create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col2 + create_sql => 'COMMENT ON COLUMN dump_test.test_second_table.col2 IS \'comment on column col2\';', regexp => qr/^ \QCOMMENT ON COLUMN dump_test.test_second_table.col2 IS 'comment on column col2';\E @@ -1472,13 +1466,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON CONVERSION dump_test.test_conversion' => { create_order => 79, - create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion + create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion IS \'comment on test conversion\';', regexp => qr/^\QCOMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversion';\E/m, @@ -1486,23 +1480,23 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON COLLATION test0' => { create_order => 77, - create_sql => 'COMMENT ON COLLATION test0 + create_sql => 'COMMENT ON COLLATION test0 IS \'comment on test0 collation\';', regexp => qr/^\QCOMMENT ON COLLATION public.test0 IS 'comment on test0 collation';\E/m, collation => 1, - like => { %full_runs, section_pre_data => 1, }, + like => { %full_runs, 
section_pre_data => 1, }, }, 'COMMENT ON LARGE OBJECT ...' => { create_order => 65, - create_sql => 'DO $$ + create_sql => 'DO $$ DECLARE myoid oid; BEGIN SELECT loid FROM pg_largeobject INTO myoid; @@ -1514,10 +1508,10 @@ my %tests = ( /xm, like => { %full_runs, - column_inserts => 1, - data_only => 1, - inserts => 1, - section_pre_data => 1, + column_inserts => 1, + data_only => 1, + inserts => 1, + section_pre_data => 1, test_schema_plus_large_objects => 1, }, unlike => { @@ -1528,7 +1522,7 @@ my %tests = ( 'COMMENT ON PUBLICATION pub1' => { create_order => 55, - create_sql => 'COMMENT ON PUBLICATION pub1 + create_sql => 'COMMENT ON PUBLICATION pub1 IS \'comment on publication\';', regexp => qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m, @@ -1537,7 +1531,7 @@ my %tests = ( 'COMMENT ON SUBSCRIPTION sub1' => { create_order => 55, - create_sql => 'COMMENT ON SUBSCRIPTION sub1 + create_sql => 'COMMENT ON SUBSCRIPTION sub1 IS \'comment on subscription\';', regexp => qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m, @@ -1555,7 +1549,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -1570,13 +1564,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1' => { create_order => 84, - create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 + create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 IS \'comment on text search parser\';', regexp => qr/^\QCOMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 IS 'comment on text search parser';\E/m, @@ -1584,7 +1578,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -1598,13 +1592,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON TYPE dump_test.planets - ENUM' => { create_order => 68, - create_sql => 'COMMENT ON TYPE dump_test.planets + create_sql => 'COMMENT ON TYPE dump_test.planets IS \'comment on enum type\';', regexp => qr/^\QCOMMENT ON TYPE dump_test.planets IS 'comment on enum type';\E/m, @@ -1612,13 +1606,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON TYPE dump_test.textrange - RANGE' => { create_order => 69, - create_sql => 'COMMENT ON TYPE dump_test.textrange + create_sql => 'COMMENT ON TYPE dump_test.textrange IS \'comment on range type\';', regexp => qr/^\QCOMMENT ON TYPE dump_test.textrange IS 'comment on range type';\E/m, @@ -1626,13 +1620,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON TYPE dump_test.int42 - Regular' => { create_order => 70, - create_sql => 'COMMENT ON TYPE dump_test.int42 + create_sql => 'COMMENT ON TYPE dump_test.int42 IS \'comment on regular type\';', regexp => qr/^\QCOMMENT ON TYPE dump_test.int42 IS 'comment on regular type';\E/m, @@ 
-1640,13 +1634,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COMMENT ON TYPE dump_test.undefined - Undefined' => { create_order => 71, - create_sql => 'COMMENT ON TYPE dump_test.undefined + create_sql => 'COMMENT ON TYPE dump_test.undefined IS \'comment on undefined type\';', regexp => qr/^\QCOMMENT ON TYPE dump_test.undefined IS 'comment on undefined type';\E/m, @@ -1654,13 +1648,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'COPY test_table' => { create_order => 4, - create_sql => 'INSERT INTO dump_test.test_table (col1) ' + create_sql => 'INSERT INTO dump_test.test_table (col1) ' . 'SELECT generate_series FROM generate_series(1,9);', regexp => qr/^ \QCOPY dump_test.test_table (col1, col2, col3, col4) FROM stdin;\E @@ -1669,17 +1663,17 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, only_dump_test_table => 1, - section_data => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - exclude_test_table => 1, - exclude_test_table_data => 1, - schema_only => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + exclude_test_table_data => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1694,16 +1688,16 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, - exclude_test_table => 1, + data_only => 1, + exclude_test_table => 1, exclude_test_table_data => 1, - section_data => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1732,14 +1726,14 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1754,14 +1748,14 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1777,14 +1771,14 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1799,14 +1793,14 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1821,14 +1815,14 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - 
schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -1891,24 +1885,24 @@ my %tests = ( 'CREATE ROLE regress_dump_test_role' => { create_order => 1, - create_sql => 'CREATE ROLE regress_dump_test_role;', - regexp => qr/^CREATE ROLE regress_dump_test_role;/m, - like => { - pg_dumpall_dbprivs => 1, - pg_dumpall_exclude => 1, - pg_dumpall_globals => 1, + create_sql => 'CREATE ROLE regress_dump_test_role;', + regexp => qr/^CREATE ROLE regress_dump_test_role;/m, + like => { + pg_dumpall_dbprivs => 1, + pg_dumpall_exclude => 1, + pg_dumpall_globals => 1, pg_dumpall_globals_clean => 1, }, }, 'CREATE ROLE regress_quoted...' => { create_order => 1, - create_sql => 'CREATE ROLE "regress_quoted \"" role";', - regexp => qr/^CREATE ROLE "regress_quoted \\"" role";/m, - like => { - pg_dumpall_dbprivs => 1, - pg_dumpall_exclude => 1, - pg_dumpall_globals => 1, + create_sql => 'CREATE ROLE "regress_quoted \"" role";', + regexp => qr/^CREATE ROLE "regress_quoted \\"" role";/m, + like => { + pg_dumpall_dbprivs => 1, + pg_dumpall_exclude => 1, + pg_dumpall_globals => 1, pg_dumpall_globals_clean => 1, }, }, @@ -1924,20 +1918,21 @@ my %tests = ( 'CREATE COLLATION test0 FROM "C"' => { create_order => 76, - create_sql => 'CREATE COLLATION test0 FROM "C";', + create_sql => 'CREATE COLLATION test0 FROM "C";', regexp => qr/CREATE COLLATION public.test0 \(provider = libc, locale = 'C'(, version = '[^']*')?\);/m, collation => 1, - like => { %full_runs, section_pre_data => 1, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE COLLATION icu_collation' => { create_order => 76, - create_sql => "CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');", + create_sql => + "CREATE COLLATION icu_collation (PROVIDER = icu, LOCALE = 'en-US-u-va-posix');", regexp => qr/CREATE COLLATION public.icu_collation \(provider = icu, locale = 'en-US-u-va-posix'(, version = '[^']*')?\);/m, icu => 1, - like => { %full_runs, section_pre_data => 1, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE CAST FOR timestamptz' => { @@ -1958,8 +1953,8 @@ my %tests = ( 'CREATE DATABASE dump_test' => { create_order => 47, - create_sql => 'CREATE DATABASE dump_test;', - regexp => qr/^ + create_sql => 'CREATE DATABASE dump_test;', + regexp => qr/^ \QCREATE DATABASE dump_test WITH TEMPLATE = template0 \E .+;/xm, like => { pg_dumpall_dbprivs => 1, }, @@ -1986,7 +1981,7 @@ my %tests = ( 'CREATE AGGREGATE dump_test.newavg' => { create_order => 25, - create_sql => 'CREATE AGGREGATE dump_test.newavg ( + create_sql => 'CREATE AGGREGATE dump_test.newavg ( sfunc = int4_avg_accum, basetype = int4, stype = _int8, @@ -2006,11 +2001,11 @@ my %tests = ( %full_runs, %dump_test_schema_runs, exclude_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2024,13 +2019,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE DOMAIN dump_test.us_postal_code' => { create_order => 29, - create_sql => 'CREATE DOMAIN dump_test.us_postal_code AS TEXT + create_sql => 'CREATE DOMAIN dump_test.us_postal_code AS TEXT COLLATE "C" DEFAULT \'10014\' CHECK(VALUE ~ \'^\d{5}$\' OR @@ -2049,13 +2044,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 
1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE FUNCTION dump_test.pltestlang_call_handler' => { create_order => 17, - create_sql => 'CREATE FUNCTION dump_test.pltestlang_call_handler() + create_sql => 'CREATE FUNCTION dump_test.pltestlang_call_handler() RETURNS LANGUAGE_HANDLER AS \'$libdir/plpgsql\', \'plpgsql_call_handler\' LANGUAGE C;', regexp => qr/^ @@ -2069,13 +2064,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE FUNCTION dump_test.trigger_func' => { create_order => 30, - create_sql => 'CREATE FUNCTION dump_test.trigger_func() + create_sql => 'CREATE FUNCTION dump_test.trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$ BEGIN RETURN NULL; END;$$;', regexp => qr/^ @@ -2088,13 +2083,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE FUNCTION dump_test.event_trigger_func' => { create_order => 32, - create_sql => 'CREATE FUNCTION dump_test.event_trigger_func() + create_sql => 'CREATE FUNCTION dump_test.event_trigger_func() RETURNS event_trigger LANGUAGE plpgsql AS $$ BEGIN RETURN; END;$$;', regexp => qr/^ @@ -2107,7 +2102,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2122,13 +2117,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE OPERATOR CLASS dump_test.op_class' => { create_order => 74, - create_sql => 'CREATE OPERATOR CLASS dump_test.op_class + create_sql => 'CREATE OPERATOR CLASS dump_test.op_class FOR TYPE bigint USING btree FAMILY dump_test.op_family AS STORAGE bigint, OPERATOR 1 <(bigint,bigint), @@ -2155,14 +2150,14 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, # verify that a custom operator/opclass/range type is dumped in right order 'CREATE OPERATOR CLASS dump_test.op_class_custom' => { create_order => 74, - create_sql => 'CREATE OPERATOR dump_test.~~ ( + create_sql => 'CREATE OPERATOR dump_test.~~ ( PROCEDURE = int4eq, LEFTARG = int, RIGHTARG = int); @@ -2194,7 +2189,7 @@ my %tests = ( 'CREATE OPERATOR CLASS dump_test.op_class_empty' => { create_order => 89, - create_sql => 'CREATE OPERATOR CLASS dump_test.op_class_empty + create_sql => 'CREATE OPERATOR CLASS dump_test.op_class_empty FOR TYPE bigint USING btree FAMILY dump_test.op_family AS STORAGE bigint;', regexp => qr/^ @@ -2206,13 +2201,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE EVENT TRIGGER test_event_trigger' => { create_order => 33, - create_sql => 'CREATE EVENT TRIGGER test_event_trigger + create_sql => 'CREATE EVENT TRIGGER test_event_trigger ON ddl_command_start EXECUTE FUNCTION dump_test.event_trigger_func();', regexp => qr/^ @@ -2225,7 +2220,7 @@ my %tests = ( 'CREATE TRIGGER test_trigger' => { create_order => 31, - create_sql => 'CREATE TRIGGER test_trigger + create_sql => 'CREATE TRIGGER 
test_trigger BEFORE INSERT ON dump_test.test_table FOR EACH ROW WHEN (NEW.col1 > 10) EXECUTE FUNCTION dump_test.trigger_func();', @@ -2238,18 +2233,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { - exclude_test_table => 1, + exclude_test_table => 1, exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TYPE dump_test.planets AS ENUM' => { create_order => 37, - create_sql => 'CREATE TYPE dump_test.planets + create_sql => 'CREATE TYPE dump_test.planets AS ENUM ( \'venus\', \'earth\', \'mars\' );', regexp => qr/^ \QCREATE TYPE dump_test.planets AS ENUM (\E @@ -2260,9 +2255,9 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2281,7 +2276,7 @@ my %tests = ( 'CREATE TYPE dump_test.textrange AS RANGE' => { create_order => 38, - create_sql => 'CREATE TYPE dump_test.textrange + create_sql => 'CREATE TYPE dump_test.textrange AS RANGE (subtype=text, collation="C");', regexp => qr/^ \QCREATE TYPE dump_test.textrange AS RANGE (\E @@ -2293,19 +2288,19 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TYPE dump_test.int42' => { create_order => 39, - create_sql => 'CREATE TYPE dump_test.int42;', - regexp => qr/^\QCREATE TYPE dump_test.int42;\E/m, + create_sql => 'CREATE TYPE dump_test.int42;', + regexp => qr/^\QCREATE TYPE dump_test.int42;\E/m, like => { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2320,7 +2315,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2388,7 +2383,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2403,13 +2398,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1' => { create_order => 82, - create_sql => 'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1 + create_sql => 'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1 (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);', regexp => qr/^ \QCREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1 (\E\n @@ -2422,7 +2417,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2438,13 +2433,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE FUNCTION dump_test.int42_in' => { create_order => 40, - create_sql => 'CREATE FUNCTION dump_test.int42_in(cstring) + create_sql => 'CREATE FUNCTION dump_test.int42_in(cstring) RETURNS dump_test.int42 
AS \'int4in\' LANGUAGE internal STRICT IMMUTABLE;', regexp => qr/^ @@ -2456,13 +2451,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE FUNCTION dump_test.int42_out' => { create_order => 41, - create_sql => 'CREATE FUNCTION dump_test.int42_out(dump_test.int42) + create_sql => 'CREATE FUNCTION dump_test.int42_out(dump_test.int42) RETURNS cstring AS \'int4out\' LANGUAGE internal STRICT IMMUTABLE;', regexp => qr/^ @@ -2474,7 +2469,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2491,13 +2486,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE PROCEDURE dump_test.ptest1' => { create_order => 41, - create_sql => 'CREATE PROCEDURE dump_test.ptest1(a int) + create_sql => 'CREATE PROCEDURE dump_test.ptest1(a int) LANGUAGE SQL AS $$ INSERT INTO dump_test.test_table (col1) VALUES (a) $$;', regexp => qr/^ \QCREATE PROCEDURE dump_test.ptest1(IN a integer)\E @@ -2508,13 +2503,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TYPE dump_test.int42 populated' => { create_order => 42, - create_sql => 'CREATE TYPE dump_test.int42 ( + create_sql => 'CREATE TYPE dump_test.int42 ( internallength = 4, input = dump_test.int42_in, output = dump_test.int42_out, @@ -2535,13 +2530,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TYPE dump_test.composite' => { create_order => 43, - create_sql => 'CREATE TYPE dump_test.composite AS ( + create_sql => 'CREATE TYPE dump_test.composite AS ( f1 int, f2 dump_test.int42 );', @@ -2555,34 +2550,34 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TYPE dump_test.undefined' => { create_order => 39, - create_sql => 'CREATE TYPE dump_test.undefined;', - regexp => qr/^\QCREATE TYPE dump_test.undefined;\E/m, + create_sql => 'CREATE TYPE dump_test.undefined;', + regexp => qr/^\QCREATE TYPE dump_test.undefined;\E/m, like => { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE FOREIGN DATA WRAPPER dummy' => { create_order => 35, - create_sql => 'CREATE FOREIGN DATA WRAPPER dummy;', - regexp => qr/CREATE FOREIGN DATA WRAPPER dummy;/m, - like => { %full_runs, section_pre_data => 1, }, + create_sql => 'CREATE FOREIGN DATA WRAPPER dummy;', + regexp => qr/CREATE FOREIGN DATA WRAPPER dummy;/m, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy' => { create_order => 36, - create_sql => 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;', - regexp => qr/CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;/m, - like => { %full_runs, section_pre_data => 1, }, + create_sql => 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;', + regexp => qr/CREATE SERVER s1 
FOREIGN DATA WRAPPER dummy;/m, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => { @@ -2603,7 +2598,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -2627,7 +2622,7 @@ my %tests = ( 'CREATE LANGUAGE pltestlang' => { create_order => 18, - create_sql => 'CREATE LANGUAGE pltestlang + create_sql => 'CREATE LANGUAGE pltestlang HANDLER dump_test.pltestlang_call_handler;', regexp => qr/^ \QCREATE PROCEDURAL LANGUAGE pltestlang \E @@ -2639,7 +2634,7 @@ my %tests = ( 'CREATE MATERIALIZED VIEW matview' => { create_order => 20, - create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview (col1) AS + create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview (col1) AS SELECT col1 FROM dump_test.test_table;', regexp => qr/^ \QCREATE MATERIALIZED VIEW dump_test.matview AS\E @@ -2651,13 +2646,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE MATERIALIZED VIEW matview_second' => { create_order => 21, - create_sql => 'CREATE MATERIALIZED VIEW + create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview_second (col1) AS SELECT * FROM dump_test.matview;', regexp => qr/^ @@ -2670,13 +2665,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE MATERIALIZED VIEW matview_third' => { create_order => 58, - create_sql => 'CREATE MATERIALIZED VIEW + create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview_third (col1) AS SELECT * FROM dump_test.matview_second WITH NO DATA;', regexp => qr/^ @@ -2689,13 +2684,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE MATERIALIZED VIEW matview_fourth' => { create_order => 59, - create_sql => 'CREATE MATERIALIZED VIEW + create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview_fourth (col1) AS SELECT * FROM dump_test.matview_third WITH NO DATA;', regexp => qr/^ @@ -2708,13 +2703,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE MATERIALIZED VIEW matview_compression' => { create_order => 20, - create_sql => 'CREATE MATERIALIZED VIEW + create_sql => 'CREATE MATERIALIZED VIEW dump_test.matview_compression (col2) AS SELECT col2 FROM dump_test.test_table; ALTER MATERIALIZED VIEW dump_test.matview_compression @@ -2732,14 +2727,14 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_toast_compression => 1, - only_dump_measurement => 1, + no_toast_compression => 1, + only_dump_measurement => 1, }, }, 'CREATE POLICY p1 ON test_table' => { create_order => 22, - create_sql => 'CREATE POLICY p1 ON dump_test.test_table + create_sql => 'CREATE POLICY p1 ON dump_test.test_table USING (true) WITH CHECK (true);', regexp => qr/^ @@ -2750,18 +2745,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - 
exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'CREATE POLICY p2 ON test_table FOR SELECT' => { create_order => 24, - create_sql => 'CREATE POLICY p2 ON dump_test.test_table + create_sql => 'CREATE POLICY p2 ON dump_test.test_table FOR SELECT TO regress_dump_test_role USING (true);', regexp => qr/^ \QCREATE POLICY p2 ON dump_test.test_table FOR SELECT TO regress_dump_test_role \E @@ -2771,18 +2766,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'CREATE POLICY p3 ON test_table FOR INSERT' => { create_order => 25, - create_sql => 'CREATE POLICY p3 ON dump_test.test_table + create_sql => 'CREATE POLICY p3 ON dump_test.test_table FOR INSERT TO regress_dump_test_role WITH CHECK (true);', regexp => qr/^ \QCREATE POLICY p3 ON dump_test.test_table FOR INSERT \E @@ -2792,18 +2787,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'CREATE POLICY p4 ON test_table FOR UPDATE' => { create_order => 26, - create_sql => 'CREATE POLICY p4 ON dump_test.test_table FOR UPDATE + create_sql => 'CREATE POLICY p4 ON dump_test.test_table FOR UPDATE TO regress_dump_test_role USING (true) WITH CHECK (true);', regexp => qr/^ \QCREATE POLICY p4 ON dump_test.test_table FOR UPDATE TO regress_dump_test_role \E @@ -2813,18 +2808,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'CREATE POLICY p5 ON test_table FOR DELETE' => { create_order => 27, - create_sql => 'CREATE POLICY p5 ON dump_test.test_table + create_sql => 'CREATE POLICY p5 ON dump_test.test_table FOR DELETE TO regress_dump_test_role USING (true);', regexp => qr/^ \QCREATE POLICY p5 ON dump_test.test_table FOR DELETE \E @@ -2834,12 +2829,12 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, @@ -2855,19 +2850,19 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'CREATE PUBLICATION pub1' => { create_order => 50, - create_sql => 'CREATE PUBLICATION pub1;', - regexp => qr/^ + create_sql => 'CREATE PUBLICATION pub1;', + regexp => qr/^ \QCREATE PUBLICATION pub1 WITH (publish = 'insert, update, delete, truncate');\E /xm, like => { %full_runs, section_post_data => 1, }, @@ -2875,7 +2870,7 @@ my %tests = ( 'CREATE PUBLICATION pub2' => { create_order => 50, - create_sql => 'CREATE PUBLICATION pub2 + create_sql => 'CREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish = 
\'\');', regexp => qr/^ @@ -2886,8 +2881,8 @@ my %tests = ( 'CREATE PUBLICATION pub3' => { create_order => 50, - create_sql => 'CREATE PUBLICATION pub3;', - regexp => qr/^ + create_sql => 'CREATE PUBLICATION pub3;', + regexp => qr/^ \QCREATE PUBLICATION pub3 WITH (publish = 'insert, update, delete, truncate');\E /xm, like => { %full_runs, section_post_data => 1, }, @@ -2895,8 +2890,8 @@ my %tests = ( 'CREATE PUBLICATION pub4' => { create_order => 50, - create_sql => 'CREATE PUBLICATION pub4;', - regexp => qr/^ + create_sql => 'CREATE PUBLICATION pub4;', + regexp => qr/^ \QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E /xm, like => { %full_runs, section_post_data => 1, }, @@ -2904,7 +2899,7 @@ my %tests = ( 'CREATE SUBSCRIPTION sub1' => { create_order => 50, - create_sql => 'CREATE SUBSCRIPTION sub1 + create_sql => 'CREATE SUBSCRIPTION sub1 CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1 WITH (connect = false);', regexp => qr/^ @@ -2915,7 +2910,7 @@ my %tests = ( 'CREATE SUBSCRIPTION sub2' => { create_order => 50, - create_sql => 'CREATE SUBSCRIPTION sub2 + create_sql => 'CREATE SUBSCRIPTION sub2 CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1 WITH (connect = false, origin = none);', regexp => qr/^ @@ -2926,7 +2921,7 @@ my %tests = ( 'CREATE SUBSCRIPTION sub3' => { create_order => 50, - create_sql => 'CREATE SUBSCRIPTION sub3 + create_sql => 'CREATE SUBSCRIPTION sub3 CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1 WITH (connect = false, origin = any);', regexp => qr/^ @@ -2942,10 +2937,10 @@ my %tests = ( regexp => qr/^ \QALTER PUBLICATION pub1 ADD TABLE ONLY dump_test.test_table;\E /xm, - like => { %full_runs, section_post_data => 1, }, + like => { %full_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, + exclude_test_table => 1, }, }, @@ -2996,8 +2991,8 @@ my %tests = ( 'ALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public' => { create_order => 52, - create_sql => 'ALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public;', - regexp => qr/^ + create_sql => 'ALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public;', + regexp => qr/^ \QALTER PUBLICATION pub3 ADD TABLES IN SCHEMA public;\E /xm, like => { %full_runs, section_post_data => 1, }, @@ -3010,10 +3005,10 @@ my %tests = ( regexp => qr/^ \QALTER PUBLICATION pub3 ADD TABLE ONLY dump_test.test_table;\E /xm, - like => { %full_runs, section_post_data => 1, }, + like => { %full_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, + exclude_test_table => 1, }, }, @@ -3024,10 +3019,10 @@ my %tests = ( regexp => qr/^ \QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E /xm, - like => { %full_runs, section_post_data => 1, }, + like => { %full_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, + exclude_test_table => 1, }, }, @@ -3052,30 +3047,30 @@ my %tests = ( 'CREATE SCHEMA dump_test' => { create_order => 2, - create_sql => 'CREATE SCHEMA dump_test;', - regexp => qr/^CREATE SCHEMA dump_test;/m, + create_sql => 'CREATE SCHEMA dump_test;', + regexp => qr/^CREATE SCHEMA dump_test;/m, like => { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE SCHEMA dump_test_second_schema' => { create_order => 9, - create_sql => 'CREATE SCHEMA dump_test_second_schema;', - regexp => qr/^CREATE SCHEMA 
dump_test_second_schema;/m, - like => { + create_sql => 'CREATE SCHEMA dump_test_second_schema;', + regexp => qr/^CREATE SCHEMA dump_test_second_schema;/m, + like => { %full_runs, - role => 1, + role => 1, section_pre_data => 1, }, }, 'CREATE TABLE test_table' => { create_order => 3, - create_sql => 'CREATE TABLE dump_test.test_table ( + create_sql => 'CREATE TABLE dump_test.test_table ( col1 serial primary key, col2 text COMPRESSION pglz, col3 text, @@ -3099,18 +3094,18 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_compression_method' => { create_order => 110, - create_sql => 'CREATE TABLE dump_test.test_compression_method ( + create_sql => 'CREATE TABLE dump_test.test_compression_method ( col1 text );', regexp => qr/^ @@ -3119,13 +3114,11 @@ my %tests = ( \Q);\E /xm, like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, + %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -3133,7 +3126,7 @@ my %tests = ( # (de)compression operations 'COPY test_compression_method' => { create_order => 111, - create_sql => 'INSERT INTO dump_test.test_compression_method (col1) ' + create_sql => 'INSERT INTO dump_test.test_compression_method (col1) ' . 'SELECT string_agg(a::text, \'\') FROM generate_series(1,4096) a;', regexp => qr/^ \QCOPY dump_test.test_compression_method (col1) FROM stdin;\E @@ -3141,22 +3134,22 @@ my %tests = ( /xm, like => { %full_runs, - data_only => 1, - section_data => 1, - only_dump_test_schema => 1, - test_schema_plus_large_objects => 1, + data_only => 1, + section_data => 1, + only_dump_test_schema => 1, + test_schema_plus_large_objects => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE fk_reference_test_table' => { create_order => 21, - create_sql => 'CREATE TABLE dump_test.fk_reference_test_table ( + create_sql => 'CREATE TABLE dump_test.fk_reference_test_table ( col1 int primary key references dump_test.test_table );', regexp => qr/^ @@ -3168,13 +3161,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_second_table' => { create_order => 6, - create_sql => 'CREATE TABLE dump_test.test_second_table ( + create_sql => 'CREATE TABLE dump_test.test_second_table ( col1 int, col2 text );', @@ -3188,13 +3181,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_compression' => { create_order => 3, - create_sql => 'CREATE TABLE dump_test.test_compression ( + create_sql => 'CREATE TABLE dump_test.test_compression ( col1 int, col2 text COMPRESSION lz4 );', @@ -3212,13 +3205,13 @@ my %tests = ( unlike => { exclude_dump_test_schema => 1, no_toast_compression => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE measurement PARTITIONED BY' => { create_order => 90, - create_sql => 
'CREATE TABLE dump_test.measurement ( + create_sql => 'CREATE TABLE dump_test.measurement ( city_id serial not null, logdate date not null, peaktemp int CHECK (peaktemp >= -460), @@ -3243,7 +3236,7 @@ my %tests = ( only_dump_measurement => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, exclude_measurement => 1, }, @@ -3270,8 +3263,8 @@ my %tests = ( like => { %full_runs, section_pre_data => 1, - role => 1, - binary_upgrade => 1, + role => 1, + binary_upgrade => 1, only_dump_measurement => 1, }, unlike => { @@ -3281,7 +3274,7 @@ my %tests = ( 'Creation of row-level trigger in partitioned table' => { create_order => 92, - create_sql => 'CREATE TRIGGER test_trigger + create_sql => 'CREATE TRIGGER test_trigger AFTER INSERT ON dump_test.measurement FOR EACH ROW EXECUTE PROCEDURE dump_test.trigger_func()', regexp => qr/^ @@ -3290,7 +3283,8 @@ my %tests = ( \QEXECUTE FUNCTION dump_test.trigger_func();\E /xm, like => { - %full_runs, %dump_test_schema_runs, section_post_data => 1, + %full_runs, %dump_test_schema_runs, + section_post_data => 1, only_dump_measurement => 1, }, unlike => { @@ -3301,7 +3295,8 @@ my %tests = ( 'COPY measurement' => { create_order => 93, - create_sql => 'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) ' + create_sql => + 'INSERT INTO dump_test.measurement (city_id, logdate, peaktemp, unitsales) ' . "VALUES (1, '2006-02-12', 35, 1);", regexp => qr/^ \QCOPY dump_test_second_schema.measurement_y2006m2 (city_id, logdate, peaktemp, unitsales) FROM stdin;\E @@ -3310,20 +3305,20 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - data_only => 1, + data_only => 1, only_dump_measurement => 1, - section_data => 1, + section_data => 1, only_dump_test_schema => 1, role_parallel => 1, role => 1, }, unlike => { - binary_upgrade => 1, - schema_only => 1, - exclude_measurement => 1, - only_dump_test_schema => 1, + binary_upgrade => 1, + schema_only => 1, + exclude_measurement => 1, + only_dump_test_schema => 1, test_schema_plus_large_objects => 1, - exclude_measurement => 1, + exclude_measurement => 1, exclude_measurement_data => 1, }, }, @@ -3350,8 +3345,8 @@ my %tests = ( like => { %full_runs, section_post_data => 1, - role => 1, - binary_upgrade => 1, + role => 1, + binary_upgrade => 1, only_dump_measurement => 1, }, unlike => { @@ -3366,8 +3361,8 @@ my %tests = ( like => { %full_runs, section_post_data => 1, - role => 1, - binary_upgrade => 1, + role => 1, + binary_upgrade => 1, only_dump_measurement => 1, }, unlike => { @@ -3382,8 +3377,8 @@ my %tests = ( like => { %full_runs, section_post_data => 1, - role => 1, - binary_upgrade => 1, + role => 1, + binary_upgrade => 1, only_dump_measurement => 1, }, unlike => { @@ -3394,19 +3389,19 @@ my %tests = ( # We should never see the creation of a trigger on a partition 'Disabled trigger on partition is not created' => { regexp => qr/CREATE TRIGGER test_trigger.*ON dump_test_second_schema/, - like => {}, + like => {}, unlike => { %full_runs, %dump_test_schema_runs }, }, # Triggers on partitions should not be dropped individually 'Triggers on partitions are not dropped' => { regexp => qr/DROP TRIGGER test_trigger.*ON dump_test_second_schema/, - like => {} + like => {} }, 'CREATE TABLE test_third_table_generated_cols' => { create_order => 6, - create_sql => 'CREATE TABLE dump_test.test_third_table ( + create_sql => 'CREATE TABLE dump_test.test_third_table ( f1 int, junk int, g1 int generated always as (f1 * 2) stored, "F3" int, @@ -3432,7 +3427,7 @@ my 
%tests = ( 'CREATE TABLE test_fourth_table_zero_col' => { create_order => 6, - create_sql => 'CREATE TABLE dump_test.test_fourth_table ( + create_sql => 'CREATE TABLE dump_test.test_fourth_table ( );', regexp => qr/^ \QCREATE TABLE dump_test.test_fourth_table (\E @@ -3442,13 +3437,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_fifth_table' => { create_order => 53, - create_sql => 'CREATE TABLE dump_test.test_fifth_table ( + create_sql => 'CREATE TABLE dump_test.test_fifth_table ( col1 integer, col2 boolean, col3 boolean, @@ -3468,13 +3463,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_sixth_table' => { create_order => 6, - create_sql => 'CREATE TABLE dump_test.test_sixth_table ( + create_sql => 'CREATE TABLE dump_test.test_sixth_table ( col1 int, col2 text, col3 bytea @@ -3490,13 +3485,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_seventh_table' => { create_order => 6, - create_sql => 'CREATE TABLE dump_test.test_seventh_table ( + create_sql => 'CREATE TABLE dump_test.test_seventh_table ( col1 int, col2 text, col3 bytea @@ -3512,13 +3507,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_table_identity' => { create_order => 3, - create_sql => 'CREATE TABLE dump_test.test_table_identity ( + create_sql => 'CREATE TABLE dump_test.test_table_identity ( col1 int generated always as identity primary key, col2 text );', @@ -3541,13 +3536,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_table_generated' => { create_order => 3, - create_sql => 'CREATE TABLE dump_test.test_table_generated ( + create_sql => 'CREATE TABLE dump_test.test_table_generated ( col1 int primary key, col2 int generated always as (col1 * 2) stored );', @@ -3561,13 +3556,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_table_generated_child1 (without local columns)' => { create_order => 4, - create_sql => 'CREATE TABLE dump_test.test_table_generated_child1 () + create_sql => 'CREATE TABLE dump_test.test_table_generated_child1 () INHERITS (dump_test.test_table_generated);', regexp => qr/^ \QCREATE TABLE dump_test.test_table_generated_child1 (\E\n @@ -3577,9 +3572,9 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -3593,7 +3588,7 @@ my %tests = ( 'CREATE TABLE test_table_generated_child2 (with local columns)' => { create_order => 4, - create_sql => 'CREATE TABLE dump_test.test_table_generated_child2 ( + create_sql => 'CREATE TABLE dump_test.test_table_generated_child2 ( 
col1 int, col2 int ) INHERITS (dump_test.test_table_generated);', @@ -3607,15 +3602,15 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE table_with_stats' => { create_order => 98, - create_sql => 'CREATE TABLE dump_test.table_index_stats ( + create_sql => 'CREATE TABLE dump_test.table_index_stats ( col1 int, col2 int, col3 int); @@ -3634,13 +3629,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_inheritance_parent' => { create_order => 90, - create_sql => 'CREATE TABLE dump_test.test_inheritance_parent ( + create_sql => 'CREATE TABLE dump_test.test_inheritance_parent ( col1 int NOT NULL, col2 int CHECK (col2 >= 42) );', @@ -3655,13 +3650,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE TABLE test_inheritance_child' => { create_order => 91, - create_sql => 'CREATE TABLE dump_test.test_inheritance_child ( + create_sql => 'CREATE TABLE dump_test.test_inheritance_child ( col1 int NOT NULL, CONSTRAINT test_inheritance_child CHECK (col2 >= 142857) ) INHERITS (dump_test.test_inheritance_parent);', @@ -3676,15 +3671,15 @@ my %tests = ( %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE STATISTICS extended_stats_no_options' => { create_order => 97, - create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_no_options + create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_no_options ON col1, col2 FROM dump_test.test_fifth_table', regexp => qr/^ \QCREATE STATISTICS dump_test.test_ext_stats_no_options ON col1, col2 FROM dump_test.test_fifth_table;\E @@ -3693,13 +3688,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE STATISTICS extended_stats_options' => { create_order => 97, - create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_opts + create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_opts (ndistinct) ON col1, col2 FROM dump_test.test_fifth_table', regexp => qr/^ \QCREATE STATISTICS dump_test.test_ext_stats_opts (ndistinct) ON col1, col2 FROM dump_test.test_fifth_table;\E @@ -3708,7 +3703,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -3723,13 +3718,13 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE STATISTICS extended_stats_expression' => { create_order => 99, - create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_expr + create_sql => 'CREATE STATISTICS dump_test.test_ext_stats_expr ON (2 * col1) FROM dump_test.test_fifth_table', regexp => qr/^ \QCREATE STATISTICS dump_test.test_ext_stats_expr ON (2 * col1) FROM dump_test.test_fifth_table;\E @@ 
-3738,7 +3733,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -3756,11 +3751,11 @@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -3772,42 +3767,42 @@ my %tests = ( \QCREATE INDEX measurement_city_id_logdate_idx ON ONLY dump_test.measurement USING\E /xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - compression => 1, - createdb => 1, - defaults => 1, - exclude_test_table => 1, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + compression => 1, + createdb => 1, + defaults => 1, + exclude_test_table => 1, exclude_test_table_data => 1, - no_toast_compression => 1, - no_large_objects => 1, - no_privs => 1, - no_owner => 1, - no_table_access_method => 1, - only_dump_test_schema => 1, - pg_dumpall_dbprivs => 1, - pg_dumpall_exclude => 1, - schema_only => 1, - section_post_data => 1, + no_toast_compression => 1, + no_large_objects => 1, + no_privs => 1, + no_owner => 1, + no_table_access_method => 1, + only_dump_test_schema => 1, + pg_dumpall_dbprivs => 1, + pg_dumpall_exclude => 1, + schema_only => 1, + section_post_data => 1, test_schema_plus_large_objects => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, exclude_measurement_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_test_table => 1, - pg_dumpall_globals => 1, + only_dump_test_table => 1, + pg_dumpall_globals => 1, pg_dumpall_globals_clean => 1, - role => 1, - section_pre_data => 1, - exclude_measurement => 1, + role => 1, + section_pre_data => 1, + exclude_measurement => 1, }, }, 'ALTER TABLE measurement PRIMARY KEY' => { - all_runs => 1, - catch_all => 'CREATE ... commands', + all_runs => 1, + catch_all => 'CREATE ... commands', create_order => 93, create_sql => 'ALTER TABLE dump_test.measurement ADD PRIMARY KEY (city_id, logdate);', @@ -3823,7 +3818,7 @@ my %tests = ( }, unlike => { exclude_dump_test_schema => 1, - exclude_measurement => 1, + exclude_measurement => 1, }, }, @@ -3833,12 +3828,12 @@ my %tests = ( /xm, like => { %full_runs, - role => 1, + role => 1, section_post_data => 1, only_dump_measurement => 1, }, unlike => { - exclude_measurement => 1, + exclude_measurement => 1, }, }, @@ -3848,59 +3843,59 @@ my %tests = ( /xm, like => { %full_runs, - role => 1, + role => 1, section_post_data => 1, only_dump_measurement => 1, exclude_measurement_data => 1, }, unlike => { - exclude_measurement => 1, + exclude_measurement => 1, }, }, 'ALTER INDEX ... ATTACH PARTITION (primary key)' => { - all_runs => 1, + all_runs => 1, catch_all => 'CREATE ... 
commands', - regexp => qr/^ + regexp => qr/^ \QALTER INDEX dump_test.measurement_pkey ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_pkey\E /xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - compression => 1, - createdb => 1, - defaults => 1, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + compression => 1, + createdb => 1, + defaults => 1, exclude_dump_test_schema => 1, - exclude_test_table => 1, - exclude_test_table_data => 1, - no_toast_compression => 1, - no_large_objects => 1, - no_privs => 1, - no_owner => 1, - no_table_access_method => 1, - pg_dumpall_dbprivs => 1, - pg_dumpall_exclude => 1, - role => 1, - schema_only => 1, - section_post_data => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + exclude_test_table_data => 1, + no_toast_compression => 1, + no_large_objects => 1, + no_privs => 1, + no_owner => 1, + no_table_access_method => 1, + pg_dumpall_dbprivs => 1, + pg_dumpall_exclude => 1, + role => 1, + schema_only => 1, + section_post_data => 1, + only_dump_measurement => 1, exclude_measurement_data => 1, }, unlike => { - only_dump_test_schema => 1, - only_dump_test_table => 1, - pg_dumpall_globals => 1, + only_dump_test_schema => 1, + only_dump_test_table => 1, + pg_dumpall_globals => 1, pg_dumpall_globals_clean => 1, - section_pre_data => 1, + section_pre_data => 1, test_schema_plus_large_objects => 1, - exclude_measurement => 1, + exclude_measurement => 1, }, }, 'CREATE VIEW test_view' => { create_order => 61, - create_sql => 'CREATE VIEW dump_test.test_view + create_sql => 'CREATE VIEW dump_test.test_view WITH (check_option = \'local\', security_barrier = true) AS SELECT col1 FROM dump_test.test_table;', regexp => qr/^ @@ -3912,7 +3907,7 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, @@ -3926,17 +3921,17 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, # FIXME 'DROP SCHEMA public (for testing without public schema)' => { - database => 'regress_pg_dump_test', + database => 'regress_pg_dump_test', create_order => 100, - create_sql => 'DROP SCHEMA public;', - regexp => qr/^DROP SCHEMA public;/m, - like => {}, + create_sql => 'DROP SCHEMA public;', + regexp => qr/^DROP SCHEMA public;/m, + like => {}, }, 'DROP SCHEMA public' => { @@ -3962,37 +3957,37 @@ my %tests = ( 'DROP FUNCTION dump_test.pltestlang_call_handler()' => { regexp => qr/^DROP FUNCTION dump_test\.pltestlang_call_handler\(\);/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP LANGUAGE pltestlang' => { regexp => qr/^DROP PROCEDURAL LANGUAGE pltestlang;/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP SCHEMA dump_test' => { regexp => qr/^DROP SCHEMA dump_test;/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP SCHEMA dump_test_second_schema' => { regexp => qr/^DROP SCHEMA dump_test_second_schema;/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP TABLE test_table' => { regexp => qr/^DROP TABLE dump_test\.test_table;/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP TABLE fk_reference_test_table' => { regexp => qr/^DROP TABLE dump_test\.fk_reference_test_table;/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP TABLE test_second_table' => { regexp => qr/^DROP TABLE 
dump_test\.test_second_table;/m, - like => { clean => 1, }, + like => { clean => 1, }, }, 'DROP EXTENSION IF EXISTS plpgsql' => { @@ -4011,27 +4006,27 @@ my %tests = ( 'DROP LANGUAGE IF EXISTS pltestlang' => { regexp => qr/^DROP PROCEDURAL LANGUAGE IF EXISTS pltestlang;/m, - like => { clean_if_exists => 1, }, + like => { clean_if_exists => 1, }, }, 'DROP SCHEMA IF EXISTS dump_test' => { regexp => qr/^DROP SCHEMA IF EXISTS dump_test;/m, - like => { clean_if_exists => 1, }, + like => { clean_if_exists => 1, }, }, 'DROP SCHEMA IF EXISTS dump_test_second_schema' => { regexp => qr/^DROP SCHEMA IF EXISTS dump_test_second_schema;/m, - like => { clean_if_exists => 1, }, + like => { clean_if_exists => 1, }, }, 'DROP TABLE IF EXISTS test_table' => { regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_table;/m, - like => { clean_if_exists => 1, }, + like => { clean_if_exists => 1, }, }, 'DROP TABLE IF EXISTS test_second_table' => { regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_second_table;/m, - like => { clean_if_exists => 1, }, + like => { clean_if_exists => 1, }, }, 'DROP ROLE regress_dump_test_role' => { @@ -4052,14 +4047,14 @@ my %tests = ( 'GRANT USAGE ON SCHEMA dump_test_second_schema' => { create_order => 10, - create_sql => 'GRANT USAGE ON SCHEMA dump_test_second_schema + create_sql => 'GRANT USAGE ON SCHEMA dump_test_second_schema TO regress_dump_test_role;', regexp => qr/^ \QGRANT USAGE ON SCHEMA dump_test_second_schema TO regress_dump_test_role;\E /xm, like => { %full_runs, - role => 1, + role => 1, section_pre_data => 1, }, unlike => { no_privs => 1, }, @@ -4067,7 +4062,7 @@ my %tests = ( 'GRANT USAGE ON FOREIGN DATA WRAPPER dummy' => { create_order => 85, - create_sql => 'GRANT USAGE ON FOREIGN DATA WRAPPER dummy + create_sql => 'GRANT USAGE ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;', regexp => qr/^ \QGRANT ALL ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;\E @@ -4078,7 +4073,7 @@ my %tests = ( 'GRANT USAGE ON FOREIGN SERVER s1' => { create_order => 85, - create_sql => 'GRANT USAGE ON FOREIGN SERVER s1 + create_sql => 'GRANT USAGE ON FOREIGN SERVER s1 TO regress_dump_test_role;', regexp => qr/^ \QGRANT ALL ON FOREIGN SERVER s1 TO regress_dump_test_role;\E @@ -4098,8 +4093,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, @@ -4114,8 +4109,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, @@ -4130,8 +4125,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, @@ -4146,8 +4141,8 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, @@ -4163,7 +4158,7 @@ my %tests = ( 'GRANT SELECT ON TABLE test_table' => { create_order => 5, - create_sql => 'GRANT SELECT ON TABLE dump_test.test_table + create_sql => 'GRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;', regexp => qr/^\QGRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;\E/m, @@ -4171,19 +4166,19 
@@ my %tests = ( %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_pre_data => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - no_privs => 1, - only_dump_measurement => 1, + exclude_test_table => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, 'GRANT SELECT ON TABLE measurement' => { create_order => 91, - create_sql => 'GRANT SELECT ON + create_sql => 'GRANT SELECT ON TABLE dump_test.measurement TO regress_dump_test_role;', regexp => @@ -4196,14 +4191,14 @@ my %tests = ( }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - exclude_measurement => 1, + no_privs => 1, + exclude_measurement => 1, }, }, 'GRANT SELECT ON TABLE measurement_y2006m2' => { create_order => 94, - create_sql => 'GRANT SELECT ON TABLE + create_sql => 'GRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2, dump_test_second_schema.measurement_y2006m3, dump_test_second_schema.measurement_y2006m4, @@ -4213,19 +4208,19 @@ my %tests = ( qr/^\QGRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;\E/m, like => { %full_runs, - role => 1, + role => 1, section_pre_data => 1, only_dump_measurement => 1, }, unlike => { no_privs => 1, - exclude_measurement => 1, + exclude_measurement => 1, }, }, 'GRANT ALL ON LARGE OBJECT ...' => { create_order => 60, - create_sql => 'DO $$ + create_sql => 'DO $$ DECLARE myoid oid; BEGIN SELECT loid FROM pg_largeobject INTO myoid; @@ -4237,16 +4232,16 @@ my %tests = ( /xm, like => { %full_runs, - column_inserts => 1, - data_only => 1, - inserts => 1, - section_pre_data => 1, + column_inserts => 1, + data_only => 1, + inserts => 1, + section_pre_data => 1, test_schema_plus_large_objects => 1, - binary_upgrade => 1, + binary_upgrade => 1, }, unlike => { no_large_objects => 1, - no_privs => 1, + no_privs => 1, schema_only => 1, }, }, @@ -4263,14 +4258,14 @@ my %tests = ( { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, - only_dump_measurement => 1, + no_privs => 1, + only_dump_measurement => 1, }, }, 'GRANT EXECUTE ON FUNCTION pg_sleep() TO regress_dump_test_role' => { create_order => 16, - create_sql => 'GRANT EXECUTE ON FUNCTION pg_sleep(float8) + create_sql => 'GRANT EXECUTE ON FUNCTION pg_sleep(float8) TO regress_dump_test_role;', regexp => qr/^ \QGRANT ALL ON FUNCTION pg_catalog.pg_sleep(double precision) TO regress_dump_test_role;\E @@ -4281,7 +4276,7 @@ my %tests = ( 'GRANT SELECT (proname ...) 
ON TABLE pg_proc TO public' => { create_order => 46, - create_sql => 'GRANT SELECT ( + create_sql => 'GRANT SELECT ( tableoid, oid, proname, @@ -4363,10 +4358,10 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -4379,10 +4374,10 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, - only_dump_measurement => 1, + schema_only => 1, + only_dump_measurement => 1, }, }, @@ -4404,8 +4399,8 @@ my %tests = ( 'REVOKE CONNECT ON DATABASE dump_test FROM public' => { create_order => 49, - create_sql => 'REVOKE CONNECT ON DATABASE dump_test FROM public;', - regexp => qr/^ + create_sql => 'REVOKE CONNECT ON DATABASE dump_test FROM public;', + regexp => qr/^ \QREVOKE CONNECT,TEMPORARY ON DATABASE dump_test FROM PUBLIC;\E\n \QGRANT TEMPORARY ON DATABASE dump_test TO PUBLIC;\E\n \QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E @@ -4415,7 +4410,7 @@ my %tests = ( 'REVOKE EXECUTE ON FUNCTION pg_sleep() FROM public' => { create_order => 15, - create_sql => 'REVOKE EXECUTE ON FUNCTION pg_sleep(float8) + create_sql => 'REVOKE EXECUTE ON FUNCTION pg_sleep(float8) FROM public;', regexp => qr/^ \QREVOKE ALL ON FUNCTION pg_catalog.pg_sleep(double precision) FROM PUBLIC;\E @@ -4431,7 +4426,7 @@ my %tests = ( 'REVOKE EXECUTE ON FUNCTION pg_stat_reset FROM regress_dump_test_role' => { create_order => 15, - create_sql => ' + create_sql => ' ALTER FUNCTION pg_stat_reset OWNER TO regress_dump_test_role; REVOKE EXECUTE ON FUNCTION pg_stat_reset FROM regress_dump_test_role;', @@ -4443,7 +4438,7 @@ my %tests = ( 'REVOKE SELECT ON TABLE pg_proc FROM public' => { create_order => 45, - create_sql => 'REVOKE SELECT ON TABLE pg_proc FROM public;', + create_sql => 'REVOKE SELECT ON TABLE pg_proc FROM public;', regexp => qr/^\QREVOKE SELECT ON TABLE pg_catalog.pg_proc FROM PUBLIC;\E/m, like => { %full_runs, section_pre_data => 1, }, @@ -4462,14 +4457,14 @@ my %tests = ( 'REVOKE USAGE ON LANGUAGE plpgsql FROM public' => { create_order => 16, - create_sql => 'REVOKE USAGE ON LANGUAGE plpgsql FROM public;', - regexp => qr/^REVOKE ALL ON LANGUAGE plpgsql FROM PUBLIC;/m, - like => { + create_sql => 'REVOKE USAGE ON LANGUAGE plpgsql FROM public;', + regexp => qr/^REVOKE ALL ON LANGUAGE plpgsql FROM PUBLIC;/m, + like => { %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - role => 1, - section_pre_data => 1, + role => 1, + section_pre_data => 1, only_dump_measurement => 1, }, unlike => { no_privs => 1, }, @@ -4496,7 +4491,7 @@ my %tests = ( # pretty, but seems hard to do better in this framework. 
'CREATE TABLE regress_pg_dump_table_am' => { create_order => 12, - create_sql => ' + create_sql => ' CREATE TABLE dump_test.regress_pg_dump_table_am_0() USING heap; CREATE TABLE dump_test.regress_pg_dump_table_am_1 (col1 int) USING regress_table_am; CREATE TABLE dump_test.regress_pg_dump_table_am_2() USING heap;', @@ -4512,13 +4507,13 @@ my %tests = ( unlike => { exclude_dump_test_schema => 1, no_table_access_method => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }, 'CREATE MATERIALIZED VIEW regress_pg_dump_matview_am' => { create_order => 13, - create_sql => ' + create_sql => ' CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_0 USING heap AS SELECT 1; CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_1 USING regress_table_am AS SELECT count(*) FROM pg_class; @@ -4536,7 +4531,7 @@ my %tests = ( unlike => { exclude_dump_test_schema => 1, no_table_access_method => 1, - only_dump_measurement => 1, + only_dump_measurement => 1, }, }); @@ -4557,7 +4552,7 @@ $node->psql( 'postgres', "CREATE COLLATION testing FROM \"C\"; DROP COLLATION testing;", on_error_stop => 0, - stderr => \$collation_check_stderr); + stderr => \$collation_check_stderr); if ($collation_check_stderr !~ /ERROR: /) { @@ -4765,15 +4760,19 @@ command_fails_like( foreach my $run (sort keys %pgdump_runs) { my $test_key = $run; - my $run_db = 'postgres'; + my $run_db = 'postgres'; # Skip command-level tests for gzip/lz4/zstd if the tool is not supported - if ($pgdump_runs{$run}->{compile_option} && - (($pgdump_runs{$run}->{compile_option} eq 'gzip' && !$supports_gzip) || - ($pgdump_runs{$run}->{compile_option} eq 'lz4' && !$supports_lz4) || - ($pgdump_runs{$run}->{compile_option} eq 'zstd' && !$supports_zstd))) + if ($pgdump_runs{$run}->{compile_option} + && (($pgdump_runs{$run}->{compile_option} eq 'gzip' + && !$supports_gzip) + || ($pgdump_runs{$run}->{compile_option} eq 'lz4' + && !$supports_lz4) + || ($pgdump_runs{$run}->{compile_option} eq 'zstd' + && !$supports_zstd))) { - note "$run: skipped due to no $pgdump_runs{$run}->{compile_option} support"; + note + "$run: skipped due to no $pgdump_runs{$run}->{compile_option} support"; next; } @@ -4800,16 +4799,18 @@ foreach my $run (sort keys %pgdump_runs) foreach my $glob_pattern (@{$glob_patterns}) { my @glob_output = glob($glob_pattern); - is(scalar(@glob_output) > 0, 1, "$run: glob check for $glob_pattern"); + is(scalar(@glob_output) > 0, + 1, "$run: glob check for $glob_pattern"); } } if ($pgdump_runs{$run}->{command_like}) { my $cmd_like = $pgdump_runs{$run}->{command_like}; - $node->command_like(\@{ $cmd_like->{command} }, - $cmd_like->{expected}, - "$run: " . $cmd_like->{name}) + $node->command_like( + \@{ $cmd_like->{command} }, + $cmd_like->{expected}, + "$run: " . 
$cmd_like->{name}); } if ($pgdump_runs{$run}->{restore_cmd}) diff --git a/src/bin/pg_dump/t/004_pg_dump_parallel.pl b/src/bin/pg_dump/t/004_pg_dump_parallel.pl index f41c2fa223..c4b461ed87 100644 --- a/src/bin/pg_dump/t/004_pg_dump_parallel.pl +++ b/src/bin/pg_dump/t/004_pg_dump_parallel.pl @@ -56,16 +56,16 @@ $node->command_ok( $node->command_ok( [ 'pg_restore', '-v', - '-d', $node->connstr($dbname2), - '-j3', "$backupdir/dump1" + '-d', $node->connstr($dbname2), + '-j3', "$backupdir/dump1" ], 'parallel restore'); $node->command_ok( [ - 'pg_dump', '-Fd', + 'pg_dump', '-Fd', '--no-sync', '-j2', - '-f', "$backupdir/dump2", + '-f', "$backupdir/dump2", '--inserts', $node->connstr($dbname1) ], 'parallel dump as inserts'); @@ -73,8 +73,8 @@ $node->command_ok( $node->command_ok( [ 'pg_restore', '-v', - '-d', $node->connstr($dbname3), - '-j3', "$backupdir/dump2" + '-d', $node->connstr($dbname3), + '-j3', "$backupdir/dump2" ], 'parallel restore as inserts'); diff --git a/src/bin/pg_dump/t/010_dump_connstr.pl b/src/bin/pg_dump/t/010_dump_connstr.pl index de55564555..ed86c332ef 100644 --- a/src/bin/pg_dump/t/010_dump_connstr.pl +++ b/src/bin/pg_dump/t/010_dump_connstr.pl @@ -15,7 +15,7 @@ if ($PostgreSQL::Test::Utils::is_msys2) # We're going to use byte sequences that aren't valid UTF-8 strings. Use # LATIN1, which accepts any byte and has a conversion from each byte to UTF-8. -$ENV{LC_ALL} = 'C'; +$ENV{LC_ALL} = 'C'; $ENV{PGCLIENTENCODING} = 'LATIN1'; # Create database and user names covering the range of LATIN1 @@ -26,8 +26,8 @@ $ENV{PGCLIENTENCODING} = 'LATIN1'; # The odds of finding something interesting by testing all ASCII letters # seem too small to justify the cycles of testing a fifth name. my $dbname1 = - 'regression' - . generate_ascii_string(1, 9) + 'regression' + . generate_ascii_string(1, 9) . generate_ascii_string(11, 12) . generate_ascii_string(14, 33) . ( @@ -37,7 +37,7 @@ my $dbname1 = . generate_ascii_string(35, 43) # skip ',' . generate_ascii_string(45, 54); my $dbname2 = 'regression' . generate_ascii_string(55, 65) # skip 'B'-'W' - . generate_ascii_string(88, 99) # skip 'd'-'w' + . generate_ascii_string(88, 99) # skip 'd'-'w' . generate_ascii_string(120, 149); my $dbname3 = 'regression' . generate_ascii_string(150, 202); my $dbname4 = 'regression' . 
generate_ascii_string(203, 255); @@ -57,17 +57,17 @@ $node->init(extra => # prep pg_hba.conf and pg_ident.conf $node->run_log( [ - $ENV{PG_REGRESS}, '--config-auth', - $node->data_dir, '--user', + $ENV{PG_REGRESS}, '--config-auth', + $node->data_dir, '--user', $src_bootstrap_super, '--create-role', "$username1,$username2,$username3,$username4" ]); $node->start; my $backupdir = $node->backup_dir; -my $discard = "$backupdir/discard.sql"; -my $plain = "$backupdir/plain.sql"; -my $dirfmt = "$backupdir/dirfmt"; +my $discard = "$backupdir/discard.sql"; +my $plain = "$backupdir/plain.sql"; +my $dirfmt = "$backupdir/dirfmt"; $node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]); $node->run_log( @@ -115,9 +115,9 @@ $node->command_ok( 'pg_dumpall with long ASCII name 4'); $node->command_ok( [ - 'pg_dumpall', '-U', + 'pg_dumpall', '-U', $src_bootstrap_super, '--no-sync', - '-r', '-l', + '-r', '-l', 'dbname=template1' ], 'pg_dumpall -l accepts connection string'); @@ -146,13 +146,13 @@ $node->command_ok( 'parallel dump'); # recreate $dbname1 for restore test -$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]); +$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]); $node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]); $node->command_ok( [ - 'pg_restore', '-v', '-d', 'template1', - '-j2', '-U', $username1, $dirfmt + 'pg_restore', '-v', '-d', 'template1', + '-j2', '-U', $username1, $dirfmt ], 'parallel restore'); @@ -160,8 +160,8 @@ $node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]); $node->command_ok( [ - 'pg_restore', '-C', '-v', '-d', - 'template1', '-j2', '-U', $username1, + 'pg_restore', '-C', '-v', '-d', + 'template1', '-j2', '-U', $username1, $dirfmt ], 'parallel restore with create'); @@ -220,8 +220,8 @@ $cmdline_node->run_log( { $result = run_log( [ - 'psql', '-p', $cmdline_node->port, '-U', - $restore_super, '-X', '-f', $plain + 'psql', '-p', $cmdline_node->port, '-U', + $restore_super, '-X', '-f', $plain ], '2>', \$stderr); diff --git a/src/bin/pg_resetwal/t/002_corrupted.pl b/src/bin/pg_resetwal/t/002_corrupted.pl index 3dd2a4e89f..6d19a1efd5 100644 --- a/src/bin/pg_resetwal/t/002_corrupted.pl +++ b/src/bin/pg_resetwal/t/002_corrupted.pl @@ -14,7 +14,7 @@ my $node = PostgreSQL::Test::Cluster->new('main'); $node->init; my $pg_control = $node->data_dir . '/global/pg_control'; -my $size = (stat($pg_control))[7]; +my $size = (stat($pg_control))[7]; # Read out the head of the file to get PG_CONTROL_VERSION in # particular. diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl index 63490360e5..031594e14e 100644 --- a/src/bin/pg_rewind/t/001_basic.pl +++ b/src/bin/pg_rewind/t/001_basic.pl @@ -92,7 +92,7 @@ sub run_test # step. command_fails( [ - 'pg_rewind', '--debug', + 'pg_rewind', '--debug', '--source-pgdata', $standby_pgdata, '--target-pgdata', $primary_pgdata, '--no-sync' @@ -104,10 +104,10 @@ sub run_test # recovery once. 
 	command_fails(
 		[
-			'pg_rewind',       '--debug',
+			'pg_rewind', '--debug',
 			'--source-pgdata', $standby_pgdata,
 			'--target-pgdata', $primary_pgdata,
-			'--no-sync',       '--no-ensure-shutdown'
+			'--no-sync', '--no-ensure-shutdown'
 		],
 		'pg_rewind --no-ensure-shutdown with running target');
@@ -117,10 +117,10 @@ sub run_test
 	$node_primary->stop;
 	command_fails(
 		[
-			'pg_rewind',       '--debug',
+			'pg_rewind', '--debug',
 			'--source-pgdata', $standby_pgdata,
 			'--target-pgdata', $primary_pgdata,
-			'--no-sync',       '--no-ensure-shutdown'
+			'--no-sync', '--no-ensure-shutdown'
 		],
 		'pg_rewind with unexpected running source');
@@ -131,10 +131,10 @@ sub run_test
 	$node_standby->stop;
 	command_ok(
 		[
-			'pg_rewind',       '--debug',
+			'pg_rewind', '--debug',
 			'--source-pgdata', $standby_pgdata,
 			'--target-pgdata', $primary_pgdata,
-			'--no-sync',       '--dry-run'
+			'--no-sync', '--dry-run'
 		],
 		'pg_rewind --dry-run');
diff --git a/src/bin/pg_rewind/t/006_options.pl b/src/bin/pg_rewind/t/006_options.pl
index 2d0c5e2f8b..4b6e39a47c 100644
--- a/src/bin/pg_rewind/t/006_options.pl
+++ b/src/bin/pg_rewind/t/006_options.pl
@@ -17,7 +17,7 @@ my $primary_pgdata = PostgreSQL::Test::Utils::tempdir;
 my $standby_pgdata = PostgreSQL::Test::Utils::tempdir;
 command_fails(
 	[
-		'pg_rewind',       '--debug',
+		'pg_rewind', '--debug',
 		'--target-pgdata', $primary_pgdata,
 		'--source-pgdata', $standby_pgdata,
 		'extra_arg1'
@@ -27,7 +27,7 @@ command_fails([ 'pg_rewind', '--target-pgdata', $primary_pgdata ],
 	'no source specified');
 command_fails(
 	[
-		'pg_rewind',       '--debug',
+		'pg_rewind', '--debug',
 		'--target-pgdata', $primary_pgdata,
 		'--source-pgdata', $standby_pgdata,
 		'--source-server', 'incorrect_source'
@@ -35,7 +35,7 @@ command_fails(
 	'both remote and local sources specified');
 command_fails(
 	[
-		'pg_rewind',       '--debug',
+		'pg_rewind', '--debug',
 		'--target-pgdata', $primary_pgdata,
 		'--source-pgdata', $standby_pgdata,
 		'--write-recovery-conf'
diff --git a/src/bin/pg_rewind/t/007_standby_source.pl b/src/bin/pg_rewind/t/007_standby_source.pl
index 3f813929a6..4fd1ed001c 100644
--- a/src/bin/pg_rewind/t/007_standby_source.pl
+++ b/src/bin/pg_rewind/t/007_standby_source.pl
@@ -124,8 +124,8 @@ copy(
 # recovery configuration automatically.
 command_ok(
 	[
-		'pg_rewind',       "--debug",
-		"--source-server", $node_b->connstr('postgres'),
+		'pg_rewind', "--debug",
+		"--source-server", $node_b->connstr('postgres'),
 		"--target-pgdata=$node_c_pgdata",
 		"--no-sync", "--write-recovery-conf"
 	],
diff --git a/src/bin/pg_rewind/t/008_min_recovery_point.pl b/src/bin/pg_rewind/t/008_min_recovery_point.pl
index c753a64fdb..d4c89451e6 100644
--- a/src/bin/pg_rewind/t/008_min_recovery_point.pl
+++ b/src/bin/pg_rewind/t/008_min_recovery_point.pl
@@ -132,7 +132,7 @@ $node_2->poll_query_until('postgres',
 $node_2->stop('fast');
 $node_3->stop('fast');
 
-my $node_2_pgdata  = $node_2->data_dir;
+my $node_2_pgdata = $node_2->data_dir;
 my $node_1_connstr = $node_1->connstr;
 
 # Keep a temporary postgresql.conf or it would be overwritten during the rewind.
@@ -142,7 +142,7 @@ copy(
 
 command_ok(
 	[
-		'pg_rewind',                      "--source-server=$node_1_connstr",
+		'pg_rewind', "--source-server=$node_1_connstr",
 		"--target-pgdata=$node_2_pgdata", "--debug"
 	],
 	'run pg_rewind');
diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl
index ed89aba35f..cf60a04ae7 100644
--- a/src/bin/pg_rewind/t/009_growing_files.pl
+++ b/src/bin/pg_rewind/t/009_growing_files.pl
@@ -51,7 +51,7 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a';
 # copy operation and the result will be an error.
 my $ret = run_log(
 	[
-		'pg_rewind',       '--debug',
+		'pg_rewind', '--debug',
 		'--source-pgdata', $standby_pgdata,
 		'--target-pgdata', $primary_pgdata,
 		'--no-sync',
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 373f6dfbf7..4957791e94 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -38,7 +38,7 @@ use Carp;
 use Exporter 'import';
 use File::Copy;
 use File::Path qw(rmtree);
-use IPC::Run   qw(run);
+use IPC::Run qw(run);
 use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::RecursiveCopy;
 use PostgreSQL::Test::Utils;
@@ -101,8 +101,8 @@ sub check_query
 		],
 		'>', \$stdout, '2>', \$stderr;
 
-	is($result, 1,  "$test_name: psql exit code");
-	is($stderr, '', "$test_name: psql no stderr");
+	is($result, 1, "$test_name: psql exit code");
+	is($stderr, '', "$test_name: psql no stderr");
 	is($stdout, $expected_stdout, "$test_name: query result matches");
 
 	return;
@@ -111,7 +111,7 @@ sub check_query
 sub setup_cluster
 {
 	my $extra_name = shift;    # Used to differentiate clusters
-	my $extra      = shift;    # Extra params for initdb
+	my $extra = shift;    # Extra params for initdb
 
 	# Initialize primary, data checksums are mandatory
 	$node_primary =
@@ -123,8 +123,8 @@ sub setup_cluster
 	# minimal permissions enough to rewind from an online source.
 	$node_primary->init(
 		allows_streaming => 1,
-		extra            => $extra,
-		auth_extra       => [ '--create-role', 'rewind_user' ]);
+		extra => $extra,
+		auth_extra => [ '--create-role', 'rewind_user' ]);
 
 	# Set wal_keep_size to prevent WAL segment recycling after enforced
 	# checkpoints in the tests.
@@ -203,11 +203,11 @@ sub promote_standby
 
 sub run_pg_rewind
 {
-	my $test_mode       = shift;
-	my $primary_pgdata  = $node_primary->data_dir;
-	my $standby_pgdata  = $node_standby->data_dir;
+	my $test_mode = shift;
+	my $primary_pgdata = $node_primary->data_dir;
+	my $standby_pgdata = $node_standby->data_dir;
 	my $standby_connstr = $node_standby->connstr('postgres');
-	my $tmp_folder      = PostgreSQL::Test::Utils::tempdir;
+	my $tmp_folder = PostgreSQL::Test::Utils::tempdir;
 
 	# Append the rewind-specific role to the connection string.
 	$standby_connstr = "$standby_connstr user=rewind_user";
@@ -269,10 +269,10 @@ sub run_pg_rewind
 	# recovery configuration automatically.
 		command_ok(
 			[
-				'pg_rewind',       "--debug",
-				"--source-server", $standby_connstr,
+				'pg_rewind', "--debug",
+				"--source-server", $standby_connstr,
 				"--target-pgdata=$primary_pgdata", "--no-sync",
-				"--write-recovery-conf",           "--config-file",
+				"--write-recovery-conf", "--config-file",
 				"$tmp_folder/primary-postgresql.conf.tmp"
 			],
 			'pg_rewind remote');
diff --git a/src/bin/pg_test_fsync/pg_test_fsync.c b/src/bin/pg_test_fsync/pg_test_fsync.c
index 3d5e8f30ab..435df8d808 100644
--- a/src/bin/pg_test_fsync/pg_test_fsync.c
+++ b/src/bin/pg_test_fsync/pg_test_fsync.c
@@ -623,7 +623,7 @@ static void
 print_elapse(struct timeval start_t, struct timeval stop_t, int ops)
 {
 	double		total_time = (stop_t.tv_sec - start_t.tv_sec) +
-	(stop_t.tv_usec - start_t.tv_usec) * 0.000001;
+		(stop_t.tv_usec - start_t.tv_usec) * 0.000001;
 	double		per_second = ops / total_time;
 	double		avg_op_time_us = (total_time / ops) * USECS_SEC;
 
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 1ff68c5cc6..64024e3b9e 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -105,8 +105,8 @@ check_and_dump_old_cluster(bool live_check)
 	check_for_isn_and_int8_passing_mismatch(&old_cluster);
 
 	/*
-	 * PG 16 increased the size of the 'aclitem' type, which breaks the on-disk
-	 * format for existing data.
+	 * PG 16 increased the size of the 'aclitem' type, which breaks the
+	 * on-disk format for existing data.
 	 */
 	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500)
 		check_for_aclitem_data_type_usage(&old_cluster);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index 85ed15ae4a..a9988abfe1 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -61,9 +61,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 		   new_relnum < new_db->rel_arr.nrels)
 	{
 		RelInfo    *old_rel = (old_relnum < old_db->rel_arr.nrels) ?
-		&old_db->rel_arr.rels[old_relnum] : NULL;
+			&old_db->rel_arr.rels[old_relnum] : NULL;
 		RelInfo    *new_rel = (new_relnum < new_db->rel_arr.nrels) ?
-		&new_db->rel_arr.rels[new_relnum] : NULL;
+			&new_db->rel_arr.rels[new_relnum] : NULL;
 
 		/* handle running off one array before the other */
 		if (!new_rel)
@@ -302,14 +302,14 @@ get_db_and_rel_infos(ClusterInfo *cluster)
 static void
 get_template0_info(ClusterInfo *cluster)
 {
-	PGconn *conn = connectToServer(cluster, "template1");
-	DbLocaleInfo *locale;
-	PGresult *dbres;
-	int i_datencoding;
-	int i_datlocprovider;
-	int i_datcollate;
-	int i_datctype;
-	int i_daticulocale;
+	PGconn	   *conn = connectToServer(cluster, "template1");
+	DbLocaleInfo *locale;
+	PGresult   *dbres;
+	int			i_datencoding;
+	int			i_datlocprovider;
+	int			i_datcollate;
+	int			i_datctype;
+	int			i_daticulocale;
 
 	if (GET_MAJOR_VERSION(cluster->major_version) >= 1500)
 		dbres = executeQueryOrDie(conn,
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 75bab0a04c..4562dafcff 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -379,10 +379,10 @@ setup(char *argv0, bool *live_check)
 static void
 set_locale_and_encoding(void)
 {
-	PGconn *conn_new_template1;
-	char *datcollate_literal;
-	char *datctype_literal;
-	char *daticulocale_literal = NULL;
+	PGconn	   *conn_new_template1;
+	char	   *datcollate_literal;
+	char	   *datctype_literal;
+	char	   *daticulocale_literal = NULL;
 	DbLocaleInfo *locale = old_cluster.template0;
 
 	prep_status("Setting locale and encoding for new cluster");
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 4a7895a756..41fce089d6 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -4,7 +4,7 @@
 use strict;
 use warnings;
 
-use Cwd            qw(abs_path);
+use Cwd qw(abs_path);
 use File::Basename qw(dirname);
 use File::Compare;
 use File::Find qw(find);
@@ -81,7 +81,7 @@ if (   (defined($ENV{olddump}) && !defined($ENV{oldinstall}))
 }
 
 # Paths to the dumps taken during the tests.
-my $tempdir    = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 my $dump1_file = "$tempdir/dump1.sql";
 my $dump2_file = "$tempdir/dump2.sql";
 
@@ -108,7 +108,7 @@ if ($oldnode->pg_version >= 11)
 # can test that pg_upgrade copies the locale settings of template0
 # from the old to the new cluster.
 
-my $original_encoding = "6";	# UTF-8
+my $original_encoding = "6";    # UTF-8
 my $original_provider = "c";
 my $original_locale = "C";
 my $original_iculocale = "";
@@ -138,11 +138,12 @@ $oldnode->start;
 
 my $result;
 $result = $oldnode->safe_psql(
-	'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
+	'postgres',
+	"SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
   FROM pg_database WHERE datname='template0'");
-is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
-	"check locales in original cluster"
-	);
+is( $result,
+	"$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
+	"check locales in original cluster");
 
 # The default location of the source code is the root of this directory.
 my $srcdir = abs_path("../../..");
@@ -166,9 +167,9 @@ else
 	# Create databases with names covering most ASCII bytes.  The
 	# first name exercises backslashes adjacent to double quotes, a
 	# Windows special case.
-	generate_db($oldnode, 'regression\\"\\', 1,  45, '\\\\"\\\\\\');
-	generate_db($oldnode, 'regression',      46, 90, '');
-	generate_db($oldnode, 'regression',      91, 127, '');
+	generate_db($oldnode, 'regression\\"\\', 1, 45, '\\\\"\\\\\\');
+	generate_db($oldnode, 'regression', 46, 90, '');
+	generate_db($oldnode, 'regression', 91, 127, '');
 
 	# Grab any regression options that may be passed down by caller.
 	my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || "";
@@ -251,9 +252,9 @@ if (defined($ENV{oldinstall}))
 		$newnode->command_ok(
 			[
 				'psql', '-X',
-				'-v',   'ON_ERROR_STOP=1',
-				'-c',   $upcmds,
-				'-d',   $oldnode->connstr($updb),
+				'-v', 'ON_ERROR_STOP=1',
+				'-c', $upcmds,
+				'-d', $oldnode->connstr($updb),
 			],
 			"ran version adaptation commands for database $updb");
 	}
@@ -263,7 +264,7 @@ if (defined($ENV{oldinstall}))
 # that we need to use pg_dumpall from the new node here.
 my @dump_command = (
 	'pg_dumpall', '--no-sync', '-d', $oldnode->connstr('postgres'),
-	'-f',         $dump1_file);
+	'-f', $dump1_file);
 # --extra-float-digits is needed when upgrading from a version older than 11.
 push(@dump_command, '--extra-float-digits', '0')
   if ($oldnode->pg_version < 12);
@@ -330,15 +331,14 @@ $oldnode->stop;
 command_fails(
 	[
 		'pg_upgrade', '--no-sync',
-		'-d',         $oldnode->data_dir,
-		'-D',         $newnode->data_dir,
-		'-b',         $oldbindir . '/does/not/exist/',
-		'-B',         $newbindir,
-		'-s',         $newnode->host,
-		'-p',         $oldnode->port,
-		'-P',         $newnode->port,
-		$mode,
-		'--check',
+		'-d', $oldnode->data_dir,
+		'-D', $newnode->data_dir,
+		'-b', $oldbindir . '/does/not/exist/',
+		'-B', $newbindir,
+		'-s', $newnode->host,
+		'-p', $oldnode->port,
+		'-P', $newnode->port,
+		$mode, '--check',
 	],
 	'run of pg_upgrade --check for new instance with incorrect binary path');
 ok(-d $newnode->data_dir . "/pg_upgrade_output.d",
@@ -348,12 +348,11 @@ rmtree($newnode->data_dir . "/pg_upgrade_output.d");
 # --check command works here, cleans up pg_upgrade_output.d.
 command_ok(
 	[
-		'pg_upgrade', '--no-sync',        '-d', $oldnode->data_dir,
-		'-D',         $newnode->data_dir, '-b', $oldbindir,
-		'-B',         $newbindir,         '-s', $newnode->host,
-		'-p',         $oldnode->port,     '-P', $newnode->port,
-		$mode,
-		'--check',
+		'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
+		'-D', $newnode->data_dir, '-b', $oldbindir,
+		'-B', $newbindir, '-s', $newnode->host,
+		'-p', $oldnode->port, '-P', $newnode->port,
+		$mode, '--check',
 	],
 	'run of pg_upgrade --check for new instance');
 ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
@@ -362,10 +361,10 @@ ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
 # Actual run, pg_upgrade_output.d is removed at the end.
 command_ok(
 	[
-		'pg_upgrade', '--no-sync',        '-d', $oldnode->data_dir,
-		'-D',         $newnode->data_dir, '-b', $oldbindir,
-		'-B',         $newbindir,         '-s', $newnode->host,
-		'-p',         $oldnode->port,     '-P', $newnode->port,
+		'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
+		'-D', $newnode->data_dir, '-b', $oldbindir,
+		'-B', $newbindir, '-s', $newnode->host,
+		'-p', $oldnode->port, '-P', $newnode->port,
 		$mode,
 	],
 	'run of pg_upgrade for new instance');
@@ -396,16 +395,17 @@ if (-d $log_path)
 
 # Test that upgraded cluster has original locale settings.
 $result = $newnode->safe_psql(
-	'postgres', "SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
+	'postgres',
+	"SELECT encoding, $provider_field, datcollate, datctype, $iculocale_field
   FROM pg_database WHERE datname='template0'");
-is($result, "$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
-	"check that locales in new cluster match original cluster"
-	);
+is( $result,
+	"$original_encoding|$original_provider|$original_locale|$original_locale|$original_iculocale",
+	"check that locales in new cluster match original cluster");
 
 # Second dump from the upgraded instance.
 @dump_command = (
 	'pg_dumpall', '--no-sync', '-d', $newnode->connstr('postgres'),
-	'-f',         $dump2_file);
+	'-f', $dump2_file);
 # --extra-float-digits is needed when upgrading from a version older than 11.
 push(@dump_command, '--extra-float-digits', '0')
   if ($oldnode->pg_version < 12);
diff --git a/src/bin/pg_verifybackup/t/002_algorithm.pl b/src/bin/pg_verifybackup/t/002_algorithm.pl
index 87b8803a33..5b02ea4d55 100644
--- a/src/bin/pg_verifybackup/t/002_algorithm.pl
+++ b/src/bin/pg_verifybackup/t/002_algorithm.pl
@@ -17,7 +17,7 @@ $primary->start;
 for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
 {
 	my $backup_path = $primary->backup_dir . '/' . $algorithm;
-	my @backup      = (
+	my @backup = (
 		'pg_basebackup', '-D', $backup_path,
 		'--manifest-checksums', $algorithm, '--no-sync', '-cfast');
 	my @verify = ('pg_verifybackup', '-e', $backup_path);
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 0c304105c5..4cc3dd05e3 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -16,7 +16,7 @@ $primary->start;
 
 # Include a user-defined tablespace in the hopes of detecting problems in that
 # area.
-my $source_ts_path   = PostgreSQL::Test::Utils::tempdir_short();
+my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
 my $source_ts_prefix = $source_ts_path;
 $source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
@@ -30,67 +30,67 @@ EOM
 
 my @scenario = (
 	{
-		'name'     => 'extra_file',
+		'name' => 'extra_file',
 		'mutilate' => \&mutilate_extra_file,
 		'fails_like' =>
 		  qr/extra_file.*present on disk but not in the manifest/
 	},
 	{
-		'name'     => 'extra_tablespace_file',
+		'name' => 'extra_tablespace_file',
 		'mutilate' => \&mutilate_extra_tablespace_file,
 		'fails_like' =>
 		  qr/extra_ts_file.*present on disk but not in the manifest/
 	},
 	{
-		'name'     => 'missing_file',
+		'name' => 'missing_file',
 		'mutilate' => \&mutilate_missing_file,
 		'fails_like' =>
 		  qr/pg_xact\/0000.*present in the manifest but not on disk/
 	},
 	{
-		'name'     => 'missing_tablespace',
+		'name' => 'missing_tablespace',
 		'mutilate' => \&mutilate_missing_tablespace,
 		'fails_like' =>
 		  qr/pg_tblspc.*present in the manifest but not on disk/
 	},
 	{
-		'name'       => 'append_to_file',
-		'mutilate'   => \&mutilate_append_to_file,
+		'name' => 'append_to_file',
+		'mutilate' => \&mutilate_append_to_file,
 		'fails_like' => qr/has size \d+ on disk but size \d+ in the manifest/
 	},
 	{
-		'name'       => 'truncate_file',
-		'mutilate'   => \&mutilate_truncate_file,
+		'name' => 'truncate_file',
+		'mutilate' => \&mutilate_truncate_file,
 		'fails_like' => qr/has size 0 on disk but size \d+ in the manifest/
 	},
 	{
-		'name'       => 'replace_file',
-		'mutilate'   => \&mutilate_replace_file,
+		'name' => 'replace_file',
+		'mutilate' => \&mutilate_replace_file,
 		'fails_like' => qr/checksum mismatch for file/
 	},
 	{
-		'name'       => 'bad_manifest',
-		'mutilate'   => \&mutilate_bad_manifest,
+		'name' => 'bad_manifest',
+		'mutilate' => \&mutilate_bad_manifest,
 		'fails_like' => qr/manifest checksum mismatch/
 	},
 	{
-		'name'            => 'open_file_fails',
-		'mutilate'        => \&mutilate_open_file_fails,
-		'fails_like'      => qr/could not open file/,
+		'name' => 'open_file_fails',
+		'mutilate' => \&mutilate_open_file_fails,
+		'fails_like' => qr/could not open file/,
 		'skip_on_windows' => 1
 	},
 	{
-		'name'            => 'open_directory_fails',
-		'mutilate'        => \&mutilate_open_directory_fails,
-		'cleanup'         => \&cleanup_open_directory_fails,
-		'fails_like'      => qr/could not open directory/,
+		'name' => 'open_directory_fails',
+		'mutilate' => \&mutilate_open_directory_fails,
+		'cleanup' => \&cleanup_open_directory_fails,
+		'fails_like' => qr/could not open directory/,
 		'skip_on_windows' => 1
 	},
 	{
-		'name'            => 'search_directory_fails',
-		'mutilate'        => \&mutilate_search_directory_fails,
-		'cleanup'         => \&cleanup_search_directory_fails,
-		'fails_like'      => qr/could not stat file or directory/,
+		'name' => 'search_directory_fails',
+		'mutilate' => \&mutilate_search_directory_fails,
+		'cleanup' => \&cleanup_search_directory_fails,
+		'fails_like' => qr/could not stat file or directory/,
 		'skip_on_windows' => 1
 	});
@@ -104,7 +104,7 @@ for my $scenario (@scenario)
 	  if $scenario->{'skip_on_windows'} && $windows_os;
 
 	# Take a backup and check that it verifies OK.
-	my $backup_path    = $primary->backup_dir . '/' . $name;
+	my $backup_path = $primary->backup_dir . '/' . $name;
 	my $backup_ts_path = PostgreSQL::Test::Utils::tempdir_short();
 	# The tablespace map parameter confuses Msys2, which tries to mangle
 	# it. Tell it not to.
@@ -228,8 +228,8 @@ sub mutilate_truncate_file
 sub mutilate_replace_file
 {
 	my ($backup_path) = @_;
-	my $pathname      = "$backup_path/PG_VERSION";
-	my $contents      = slurp_file($pathname);
+	my $pathname = "$backup_path/PG_VERSION";
+	my $contents = slurp_file($pathname);
 	open(my $fh, '>', $pathname) || die "open $pathname: $!";
 	print $fh 'q' x length($contents);
 	close($fh);
diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl
index 591a6b36be..2aa8352f00 100644
--- a/src/bin/pg_verifybackup/t/004_options.pl
+++ b/src/bin/pg_verifybackup/t/004_options.pl
@@ -108,7 +108,7 @@ unlike(
 # Test valid manifest with nonexistent backup directory.
 command_fails_like(
 	[
-		'pg_verifybackup',              '-m',
+		'pg_verifybackup', '-m',
 		"$backup_path/backup_manifest", "$backup_path/fake"
 	],
 	qr/could not open directory/,
diff --git a/src/bin/pg_verifybackup/t/006_encoding.pl b/src/bin/pg_verifybackup/t/006_encoding.pl
index 4cbd1a6051..0b37bda20c 100644
--- a/src/bin/pg_verifybackup/t/006_encoding.pl
+++ b/src/bin/pg_verifybackup/t/006_encoding.pl
@@ -16,8 +16,8 @@ my $backup_path = $primary->backup_dir . '/test_encoding';
 $primary->command_ok(
 	[
 		'pg_basebackup', '-D',
-		$backup_path,    '--no-sync',
-		'-cfast',        '--manifest-force-encode'
+		$backup_path, '--no-sync',
+		'-cfast', '--manifest-force-encode'
 	],
 	"backup ok with forced hex encoding");
diff --git a/src/bin/pg_verifybackup/t/007_wal.pl b/src/bin/pg_verifybackup/t/007_wal.pl
index 34ca877d10..89f96f85db 100644
--- a/src/bin/pg_verifybackup/t/007_wal.pl
+++ b/src/bin/pg_verifybackup/t/007_wal.pl
@@ -19,7 +19,7 @@ $primary->command_ok(
 	"base backup ok");
 
 # Rename pg_wal.
-my $original_pg_wal  = $backup_path . '/pg_wal';
+my $original_pg_wal = $backup_path . '/pg_wal';
 my $relocated_pg_wal = $primary->backup_dir . '/relocated_pg_wal';
 rename($original_pg_wal, $relocated_pg_wal) || die "rename pg_wal: $!";
@@ -46,7 +46,7 @@ my @walfiles = grep { /^[0-9A-F]{24}$/ } slurp_dir($original_pg_wal);
 
 # Replace the contents of one of the files with garbage of equal length.
 my $wal_corruption_target = $original_pg_wal . '/' . $walfiles[0];
-my $wal_size              = -s $wal_corruption_target;
+my $wal_size = -s $wal_corruption_target;
 open(my $fh, '>', $wal_corruption_target)
   || die "open $wal_corruption_target: $!";
 print $fh 'w' x $wal_size;
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 05754bc8ec..1a783d1188 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,47 +16,47 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 
-my $backup_path  = $primary->backup_dir . '/server-backup';
+my $backup_path = $primary->backup_dir . '/server-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
 	{
 		'compression_method' => 'none',
-		'backup_flags'       => [],
-		'backup_archive'     => 'base.tar',
-		'enabled'            => 1
+		'backup_flags' => [],
+		'backup_archive' => 'base.tar',
+		'enabled' => 1
 	},
 	{
 		'compression_method' => 'gzip',
-		'backup_flags'       => [ '--compress', 'server-gzip' ],
-		'backup_archive'     => 'base.tar.gz',
+		'backup_flags' => [ '--compress', 'server-gzip' ],
+		'backup_archive' => 'base.tar.gz',
 		'decompress_program' => $ENV{'GZIP_PROGRAM'},
-		'decompress_flags'   => ['-d'],
-		'enabled'            => check_pg_config("#define HAVE_LIBZ 1")
+		'decompress_flags' => ['-d'],
+		'enabled' => check_pg_config("#define HAVE_LIBZ 1")
 	},
 	{
 		'compression_method' => 'lz4',
-		'backup_flags'       => [ '--compress', 'server-lz4' ],
-		'backup_archive'     => 'base.tar.lz4',
+		'backup_flags' => [ '--compress', 'server-lz4' ],
+		'backup_archive' => 'base.tar.lz4',
 		'decompress_program' => $ENV{'LZ4'},
-		'decompress_flags'   => [ '-d', '-m' ],
-		'enabled'            => check_pg_config("#define USE_LZ4 1")
+		'decompress_flags' => [ '-d', '-m' ],
+		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
 	{
 		'compression_method' => 'zstd',
-		'backup_flags'       => [ '--compress', 'server-zstd' ],
-		'backup_archive'     => 'base.tar.zst',
+		'backup_flags' => [ '--compress', 'server-zstd' ],
+		'backup_archive' => 'base.tar.zst',
 		'decompress_program' => $ENV{'ZSTD'},
-		'decompress_flags'   => ['-d'],
-		'enabled'            => check_pg_config("#define USE_ZSTD 1")
+		'decompress_flags' => ['-d'],
+		'enabled' => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
 		'compression_method' => 'zstd',
-		'backup_flags'       => [ '--compress', 'server-zstd:level=1,long' ],
-		'backup_archive'     => 'base.tar.zst',
+		'backup_flags' => [ '--compress', 'server-zstd:level=1,long' ],
+		'backup_archive' => 'base.tar.zst',
 		'decompress_program' => $ENV{'ZSTD'},
-		'decompress_flags'   => ['-d'],
-		'enabled'            => check_pg_config("#define USE_ZSTD 1")
+		'decompress_flags' => ['-d'],
+		'enabled' => check_pg_config("#define USE_ZSTD 1")
 	});
 
 for my $tc (@test_configuration)
@@ -74,8 +74,8 @@ for my $tc (@test_configuration)
 	# Take a server-side backup.
 	my @backup = (
-		'pg_basebackup', '--no-sync',
-		'-cfast',        '--target',
+		'pg_basebackup', '--no-sync',
+		'-cfast', '--target',
 		"server:$backup_path", '-Xfetch');
 	push @backup, @{ $tc->{'backup_flags'} };
 	$primary->command_ok(\@backup,
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index d26064b002..f4d5378555 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -17,28 +17,28 @@ $primary->start;
 my @test_configuration = (
 	{
 		'compression_method' => 'none',
-		'backup_flags'       => [],
-		'enabled'            => 1
+		'backup_flags' => [],
+		'enabled' => 1
 	},
 	{
 		'compression_method' => 'gzip',
-		'backup_flags'       => [ '--compress', 'server-gzip:5' ],
-		'enabled'            => check_pg_config("#define HAVE_LIBZ 1")
+		'backup_flags' => [ '--compress', 'server-gzip:5' ],
+		'enabled' => check_pg_config("#define HAVE_LIBZ 1")
 	},
 	{
 		'compression_method' => 'lz4',
-		'backup_flags'       => [ '--compress', 'server-lz4:5' ],
-		'enabled'            => check_pg_config("#define USE_LZ4 1")
+		'backup_flags' => [ '--compress', 'server-lz4:5' ],
+		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
 	{
 		'compression_method' => 'zstd',
-		'backup_flags'       => [ '--compress', 'server-zstd:5' ],
-		'enabled'            => check_pg_config("#define USE_ZSTD 1")
+		'backup_flags' => [ '--compress', 'server-zstd:5' ],
+		'enabled' => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
 		'compression_method' => 'parallel zstd',
-		'backup_flags'         => [ '--compress', 'server-zstd:workers=3' ],
-		'enabled'              => check_pg_config("#define USE_ZSTD 1"),
+		'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
+		'enabled' => check_pg_config("#define USE_ZSTD 1"),
 		'possibly_unsupported' =>
 		  qr/could not set compression worker count to 3: Unsupported parameter/
 	});
 
 for my $tc (@test_configuration)
 {
 	my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
-	my $method      = $tc->{'compression_method'};
+	my $method = $tc->{'compression_method'};
 
   SKIP:
 	{
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index ac51a174d1..44d83e777f 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,56 +15,56 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 
-my $backup_path  = $primary->backup_dir . '/client-backup';
+my $backup_path = $primary->backup_dir . '/client-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
 	{
 		'compression_method' => 'none',
-		'backup_flags'       => [],
-		'backup_archive'     => 'base.tar',
-		'enabled'            => 1
+		'backup_flags' => [],
+		'backup_archive' => 'base.tar',
+		'enabled' => 1
 	},
 	{
 		'compression_method' => 'gzip',
-		'backup_flags'       => [ '--compress', 'client-gzip:5' ],
-		'backup_archive'     => 'base.tar.gz',
+		'backup_flags' => [ '--compress', 'client-gzip:5' ],
+		'backup_archive' => 'base.tar.gz',
 		'decompress_program' => $ENV{'GZIP_PROGRAM'},
-		'decompress_flags'   => ['-d'],
-		'enabled'            => check_pg_config("#define HAVE_LIBZ 1")
+		'decompress_flags' => ['-d'],
+		'enabled' => check_pg_config("#define HAVE_LIBZ 1")
 	},
 	{
 		'compression_method' => 'lz4',
-		'backup_flags'       => [ '--compress', 'client-lz4:5' ],
-		'backup_archive'     => 'base.tar.lz4',
+		'backup_flags' => [ '--compress', 'client-lz4:5' ],
+		'backup_archive' => 'base.tar.lz4',
 		'decompress_program' => $ENV{'LZ4'},
-		'decompress_flags'   => ['-d'],
-		'output_file'        => 'base.tar',
-		'enabled'            => check_pg_config("#define USE_LZ4 1")
+		'decompress_flags' => ['-d'],
+		'output_file' => 'base.tar',
+		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
 	{
 		'compression_method' => 'zstd',
-		'backup_flags'       => [ '--compress', 'client-zstd:5' ],
-		'backup_archive'     => 'base.tar.zst',
-		'decompress_program' => $ENV{'ZSTD'},
-		'decompress_flags'   => ['-d'],
-		'enabled'            => check_pg_config("#define USE_ZSTD 1")
-	},
-	{
-		'compression_method' => 'zstd',
-		'backup_flags'       => ['--compress', 'client-zstd:level=1,long'],
+		'backup_flags' => [ '--compress', 'client-zstd:5' ],
 		'backup_archive' => 'base.tar.zst',
 		'decompress_program' => $ENV{'ZSTD'},
-		'decompress_flags'   => [ '-d' ],
+		'decompress_flags' => ['-d'],
+		'enabled' => check_pg_config("#define USE_ZSTD 1")
+	},
+	{
+		'compression_method' => 'zstd',
+		'backup_flags' => [ '--compress', 'client-zstd:level=1,long' ],
+		'backup_archive' => 'base.tar.zst',
+		'decompress_program' => $ENV{'ZSTD'},
+		'decompress_flags' => ['-d'],
 		'enabled' => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
 		'compression_method' => 'parallel zstd',
-		'backup_flags'         => [ '--compress', 'client-zstd:workers=3' ],
-		'backup_archive'       => 'base.tar.zst',
+		'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
+		'backup_archive' => 'base.tar.zst',
 		'decompress_program' => $ENV{'ZSTD'},
-		'decompress_flags'     => ['-d'],
-		'enabled'              => check_pg_config("#define USE_ZSTD 1"),
+		'decompress_flags' => ['-d'],
+		'enabled' => check_pg_config("#define USE_ZSTD 1"),
 		'possibly_unsupported' =>
 		  qr/could not set compression worker count to 3: Unsupported parameter/
 	});
diff --git a/src/bin/pg_waldump/t/002_save_fullpage.pl b/src/bin/pg_waldump/t/002_save_fullpage.pl
index 18a89a26f8..831ffdefef 100644
--- a/src/bin/pg_waldump/t/002_save_fullpage.pl
+++ b/src/bin/pg_waldump/t/002_save_fullpage.pl
@@ -14,7 +14,7 @@ my ($blocksize, $walfile_name);
 # Function to extract the LSN from the given block structure
 sub get_block_lsn
 {
-	my $path      = shift;
+	my $path = shift;
 	my $blocksize = shift;
 	my $block;
 
@@ -64,16 +64,16 @@ my $relation = $node->safe_psql(
 			datname = current_database()}
 );
 
-my $walfile    = $node->data_dir . '/pg_wal/' . $walfile_name;
+my $walfile = $node->data_dir . '/pg_wal/' . $walfile_name;
 my $tmp_folder = PostgreSQL::Test::Utils::tempdir;
 
 ok(-f $walfile, "Got a WAL file");
 
 $node->command_ok(
 	[
-		'pg_waldump',      '--quiet',
+		'pg_waldump', '--quiet',
 		'--save-fullpage', "$tmp_folder/raw",
-		'--relation',      $relation,
+		'--relation', $relation,
 		$walfile
 	],
 	'pg_waldump with --save-fullpage runs');
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 70ed034e70..7dbb2ed6a7 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -4621,7 +4621,7 @@ processXactStats(TState *thread, CState *st, pg_time_usec_t *now,
 	double		latency = 0.0,
 				lag = 0.0;
 	bool		detailed = progress || throttle_delay || latency_limit ||
-	use_log || per_script_stats;
+		use_log || per_script_stats;
 
 	if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR)
 	{
@@ -6400,7 +6400,7 @@ printResults(StatsData *total,
 		StatsData  *sstats = &sql_script[i].stats;
 		int64		script_failures = getFailures(sstats);
 		int64		script_total_cnt =
-		sstats->cnt + sstats->skipped + script_failures;
+			sstats->cnt + sstats->skipped + script_failures;
 
 		printf("SQL script %d: %s\n"
 			   " - weight: %d (targets %.1f%% of total)\n"
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 99273203f0..363a1ffabd 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -140,7 +140,7 @@ $node->pgbench(
 		qr{mode: prepared}
 	],
 	[
-		qr{vacuum},    qr{client 0}, qr{client 1}, qr{sending},
+		qr{vacuum}, qr{client 0}, qr{client 1}, qr{sending},
 		qr{receiving}, qr{executing}
 	],
 	'pgbench select only');
@@ -233,7 +233,7 @@ COMMIT;
 
 # 1. Logging neither with errors nor with statements
 $node->append_conf('postgresql.conf',
-    "log_min_duration_statement = 0\n"
+	"log_min_duration_statement = 0\n"
 	. "log_parameter_max_length = 0\n"
 	. "log_parameter_max_length_on_error = 0");
 $node->reload;
@@ -261,7 +261,7 @@ $log = undef;
 
 # 2. Logging truncated parameters on error, full with statements
 $node->append_conf('postgresql.conf',
-    "log_parameter_max_length = -1\n"
+	"log_parameter_max_length = -1\n"
 	. "log_parameter_max_length_on_error = 64");
 $node->reload;
 $node->pgbench(
@@ -302,7 +302,7 @@ $log = undef;
 
 # 3. Logging full parameters on error, truncated with statements
 $node->append_conf('postgresql.conf',
-    "log_min_duration_statement = -1\n"
+	"log_min_duration_statement = -1\n"
 	. "log_parameter_max_length = 7\n"
 	. "log_parameter_max_length_on_error = -1");
 $node->reload;
@@ -363,7 +363,7 @@ select :value1::smallint, :value2::smallint;
 
 # Restore default logging config
 $node->append_conf('postgresql.conf',
-    "log_min_duration_statement = -1\n"
+	"log_min_duration_statement = -1\n"
 	. "log_parameter_max_length_on_error = 0\n"
 	. "log_parameter_max_length = -1");
 $node->reload;
@@ -438,7 +438,7 @@ $node->pgbench(
 		qr{command=98.: int 5432\b},    # :random_seed
 		qr{command=99.: int -9223372036854775808\b},    # min int
 		qr{command=100.: int 9223372036854775807\b},    # max int
-										# pseudorandom permutation tests
+		# pseudorandom permutation tests
 		qr{command=101.: boolean true\b},
 		qr{command=102.: boolean true\b},
 		qr{command=103.: boolean true\b},
@@ -640,7 +640,7 @@ my ($ret, $out, $err) = $node->psql('postgres',
 	'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'
 );
-ok($ret == 0,  "psql seeded_random count ok");
+ok($ret == 0, "psql seeded_random count ok");
 ok($err eq '', "psql seeded_random count stderr is empty");
 ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/,
 	"psql seeded_random count uniform");
@@ -734,7 +734,7 @@ SELECT 5432 AS fail UNION SELECT 5433 ORDER BY 1 \gset
 $node->pgbench(
 	'-t 1', 0,
 	[ qr{type: .*/001_pgbench_aset}, qr{processed: 1/1} ],
-	[ qr{command=3.: int 8\b},       qr{command=4.: int 7\b} ],
+	[ qr{command=3.: int 8\b}, qr{command=4.: int 7\b} ],
 	'pgbench aset command',
 	{
 		'001_pgbench_aset' => q{
@@ -886,7 +886,7 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 
 	# SHELL
 	[
-		'shell bad command',                    2,
+		'shell bad command', 2,
 		[qr{\(shell\) .* meta-command failed}], q{\shell no-such-command}
 	],
 	[
@@ -905,11 +905,11 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 
 	# SET
 	[
-		'set syntax error',                  1,
+		'set syntax error', 1,
 		[qr{syntax error in command "set"}], q{\set i 1 +}
 	],
 	[
-		'set no such function',         1,
+		'set no such function', 1,
 		[qr{unexpected function name}], q{\set i noSuchFunction()}
 	],
 	[
@@ -931,11 +931,11 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 		q{\set i least(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)}
 	],
 	[
-		'set empty random range',          2,
+		'set empty random range', 2,
 		[qr{empty range given to random}], q{\set i random(5,3)}
 	],
 	[
-		'set random range too large',    2,
+		'set random range too large', 2,
 		[qr{random range is too large}], q{\set i random(:minint, :maxint)}
 	],
 	[
@@ -963,21 +963,21 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 		q{\set i random_zipfian(0, 10, 1000000)}
 	],
 	[
-		'set non numeric value',                     2,
+		'set non numeric value', 2,
 		[qr{malformed variable "foo" value: "bla"}], q{\set i :foo + 1}
 	],
-	[ 'set no expression',    1, [qr{syntax error}],      q{\set i} ],
+	[ 'set no expression', 1, [qr{syntax error}], q{\set i} ],
 	[ 'set missing argument', 1, [qr{missing argument}i], q{\set} ],
 	[
-		'set not a bool',                      2,
+		'set not a bool', 2,
 		[qr{cannot coerce double to boolean}], q{\set b NOT 0.0}
 	],
 	[
-		'set not an int',                   2,
+		'set not an int', 2,
 		[qr{cannot coerce boolean to int}], q{\set i TRUE + 2}
 	],
 	[
-		'set not a double',                    2,
+		'set not a double', 2,
 		[qr{cannot coerce boolean to double}], q{\set d ln(TRUE)}
 	],
 	[
@@ -987,26 +987,26 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 		q{\set i CASE TRUE THEN 1 ELSE 0 END}
 	],
 	[
-		'set random error',                 2,
+		'set random error', 2,
 		[qr{cannot coerce boolean to int}], q{\set b random(FALSE, TRUE)}
 	],
 	[
-		'set number of args mismatch',        1,
+		'set number of args mismatch', 1,
 		[qr{unexpected number of arguments}], q{\set d ln(1.0, 2.0))}
 	],
 	[
-		'set at least one arg',               1,
+		'set at least one arg', 1,
 		[qr{at least one argument expected}], q{\set i greatest())}
 	],
 
 	# SET: ARITHMETIC OVERFLOW DETECTION
 	[
-		'set double to int overflow',         2,
+		'set double to int overflow', 2,
 		[qr{double to int overflow for 100}], q{\set i int(1E32)}
 	],
 	[
 		'set bigint add overflow', 2,
-		[qr{int add out}],         q{\set i (1<<62) + (1<<62)}
+		[qr{int add out}], q{\set i (1<<62) + (1<<62)}
 	],
 	[
 		'set bigint sub overflow',
@@ -1023,22 +1023,22 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 
 	# SETSHELL
 	[
-		'setshell not an int',                2,
+		'setshell not an int', 2,
 		[qr{command must return an integer}], q{\setshell i echo -n one}
 	],
 	[ 'setshell missing arg', 1, [qr{missing argument }], q{\setshell var} ],
 	[
-		'setshell no such command',   2,
+		'setshell no such command', 2,
 		[qr{could not read result }], q{\setshell var no-such-command}
 	],
 
 	# SLEEP
 	[
-		'sleep undefined variable',      2,
+		'sleep undefined variable', 2,
 		[qr{sleep: undefined variable}], q{\sleep :nosuchvariable}
 	],
 	[
-		'sleep too many args',    1,
+		'sleep too many args', 1,
 		[qr{too many arguments}], q{\sleep too many args}
 	],
 	[
@@ -1046,18 +1046,18 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 		[ qr{missing argument}, qr{\\sleep} ],
 		q{\sleep}
 	],
 	[
-		'sleep unknown unit',         1,
+		'sleep unknown unit', 1,
 		[qr{unrecognized time unit}], q{\sleep 1 week}
 	],
 
 	# MISC
 	[
-		'misc invalid backslash command',         1,
+		'misc invalid backslash command', 1,
 		[qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand}
 	],
 	[ 'misc empty script', 1, [qr{empty command list for script}], q{} ],
 	[
-		'bad boolean',                     2,
+		'bad boolean', 2,
 		[qr{malformed variable.*trueXXX}], q{\set b :badtrue or true}
 	],
 	[
@@ -1069,21 +1069,21 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 
 	# GSET
 	[
-		'gset no row',                   2,
+		'gset no row', 2,
 		[qr{expected one row, got 0\b}], q{SELECT WHERE FALSE \gset}
 	],
 	[ 'gset alone', 1, [qr{gset must follow an SQL command}], q{\gset} ],
 	[
-		'gset no SQL',                         1,
+		'gset no SQL', 1,
 		[qr{gset must follow an SQL command}], q{\set i +1 \gset}
 	],
 	[
 		'gset too many arguments', 1,
-		[qr{too many arguments}],  q{SELECT 1 \gset a b}
+		[qr{too many arguments}], q{SELECT 1 \gset a b}
 	],
 	[
-		'gset after gset',                     1,
+		'gset after gset', 1,
 		[qr{gset must follow an SQL command}], q{SELECT 1 AS i \gset \gset}
 	],
@@ -1094,7 +1094,7 @@ SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
 		q{DROP TABLE IF EXISTS no_such_table \gset}
 	],
 	[
-		'gset bad default name',                      2,
+		'gset bad default name', 2,
 		[qr{error storing into variable \?column\?}], q{SELECT 1 \gset}
 	],
 	[
@@ -1234,7 +1234,7 @@ $node->pgbench(
 
 # Test the concurrent update in the table row and deadlocks.
 $node->safe_psql('postgres',
-    'CREATE UNLOGGED TABLE first_client_table (value integer); '
+	'CREATE UNLOGGED TABLE first_client_table (value integer); '
 	. 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
 	. 'INSERT INTO xy VALUES (1, 2);');
@@ -1245,7 +1245,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=repeatable\\ read";
 
 # Check that we have a serialization error and the same random value of the
 # delta variable in the next try
 my $err_pattern =
-    "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
+	"(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
   . "client \\2 got an error in command 3 \\(SQL\\) of script 0; "
  . "ERROR:  could not serialize access due to concurrent update\\b.*"
  . "\\1";
@@ -1331,7 +1331,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=read\\ committed";
 
 # Check that we have a deadlock error
 $err_pattern =
-    "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
+	"client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
   . "ERROR:  deadlock detected\\b";
 
 $node->pgbench(
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index d59d2ab6da..0ec54fbb03 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -128,7 +128,7 @@ my @options = (
 		'invalid progress', '--progress=0',
 		[qr{-P/--progress must be in range}]
 	],
-	[ 'invalid rate',    '--rate=0.0',          [qr{invalid rate limit}] ],
+	[ 'invalid rate', '--rate=0.0', [qr{invalid rate limit}] ],
 	[ 'invalid latency', '--latency-limit=0.0', [qr{invalid latency limit}] ],
 	[
 		'invalid sampling rate', '--sampling-rate=0',
@@ -144,7 +144,7 @@ my @options = (
 		'-b se@0 -b si@0 -b tpcb@0',
 		[qr{weight must not be zero}]
 	],
-	[ 'init vs run', '-i -S',    [qr{cannot be used in initialization}] ],
+	[ 'init vs run', '-i -S', [qr{cannot be used in initialization}] ],
 	[ 'run vs init', '-S -F 90', [qr{cannot be used in benchmarking}] ],
 	[ 'ambiguous builtin', '-b s', [qr{ambiguous}] ],
 	[
@@ -257,7 +257,7 @@ pgbench(
 	[qr{^$}],
 	[
 		qr{Available builtin scripts:}, qr{tpcb-like},
-		qr{simple-update},              qr{select-only}
+		qr{simple-update}, qr{select-only}
 	],
 	'pgbench builtin list');
@@ -268,7 +268,7 @@ pgbench(
 	[qr{^$}],
 	[
 		qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE},
-		qr{(?!UPDATE)},    qr{(?!INSERT)}
+		qr{(?!UPDATE)}, qr{(?!INSERT)}
 	],
 	'pgbench builtin listing');
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index ab3f4e4920..511debbe81 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -4511,7 +4511,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
 	/* header line width in expanded mode */
 	else if (strcmp(param, "xheader_width") == 0)
 	{
-		if (! value)
+		if (!value)
 			;
 		else if (pg_strcasecmp(value, "full") == 0)
 			popt->topt.expanded_header_width_type = PRINT_XHEADER_FULL;
@@ -5063,15 +5063,16 @@ pset_value_string(const char *param, printQueryOpt *popt)
 	else if (strcmp(param, "xheader_width") == 0)
 	{
 		if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL)
-			return(pstrdup("full"));
+			return pstrdup("full");
 		else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN)
-			return(pstrdup("column"));
+			return pstrdup("column");
 		else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE)
-			return(pstrdup("page"));
+			return pstrdup("page");
 		else
 		{
 			/* must be PRINT_XHEADER_EXACT_WIDTH */
-			char wbuff[32];
+			char		wbuff[32];
+
 			snprintf(wbuff, sizeof(wbuff), "%d",
 					 popt->topt.expanded_header_exact_width);
 			return pstrdup(wbuff);
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index c0e6e8e6ed..5973df2e39 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1432,7 +1432,7 @@ ExecQueryAndProcessResults(const char *query,
 	INSTR_TIME_SET_ZERO(before);
 
 	if (pset.bind_flag)
-		success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char * const *) pset.bind_params, NULL, NULL, 0);
+		success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char *const *) pset.bind_params, NULL, NULL, 0);
 	else
 		success = PQsendQuery(pset.db, query);
diff --git a/src/bin/psql/create_help.pl b/src/bin/psql/create_help.pl
index 1d5366db16..0809db4151 100644
--- a/src/bin/psql/create_help.pl
+++ b/src/bin/psql/create_help.pl
@@ -23,18 +23,18 @@ use strict;
 use warnings;
 use Getopt::Long;
 
-my $docdir        = '';
-my $outdir        = '.';
-my $depfile       = '';
+my $docdir = '';
+my $outdir = '.';
+my $depfile = '';
 my $hfilebasename = '';
 
 GetOptions(
-	'docdir=s'   => \$docdir,
-	'outdir=s'   => \$outdir,
+	'docdir=s' => \$docdir,
+	'outdir=s' => \$outdir,
 	'basename=s' => \$hfilebasename,
-	'depfile=s'  => \$depfile,) or die "$0: wrong arguments";
+	'depfile=s' => \$depfile,) or die "$0: wrong arguments";
 
-$docdir        or die "$0: missing required argument: docdir\n";
+$docdir or die "$0: missing required argument: docdir\n";
 $hfilebasename or die "$0: missing required argument: basename\n";
 
 my $hfile = $hfilebasename . '.h';
@@ -163,11 +163,11 @@ foreach my $file (sort readdir $dh)
 		foreach my $cmdname (@cmdnames)
 		{
 			$entries{$cmdname} = {
-				cmdid       => $cmdid,
-				cmddesc     => $cmddesc,
+				cmdid => $cmdid,
+				cmddesc => $cmddesc,
 				cmdsynopsis => $cmdsynopsis,
-				params      => \@params,
-				nl_count    => $nl_count
+				params => \@params,
+				nl_count => $nl_count
 			};
 			$maxlen =
 			  ($maxlen >= length $cmdname) ? $maxlen : length $cmdname;
@@ -182,7 +182,7 @@ foreach my $file (sort readdir $dh)
 foreach (sort keys %entries)
 {
 	my $prefix = "\t" x 5 . ' ';
-	my $id     = $_;
+	my $id = $_;
 	$id =~ s/ /_/g;
 	my $synopsis = "\"$entries{$_}{cmdsynopsis}\"";
 	$synopsis =~ s/\\n/\\n"\n$prefix"/g;
diff --git a/src/bin/psql/crosstabview.c b/src/bin/psql/crosstabview.c
index 67fcdb49dd..e1ad0e61d9 100644
--- a/src/bin/psql/crosstabview.c
+++ b/src/bin/psql/crosstabview.c
@@ -532,7 +532,7 @@ avlInsertNode(avl_tree *tree, avl_node **node, pivot_field field)
 	if (current == tree->end)
 	{
 		avl_node   *new_node = (avl_node *)
-		pg_malloc(sizeof(avl_node));
+			pg_malloc(sizeof(avl_node));
 
 		new_node->height = 1;
 		new_node->field = field;
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index ab4279ed58..9325a46b8f 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1160,8 +1160,8 @@ permissionsList(const char *pattern, bool showSystem)
 	return true;
 
 error_return:
-  termPQExpBuffer(&buf);
-  return false;
+	termPQExpBuffer(&buf);
+	return false;
 }
diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h
index 73d4b393bc..1106954236 100644
--- a/src/bin/psql/settings.h
+++ b/src/bin/psql/settings.h
@@ -96,7 +96,8 @@ typedef struct _psqlSettings
 	char	   *gset_prefix;	/* one-shot prefix argument for \gset */
 	bool		gdesc_flag;		/* one-shot request to describe query result */
 	bool		gexec_flag;		/* one-shot request to execute query result */
-	bool		bind_flag;		/* one-shot request to use extended query protocol */
+	bool		bind_flag;		/* one-shot request to use extended query
+								 * protocol */
 	int			bind_nparams;	/* number of parameters */
 	char	  **bind_params;	/* parameters for extended query protocol call */
 	bool		crosstab_flag;	/* one-shot request to crosstab result */
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 596746de17..9ac27db212 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -22,7 +22,7 @@ sub psql_like
 
 	my ($ret, $stdout, $stderr) = $node->psql('postgres', $sql);
 
-	is($ret,    0,  "$test_name: exit code 0");
+	is($ret, 0, "$test_name: exit code 0");
 	is($stderr, '', "$test_name: no stderr");
 	like($stdout, $expected_stdout, "$test_name: matches");
@@ -69,9 +69,9 @@ max_wal_senders = 4
 });
 $node->start;
 
-psql_like($node, '\copyright',   qr/Copyright/, '\copyright');
-psql_like($node, '\help',        qr/ALTER/,     '\help without arguments');
-psql_like($node, '\help SELECT', qr/SELECT/,    '\help with argument');
+psql_like($node, '\copyright', qr/Copyright/, '\copyright');
+psql_like($node, '\help', qr/ALTER/, '\help without arguments');
+psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
 
 # Test clean handling of unsupported replication command responses
 psql_fails_like(
@@ -132,7 +132,7 @@ NOTIFY foo, 'bar';",
 # test behavior and output on server crash
 my ($ret, $out, $err) = $node->psql(
 	'postgres',
-    "SELECT 'before' AS running;\n"
+	"SELECT 'before' AS running;\n"
 	. "SELECT pg_terminate_backend(pg_backend_pid());\n"
 	. "SELECT 'AFTER' AS not_running;\n");
@@ -216,9 +216,9 @@ $node->safe_psql('postgres', "CREATE TABLE tab_psql_single (a int);");
 
 # Tests with ON_ERROR_STOP.
 $node->command_ok(
 	[
-		'psql',                 '-X',
-		'--single-transaction', '-v',
-		'ON_ERROR_STOP=1',      '-c',
+		'psql', '-X',
+		'--single-transaction', '-v',
+		'ON_ERROR_STOP=1', '-c',
 		'INSERT INTO tab_psql_single VALUES (1)', '-c',
 		'INSERT INTO tab_psql_single VALUES (2)'
 	],
@@ -231,9 +231,9 @@ is($row_count, '2',
 
 $node->command_fails(
 	[
-		'psql',                 '-X',
-		'--single-transaction', '-v',
-		'ON_ERROR_STOP=1',      '-c',
+		'psql', '-X',
+		'--single-transaction', '-v',
+		'ON_ERROR_STOP=1', '-c',
 		'INSERT INTO tab_psql_single VALUES (3)', '-c',
 		"\\copy tab_psql_single FROM '$tempdir/nonexistent'"
 	],
@@ -245,15 +245,15 @@ is($row_count, '2',
 );
 
 # Tests mixing files and commands.
-my $copy_sql_file   = "$tempdir/tab_copy.sql";
+my $copy_sql_file = "$tempdir/tab_copy.sql";
 my $insert_sql_file = "$tempdir/tab_insert.sql";
 append_to_file($copy_sql_file,
 	"\\copy tab_psql_single FROM '$tempdir/nonexistent';");
 append_to_file($insert_sql_file, 'INSERT INTO tab_psql_single VALUES (4);');
 $node->command_ok(
 	[
-		'psql',            '-X', '--single-transaction', '-v',
-		'ON_ERROR_STOP=1', '-f', $insert_sql_file,       '-f',
+		'psql', '-X', '--single-transaction', '-v',
+		'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
 		$insert_sql_file
 	],
 	'ON_ERROR_STOP, --single-transaction and multiple -f switches');
@@ -265,8 +265,8 @@ is($row_count, '4',
 
 $node->command_fails(
 	[
-		'psql',            '-X', '--single-transaction', '-v',
-		'ON_ERROR_STOP=1', '-f', $insert_sql_file,       '-f',
+		'psql', '-X', '--single-transaction', '-v',
+		'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f',
 		$copy_sql_file
 	],
 	'ON_ERROR_STOP, --single-transaction and multiple -f switches, error');
@@ -281,10 +281,10 @@ is($row_count, '4',
 # transaction commits.
 $node->command_fails(
 	[
-		'psql',                 '-X',
+		'psql', '-X',
 		'--single-transaction', '-f',
-		$insert_sql_file,       '-f',
-		$insert_sql_file,       '-c',
+		$insert_sql_file, '-f',
+		$insert_sql_file, '-c',
 		"\\copy tab_psql_single FROM '$tempdir/nonexistent'"
 	],
 	'no ON_ERROR_STOP, --single-transaction and multiple -f/-c switches');
@@ -298,8 +298,8 @@ is($row_count, '6',
 # returns a success and the transaction commits.
 $node->command_ok(
 	[
-		'psql',           '-X', '--single-transaction', '-f',
-		$insert_sql_file, '-f', $insert_sql_file,       '-f',
+		'psql', '-X', '--single-transaction', '-f',
+		$insert_sql_file, '-f', $insert_sql_file, '-f',
 		$copy_sql_file
 	],
 	'no ON_ERROR_STOP, --single-transaction and multiple -f switches');
@@ -313,10 +313,10 @@ is($row_count, '8',
 # the transaction commit even if there is a failure in-between.
 $node->command_ok(
 	[
-		'psql',                 '-X',
-		'--single-transaction', '-c',
+		'psql', '-X',
+		'--single-transaction', '-c',
 		'INSERT INTO tab_psql_single VALUES (5)', '-f',
-		$copy_sql_file,         '-c',
+		$copy_sql_file, '-c',
 		'INSERT INTO tab_psql_single VALUES (6)'
 	],
 	'no ON_ERROR_STOP, --single-transaction and multiple -c switches');
@@ -348,16 +348,12 @@ psql_like(
 	qr/1\|value\|2022-07-04 00:00:00
 2|test|2022-07-03 00:00:00
 3|test|2022-07-05 00:00:00/,
-	'\copy from with DEFAULT'
-);
+	'\copy from with DEFAULT');
 
 # Check \watch
 # Note: the interval value is parsed with locale-aware strtod()
-psql_like(
-	$node,
-	sprintf('SELECT 1 \watch c=3 i=%g', 0.01),
-	qr/1\n1\n1/,
-	'\watch with 3 iterations');
+psql_like($node, sprintf('SELECT 1 \watch c=3 i=%g', 0.01),
+	qr/1\n1\n1/, '\watch with 3 iterations');
 
 # Check \watch errors
 psql_fails_like(
diff --git a/src/bin/psql/t/010_tab_completion.pl b/src/bin/psql/t/010_tab_completion.pl
index 576b81958e..4cd0fa4680 100644
--- a/src/bin/psql/t/010_tab_completion.pl
+++ b/src/bin/psql/t/010_tab_completion.pl
@@ -39,7 +39,7 @@ $node->start;
 
 # set up a few database objects
 $node->safe_psql('postgres',
-    "CREATE TABLE tab1 (c1 int primary key, c2 text);\n"
+	"CREATE TABLE tab1 (c1 int primary key, c2 text);\n"
 	. "CREATE TABLE mytab123 (f1 int, f2 text);\n"
 	. "CREATE TABLE mytab246 (f1 int, f2 text);\n"
 	. "CREATE TABLE \"mixedName\" (f1 int, f2 text);\n"
@@ -71,7 +71,8 @@ delete $ENV{LS_COLORS};
 # completion tests is too variable.
 if ($ENV{TESTDATADIR})
 {
-	chdir $ENV{TESTDATADIR} or die "could not chdir to \"$ENV{TESTDATADIR}\": $!";
+	chdir $ENV{TESTDATADIR}
+	  or die "could not chdir to \"$ENV{TESTDATADIR}\": $!";
 }
 
 # Create some junk files for filename completion testing.
@@ -331,8 +332,8 @@ clear_line();
 # check completion of a keyword offered in addition to object names;
 # such a keyword should obey COMP_KEYWORD_CASE
 foreach (
-	[ 'lower',          'CO', 'column' ],
-	[ 'upper',          'co', 'COLUMN' ],
+	[ 'lower', 'CO', 'column' ],
+	[ 'upper', 'co', 'COLUMN' ],
 	[ 'preserve-lower', 'co', 'column' ],
 	[ 'preserve-upper', 'CO', 'COLUMN' ],)
 {
diff --git a/src/bin/psql/t/020_cancel.pl b/src/bin/psql/t/020_cancel.pl
index 12dc6cf429..0765d82b92 100644
--- a/src/bin/psql/t/020_cancel.pl
+++ b/src/bin/psql/t/020_cancel.pl
@@ -43,7 +43,7 @@ SKIP:
 	# Get the PID
 	$stdout = '';
 	$stderr = '';
-	$stdin  = "\\! echo \$PPID >$tempdir/psql.pid\n";
+	$stdin = "\\! echo \$PPID >$tempdir/psql.pid\n";
 	pump $h while length $stdin;
 	my $count;
 	my $psql_pid;
diff --git a/src/bin/scripts/t/020_createdb.pl b/src/bin/scripts/t/020_createdb.pl
index af3b1492e3..d0830a4a1d 100644
--- a/src/bin/scripts/t/020_createdb.pl
+++ b/src/bin/scripts/t/020_createdb.pl
@@ -39,9 +39,11 @@ if ($ENV{with_icu} eq 'yes')
 	$node->issues_sql_like(
 		[
-			'createdb',   '-T',
-			'template0',  '-E', 'UTF8', '--locale-provider=icu',
-			'--locale=C', '--icu-locale=en', 'foobar5'
+			'createdb', '-T',
+			'template0', '-E',
+			'UTF8', '--locale-provider=icu',
+			'--locale=C', '--icu-locale=en',
+			'foobar5'
 		],
 		qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
 		'create database with ICU locale specified');
@@ -56,8 +58,8 @@ if ($ENV{with_icu} eq 'yes')
 
 	$node->command_fails_like(
 		[
-			'createdb',  '-T',
-			'template0', '--locale-provider=icu',
+			'createdb', '-T',
+			'template0', '--locale-provider=icu',
 			'--encoding=SQL_ASCII', 'foobarX'
 		],
 		qr/ERROR:  encoding "SQL_ASCII" is not supported with ICU provider/,
@@ -65,16 +67,25 @@ if ($ENV{with_icu} eq 'yes')
 
 	# additional node, which uses the icu provider
 	my $node2 = PostgreSQL::Test::Cluster->new('icu');
-	$node2->init(extra => ['--locale-provider=icu', '--icu-locale=en']);
+	$node2->init(extra => [ '--locale-provider=icu', '--icu-locale=en' ]);
 	$node2->start;
 
 	$node2->command_ok(
-		[ 'createdb', '-T', 'template0', '--locale-provider=libc', 'foobar55' ],
-		'create database with libc provider from template database with icu provider');
+		[
+			'createdb', '-T',
+			'template0', '--locale-provider=libc',
+			'foobar55'
+		],
+		'create database with libc provider from template database with icu provider'
+	);
 
 	$node2->command_ok(
-		[ 'createdb', '-T', 'template0', '--icu-locale', 'en-US', 'foobar56' ],
-		'create database with icu locale from template database with icu provider');
+		[
+			'createdb', '-T', 'template0', '--icu-locale', 'en-US',
+			'foobar56'
+		],
+		'create database with icu locale from template database with icu provider'
+	);
 }
 else
 {
@@ -163,17 +174,11 @@ $node->issues_sql_like(
 	[ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ],
 	qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/,
 	'create database with owner role_foobar');
-($ret, $stdout, $stderr) = $node->psql(
-	'foobar2',
-	'DROP OWNED BY role_foobar;',
-	on_error_die => 1,
-);
+($ret, $stdout, $stderr) =
+  $node->psql('foobar2', 'DROP OWNED BY role_foobar;', on_error_die => 1,);
 ok($ret == 0, "DROP OWNED BY role_foobar");
-($ret, $stdout, $stderr) = $node->psql(
-	'foobar2',
-	'DROP DATABASE foobar8;',
-	on_error_die => 1,
-);
+($ret, $stdout, $stderr) =
+  $node->psql('foobar2', 'DROP DATABASE foobar8;', on_error_die => 1,);
 ok($ret == 0, "DROP DATABASE foobar8");
 
 done_testing();
diff --git a/src/bin/scripts/t/040_createuser.pl b/src/bin/scripts/t/040_createuser.pl
index d60cae4f58..da99d0ccb9 100644
--- a/src/bin/scripts/t/040_createuser.pl
+++ b/src/bin/scripts/t/040_createuser.pl
@@ -34,7 +34,7 @@ $node->issues_sql_like(
 	'create a superuser');
 $node->issues_sql_like(
 	[
-		'createuser',    '-a',
+		'createuser', '-a',
 		'regress_user1', '-a',
 		'regress user2', 'regress user #4'
 	],
@@ -42,8 +42,8 @@ $node->issues_sql_like(
 	'add a role as a member with admin option of the newly created role');
 $node->issues_sql_like(
 	[
-		'createuser',      '-m',
-		'regress_user3',   '-m',
+		'createuser', '-m',
+		'regress_user3', '-m',
 		'regress user #4', 'REGRESS_USER5'
 	],
 	qr/statement: CREATE ROLE "REGRESS_USER5" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user3,"regress user #4";/,
diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl
index 73188739c0..b663d0e741 100644
--- a/src/bin/scripts/t/090_reindexdb.pl
+++ b/src/bin/scripts/t/090_reindexdb.pl
@@ -53,7 +53,8 @@ my $fetch_toast_relfilenodes =
 	 WHERE b.oid IN ('pg_constraint'::regclass, 'test1'::regclass)};
 # Same for relfilenodes of normal indexes.  This saves the relfilenode
 # from an index of pg_constraint, and from the index of the test table.
-my $fetch_index_relfilenodes = qq{SELECT i.indrelid, a.oid::regclass::text, a.oid, a.relfilenode
+my $fetch_index_relfilenodes =
+  qq{SELECT i.indrelid, a.oid::regclass::text, a.oid, a.relfilenode
   FROM pg_class a
     JOIN pg_index i ON (i.indexrelid = a.oid)
   WHERE a.relname IN ('pg_constraint_oid_index', 'test1x')};
@@ -128,7 +129,7 @@ $node->issues_sql_like(
 	'reindex with verbose output');
 $node->issues_sql_like(
 	[
-		'reindexdb',    '-v', '-t', 'test1',
+		'reindexdb', '-v', '-t', 'test1',
 		'--tablespace', $tbspace_name, 'postgres'
 	],
 	qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE public\.test1;/,
@@ -171,8 +172,8 @@ $node->issues_sql_like(
 	'reindex with verbose output concurrently');
 $node->issues_sql_like(
 	[
-		'reindexdb', '--concurrently', '-v',          '-t',
-		'test1',     '--tablespace',   $tbspace_name, 'postgres'
+		'reindexdb', '--concurrently', '-v', '-t',
+		'test1', '--tablespace', $tbspace_name, 'postgres'
 	],
 	qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE CONCURRENTLY public\.test1;/,
 	'reindex concurrently with verbose output and tablespace');
@@ -184,7 +185,7 @@ $node->issues_sql_like(
 # messages.
 $node->command_checks_all(
 	[
-		'reindexdb',   '-t', $toast_table, '--tablespace',
+		'reindexdb', '-t', $toast_table, '--tablespace',
 		$tbspace_name, 'postgres'
 	],
 	1,
@@ -193,8 +194,8 @@ $node->command_checks_all(
 	'reindex toast table with tablespace');
 $node->command_checks_all(
 	[
-		'reindexdb',    '--concurrently', '-t', $toast_table,
-		'--tablespace', $tbspace_name,    'postgres'
+		'reindexdb', '--concurrently', '-t', $toast_table,
+		'--tablespace', $tbspace_name, 'postgres'
 	],
 	1,
 	[],
@@ -202,7 +203,7 @@ $node->command_checks_all(
 	'reindex toast table concurrently with tablespace');
 $node->command_checks_all(
 	[
-		'reindexdb',   '-i', $toast_index, '--tablespace',
+		'reindexdb', '-i', $toast_index, '--tablespace',
 		$tbspace_name, 'postgres'
 	],
 	1,
@@ -211,8 +212,8 @@ $node->command_checks_all(
 	'reindex toast index with tablespace');
 $node->command_checks_all(
 	[
-		'reindexdb',    '--concurrently', '-i', $toast_index,
-		'--tablespace', $tbspace_name,    'postgres'
+		'reindexdb', '--concurrently', '-i', $toast_index,
+		'--tablespace', $tbspace_name, 'postgres'
 	],
 	1,
 	[],
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index 4788c313a7..a93782bc0d 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -146,7 +146,7 @@ $node->command_fails(
 	'vacuumdb --min-xid-age with incorrect value');
 $node->issues_sql_like(
 	[
-		'vacuumdb',   '--table', 'vactable', '--min-mxid-age',
+		'vacuumdb', '--table', 'vactable', '--min-mxid-age',
 		'2147483000', 'postgres'
 	],
 	qr/GREATEST.*relminmxid.*2147483000/,
diff --git a/src/bin/scripts/t/200_connstr.pl b/src/bin/scripts/t/200_connstr.pl
index 18324139db..53c5e21ab2 100644
--- a/src/bin/scripts/t/200_connstr.pl
+++ b/src/bin/scripts/t/200_connstr.pl
@@ -12,7 +12,7 @@ use Test::More;
 
 # We're going to use byte sequences that aren't valid UTF-8 strings.  Use
 # LATIN1, which accepts any byte and has a conversion from each byte to UTF-8.
-$ENV{LC_ALL}           = 'C';
+$ENV{LC_ALL} = 'C';
 $ENV{PGCLIENTENCODING} = 'LATIN1';
 
 # Create database names covering the range of LATIN1 characters and
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index 687af9c1f3..4b17a07089 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -52,12 +52,12 @@ typedef struct vacuumingOptions
 /* object filter options */
 typedef enum
 {
-	OBJFILTER_NONE = 0, /* no filter used */
-	OBJFILTER_ALL_DBS = (1 << 0), /* -a | --all */
-	OBJFILTER_DATABASE = (1 << 1), /* -d | --dbname */
-	OBJFILTER_TABLE = (1 << 2), /* -t | --table */
-	OBJFILTER_SCHEMA = (1 << 3), /* -n | --schema */
-	OBJFILTER_SCHEMA_EXCLUDE = (1 << 4) /* -N | --exclude-schema */
+	OBJFILTER_NONE = 0,			/* no filter used */
+	OBJFILTER_ALL_DBS = (1 << 0),	/* -a | --all */
+	OBJFILTER_DATABASE = (1 << 1),	/* -d | --dbname */
+	OBJFILTER_TABLE = (1 << 2), /* -t | --table */
+	OBJFILTER_SCHEMA = (1 << 3),	/* -n | --schema */
+	OBJFILTER_SCHEMA_EXCLUDE = (1 << 4) /* -N | --exclude-schema */
 } VacObjFilter;
 
 VacObjFilter objfilter = OBJFILTER_NONE;
@@ -83,7 +83,7 @@ static void run_vacuum_command(PGconn *conn, const char *sql, bool echo,
 
 static void help(const char *progname);
 
-void check_objfilter(void);
+void		check_objfilter(void);
 
 /* For analyze-in-stages mode */
 #define ANALYZE_NO_STAGE	-1
diff --git a/src/common/unicode/generate-norm_test_table.pl b/src/common/unicode/generate-norm_test_table.pl
index 45fedad9ae..3434f7e263 100644
--- a/src/common/unicode/generate-norm_test_table.pl
+++ b/src/common/unicode/generate-norm_test_table.pl
@@ -13,7 +13,7 @@ use warnings;
 use File::Basename;
 
 die "Usage: $0 INPUT_FILE OUTPUT_FILE\n" if @ARGV != 2;
-my $input_file  = $ARGV[0];
+my $input_file = $ARGV[0];
 my $output_file = $ARGV[1];
 
 my $output_base = basename($output_file);
@@ -66,7 +66,7 @@ sub codepoint_string_to_hex
 
 	foreach (split(' ', $codepoint_string))
 	{
-		my $cp   = $_;
+		my $cp = $_;
 		my $utf8 = "0x$cp, ";
 		$result .= $utf8;
 	}
@@ -89,10 +89,10 @@ while (my $line = <$INPUT>)
 	my ($source, $nfc, $nfd, $nfkc, $nfkd) = split(';', $line);
 
 	my $source_utf8 = codepoint_string_to_hex($source);
-	my $nfc_utf8    = codepoint_string_to_hex($nfc);
-	my $nfd_utf8    = codepoint_string_to_hex($nfd);
-	my $nfkc_utf8   = codepoint_string_to_hex($nfkc);
-	my $nfkd_utf8   = codepoint_string_to_hex($nfkd);
+	my $nfc_utf8 = codepoint_string_to_hex($nfc);
+	my $nfd_utf8 = codepoint_string_to_hex($nfd);
+	my $nfkc_utf8 = codepoint_string_to_hex($nfkc);
+	my $nfkd_utf8 = codepoint_string_to_hex($nfkd);
 
 	print $OUTPUT "\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
diff --git a/src/common/unicode/generate-unicode_norm_table.pl b/src/common/unicode/generate-unicode_norm_table.pl
index ecc33fcd1a..d5914118ab 100644
--- a/src/common/unicode/generate-unicode_norm_table.pl
+++ b/src/common/unicode/generate-unicode_norm_table.pl
@@ -18,11 +18,10 @@ use PerfectHash;
 
 my $output_path = '.';
 
-GetOptions(
-	'outdir:s' => \$output_path);
+GetOptions('outdir:s' => \$output_path);
 
 my $output_table_file = "$output_path/unicode_norm_table.h";
-my $output_func_file  = "$output_path/unicode_norm_hashfunc.h";
+my $output_func_file = "$output_path/unicode_norm_hashfunc.h";
 
 my $FH;
@@ -43,7 +42,7 @@ close $FH;
 # Read entries from UnicodeData.txt into a list, and a hash table.  We need
 # three fields from each row: the codepoint, canonical combining class,
 # and character decomposition mapping
-my @characters     = ();
+my @characters = ();
 my %character_hash = ();
 open($FH, '<', "$output_path/UnicodeData.txt")
   or die "Could not open $output_path/UnicodeData.txt: $!.";
@@ -54,9 +53,9 @@ while (my $line = <$FH>)
 	# - Unicode code value
 	# - Canonical Combining Class
 	# - Character Decomposition Mapping
-	my @elts   = split(';', $line);
-	my $code   = $elts[0];
-	my $class  = $elts[3];
+	my @elts = split(';', $line);
+	my $code = $elts[0];
+	my $class = $elts[3];
 	my $decomp = $elts[5];
 
 	# Skip codepoints above U+10FFFF. They cannot be represented in 4 bytes
@@ -168,7 +167,7 @@ typedef struct
 
 HEADER
 
-my $decomp_index  = 0;
+my $decomp_index = 0;
 my $decomp_string = "";
 my @dec_cp_packed;
 my $main_index = 0;
@@ -177,8 +176,8 @@ my @rec_info;
 my $last_code = $characters[-1]->{code};
 foreach my $char (@characters)
 {
-	my $code   = $char->{code};
-	my $class  = $char->{class};
+	my $code = $char->{code};
+	my $class = $char->{class};
 	my $decomp = $char->{decomp};
 
 	# Save the code point bytes as a string in network order.
@@ -205,7 +204,7 @@ foreach my $char (@characters)
 
 	my $first_decomp = shift @decomp_elts;
 
-	my $flags   = "";
+	my $flags = "";
 	my $comment = "";
 
 	if ($compat)
@@ -243,10 +242,10 @@ foreach my $char (@characters)
 		{
 			push @rec_info,
 			  {
-				code       => $code,
+				code => $code,
 				main_index => $main_index,
-				first      => $first_decomp,
-				second     => $decomp_elts[0]
+				first => $first_decomp,
+				second => $decomp_elts[0]
 			  };
 		}
 	}
@@ -302,7 +301,7 @@ HEADER
 
 # Emit the definition of the decomp hash function.
 my $dec_funcname = 'Decomp_hash_func';
-my $dec_func     = PerfectHash::generate_hash_function(\@dec_cp_packed,
+my $dec_func = PerfectHash::generate_hash_function(\@dec_cp_packed,
 	$dec_funcname, fixed_key_length => 4);
 print $OF "/* Perfect hash function for decomposition */\n";
 print $OF "static $dec_func\n";
@@ -395,11 +394,11 @@ sub recomp_sort
 
 	# First sort by the first code point
 	return -1 if $a1 < $b1;
-	return 1  if $a1 > $b1;
+	return 1 if $a1 > $b1;
 
 	# Then sort by the second code point
 	return -1 if $a2 < $b2;
-	return 1  if $a2 > $b2;
+	return 1 if $a2 > $b2;
 
 	# Finally sort by the code point that decomposes into first and
 	# second ones.
@@ -407,7 +406,7 @@ sub recomp_sort
 	my $acode = hex($a->{code});
 	my $bcode = hex($b->{code});
 
 	return -1 if $acode < $bcode;
-	return 1  if $acode > $bcode;
+	return 1 if $acode > $bcode;
 
 	die "found duplicate entries of recomposeable code pairs";
 }
diff --git a/src/common/unicode/generate-unicode_normprops_table.pl b/src/common/unicode/generate-unicode_normprops_table.pl
index d90d6b3585..1b7473180b 100644
--- a/src/common/unicode/generate-unicode_normprops_table.pl
+++ b/src/common/unicode/generate-unicode_normprops_table.pl
@@ -108,7 +108,7 @@ foreach my $prop (sort keys %data)
 
 # Emit the definition of the perfect hash function.
 my $funcname = $prop .
'_hash_func'; - my $f = PerfectHash::generate_hash_function(\@cp_packed, $funcname, + my $f = PerfectHash::generate_hash_function(\@cp_packed, $funcname, fixed_key_length => 4); printf "\n/* Perfect hash function for %s */", $prop; print "\nstatic $f\n"; @@ -119,7 +119,7 @@ foreach my $prop (sort keys %data) printf "\nstatic const pg_unicode_norminfo "; printf "UnicodeNormInfo_%s = {\n", $prop; printf "\tUnicodeNormProps_%s,\n", $prop; - printf "\t%s,\n", $funcname; - printf "\t%d\n", scalar @cp_packed; + printf "\t%s,\n", $funcname; + printf "\t%d\n", scalar @cp_packed; printf "};\n"; } diff --git a/src/fe_utils/print.c b/src/fe_utils/print.c index 3396f9b462..7af1ccb6b5 100644 --- a/src/fe_utils/print.c +++ b/src/fe_utils/print.c @@ -1295,10 +1295,11 @@ print_aligned_vertical_line(const printTableOpt *topt, dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth))); if (opt_border == 1) dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 3))); + /* - * Handling the xheader width for border=2 doesn't make - * much sense because this format has an additional - * right border, but keep this for consistency. + * Handling the xheader width for border=2 doesn't make much + * sense because this format has an additional right border, + * but keep this for consistency. */ if (opt_border == 2) dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 7))); diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h index 281039ef67..4476ff7fba 100644 --- a/src/include/access/amapi.h +++ b/src/include/access/amapi.h @@ -245,7 +245,7 @@ typedef struct IndexAmRoutine /* does AM use maintenance_work_mem? */ bool amusemaintenanceworkmem; /* does AM store tuple information only at block granularity? */ - bool amsummarizing; + bool amsummarizing; /* OR of parallel vacuum flags. See vacuum.h for flags. 
*/ uint8 amparallelvacuumoptions; /* type of data stored in index, or InvalidOid if variable */ diff --git a/src/include/access/brin_tuple.h b/src/include/access/brin_tuple.h index c56747aca4..6f33ba6b25 100644 --- a/src/include/access/brin_tuple.h +++ b/src/include/access/brin_tuple.h @@ -44,7 +44,7 @@ typedef struct BrinValues typedef struct BrinMemTuple { bool bt_placeholder; /* this is a placeholder tuple */ - bool bt_empty_range; /* range represents no tuples */ + bool bt_empty_range; /* range represents no tuples */ BlockNumber bt_blkno; /* heap blkno that the tuple is for */ MemoryContext bt_context; /* memcxt holding the bt_columns values */ /* output arrays for brin_deform_tuple: */ diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index ee275650bd..3edc740a3f 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -550,6 +550,7 @@ extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup, /* gistbuild.c */ extern IndexBuildResult *gistbuild(Relation heap, Relation index, struct IndexInfo *indexInfo); + /* gistbuildbuffers.c */ extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel); diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index b19d50ecc2..230bc39cc0 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -902,7 +902,7 @@ table_beginscan(Relation rel, Snapshot snapshot, int nkeys, struct ScanKeyData *key) { uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags); } diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h index 30d20c323e..da32c7db77 100644 --- a/src/include/access/xlogreader.h +++ b/src/include/access/xlogreader.h @@ -332,6 +332,7 @@ extern XLogReaderState *XLogReaderAllocate(int wal_segment_size, const char *waldir, XLogReaderRoutine *routine, void *private_data); + /* Free an XLogReader */ extern void XLogReaderFree(XLogReaderState *state); diff --git a/src/include/catalog/pg_aggregate.dat b/src/include/catalog/pg_aggregate.dat index 283f494bf5..1bc1d97d74 100644 --- a/src/include/catalog/pg_aggregate.dat +++ b/src/include/catalog/pg_aggregate.dat @@ -537,29 +537,29 @@ # array { aggfnoid => 'array_agg(anynonarray)', aggtransfn => 'array_agg_transfn', - aggcombinefn => 'array_agg_combine', aggserialfn => 'array_agg_serialize', - aggdeserialfn => 'array_agg_deserialize', aggfinalfn => 'array_agg_finalfn', - aggfinalextra => 't', aggtranstype => 'internal' }, + aggfinalfn => 'array_agg_finalfn', aggcombinefn => 'array_agg_combine', + aggserialfn => 'array_agg_serialize', + aggdeserialfn => 'array_agg_deserialize', aggfinalextra => 't', + aggtranstype => 'internal' }, { aggfnoid => 'array_agg(anyarray)', aggtransfn => 'array_agg_array_transfn', + aggfinalfn => 'array_agg_array_finalfn', aggcombinefn => 'array_agg_array_combine', aggserialfn => 'array_agg_array_serialize', - aggdeserialfn => 'array_agg_array_deserialize', - aggfinalfn => 'array_agg_array_finalfn', aggfinalextra => 't', + aggdeserialfn => 'array_agg_array_deserialize', aggfinalextra => 't', aggtranstype => 'internal' }, # text { aggfnoid => 'string_agg(text,text)', aggtransfn => 'string_agg_transfn', - aggcombinefn => 'string_agg_combine', aggserialfn => 'string_agg_serialize', - aggdeserialfn => 'string_agg_deserialize', - aggfinalfn => 
'string_agg_finalfn', aggtranstype => 'internal' }, + aggfinalfn => 'string_agg_finalfn', aggcombinefn => 'string_agg_combine', + aggserialfn => 'string_agg_serialize', + aggdeserialfn => 'string_agg_deserialize', aggtranstype => 'internal' }, # bytea { aggfnoid => 'string_agg(bytea,bytea)', aggtransfn => 'bytea_string_agg_transfn', - aggcombinefn => 'string_agg_combine', - aggserialfn => 'string_agg_serialize', - aggdeserialfn => 'string_agg_deserialize', - aggfinalfn => 'bytea_string_agg_finalfn', aggtranstype => 'internal' }, + aggfinalfn => 'bytea_string_agg_finalfn', + aggcombinefn => 'string_agg_combine', aggserialfn => 'string_agg_serialize', + aggdeserialfn => 'string_agg_deserialize', aggtranstype => 'internal' }, # range { aggfnoid => 'range_intersect_agg(anyrange)', diff --git a/src/include/catalog/pg_auth_members.h b/src/include/catalog/pg_auth_members.h index 987d774844..df2b8b29e0 100644 --- a/src/include/catalog/pg_auth_members.h +++ b/src/include/catalog/pg_auth_members.h @@ -34,7 +34,7 @@ CATALOG(pg_auth_members,1261,AuthMemRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_ Oid member BKI_LOOKUP(pg_authid); /* ID of a member of that role */ Oid grantor BKI_LOOKUP(pg_authid); /* who granted the membership */ bool admin_option; /* granted with admin option? */ - bool inherit_option; /* exercise privileges without SET ROLE? */ + bool inherit_option; /* exercise privileges without SET ROLE? */ bool set_option; /* use SET ROLE to the target role? */ } FormData_pg_auth_members; diff --git a/src/include/catalog/pg_database.dat b/src/include/catalog/pg_database.dat index 68dcac1a6e..0754ef1bce 100644 --- a/src/include/catalog/pg_database.dat +++ b/src/include/catalog/pg_database.dat @@ -18,6 +18,7 @@ datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't', datallowconn => 't', datconnlimit => '-1', datfrozenxid => '0', datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE', - datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE', daticurules => 'ICU_RULES', datacl => '_null_' }, + datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE', + daticurules => 'ICU_RULES', datacl => '_null_' }, ] diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 3ab4d3f121..2e0f9f2b5c 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -1667,8 +1667,9 @@ prorettype => 'internal', proargtypes => 'internal anyarray', prosrc => 'array_agg_array_transfn' }, { oid => '6296', descr => 'aggregate combine function', - proname => 'array_agg_array_combine', proisstrict => 'f', prorettype => 'internal', - proargtypes => 'internal internal', prosrc => 'array_agg_array_combine' }, + proname => 'array_agg_array_combine', proisstrict => 'f', + prorettype => 'internal', proargtypes => 'internal internal', + prosrc => 'array_agg_array_combine' }, { oid => '6297', descr => 'aggregate serial function', proname => 'array_agg_array_serialize', prorettype => 'bytea', proargtypes => 'internal', prosrc => 'array_agg_array_serialize' }, @@ -5481,10 +5482,9 @@ prorettype => 'oid', proargtypes => 'int4', prosrc => 'pg_stat_get_backend_dbid' }, { oid => '6107', descr => 'statistics: get subtransaction status of backend', - proname => 'pg_stat_get_backend_subxact', provolatile => 's', proparallel => 'r', - prorettype => 'record', proargtypes => 'int4', - proallargtypes => '{int4,int4,bool}', - proargmodes => '{i,o,o}', + proname => 'pg_stat_get_backend_subxact', provolatile => 's', + proparallel => 'r', prorettype => 'record', proargtypes => 'int4', + 
proallargtypes => '{int4,int4,bool}', proargmodes => '{i,o,o}', proargnames => '{bid,subxact_count,subxact_overflowed}', prosrc => 'pg_stat_get_backend_subxact' }, { oid => '1939', descr => 'statistics: user ID of backend', @@ -5731,9 +5731,9 @@ prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_buf_alloc' }, { oid => '6214', descr => 'statistics: per backend type IO statistics', - proname => 'pg_stat_get_io', provolatile => 'v', - prorows => '30', proretset => 't', - proparallel => 'r', prorettype => 'record', proargtypes => '', + proname => 'pg_stat_get_io', prorows => '30', proretset => 't', + provolatile => 'v', proparallel => 'r', prorettype => 'record', + proargtypes => '', proallargtypes => '{text,text,text,int8,float8,int8,float8,int8,float8,int8,float8,int8,int8,int8,int8,int8,float8,timestamptz}', proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', proargnames => '{backend_type,object,context,reads,read_time,writes,write_time,writebacks,writeback_time,extends,extend_time,op_bytes,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}', @@ -6407,8 +6407,9 @@ proname => 'pg_switch_wal', provolatile => 'v', prorettype => 'pg_lsn', proargtypes => '', prosrc => 'pg_switch_wal' }, { oid => '6305', descr => 'log details of the current snapshot to WAL', - proname => 'pg_log_standby_snapshot', provolatile => 'v', prorettype => 'pg_lsn', - proargtypes => '', prosrc => 'pg_log_standby_snapshot' }, + proname => 'pg_log_standby_snapshot', provolatile => 'v', + prorettype => 'pg_lsn', proargtypes => '', + prosrc => 'pg_log_standby_snapshot' }, { oid => '3098', descr => 'create a named restore point', proname => 'pg_create_restore_point', provolatile => 'v', prorettype => 'pg_lsn', proargtypes => 'text', @@ -10349,15 +10350,15 @@ proargtypes => 'internal', prosrc => 'window_dense_rank_support' }, { oid => '3103', descr => 'fractional rank within partition', proname => 'percent_rank', prosupport => 'window_percent_rank_support', - prokind => 'w', proisstrict => 'f', prorettype => 'float8', - proargtypes => '', prosrc => 'window_percent_rank' }, + prokind => 'w', proisstrict => 'f', prorettype => 'float8', proargtypes => '', + prosrc => 'window_percent_rank' }, { oid => '6306', descr => 'planner support for percent_rank', proname => 'window_percent_rank_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'window_percent_rank_support' }, { oid => '3104', descr => 'fractional row number within partition', proname => 'cume_dist', prosupport => 'window_cume_dist_support', - prokind => 'w', proisstrict => 'f', prorettype => 'float8', - proargtypes => '', prosrc => 'window_cume_dist' }, + prokind => 'w', proisstrict => 'f', prorettype => 'float8', proargtypes => '', + prosrc => 'window_cume_dist' }, { oid => '6307', descr => 'planner support for cume_dist', proname => 'window_cume_dist_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'window_cume_dist_support' }, @@ -11824,7 +11825,8 @@ provariadic => 'text', proretset => 't', provolatile => 's', prorettype => 'record', proargtypes => '_text', proallargtypes => '{_text,oid,oid,int2vector,pg_node_tree}', - proargmodes => '{v,o,o,o,o}', proargnames => '{pubname,pubid,relid,attrs,qual}', + proargmodes => '{v,o,o,o,o}', + proargnames => '{pubname,pubid,relid,attrs,qual}', prosrc => 'pg_get_publication_tables' }, { oid => '6121', descr => 'returns whether a relation can be part of a publication', diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h index 
91d729d62d..1d40eebc78 100644 --- a/src/include/catalog/pg_subscription.h +++ b/src/include/catalog/pg_subscription.h @@ -88,10 +88,10 @@ CATALOG(pg_subscription,6100,SubscriptionRelationId) BKI_SHARED_RELATION BKI_ROW bool subdisableonerr; /* True if a worker error should cause the * subscription to be disabled */ - bool subpasswordrequired; /* Must connection use a password? */ + bool subpasswordrequired; /* Must connection use a password? */ - bool subrunasowner; /* True if replication should execute as - * the subscription owner */ + bool subrunasowner; /* True if replication should execute as the + * subscription owner */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ /* Connection string to the publisher */ diff --git a/src/include/catalog/reformat_dat_file.pl b/src/include/catalog/reformat_dat_file.pl index 7281a65802..725117d846 100755 --- a/src/include/catalog/reformat_dat_file.pl +++ b/src/include/catalog/reformat_dat_file.pl @@ -41,7 +41,7 @@ my $output_path = ''; my $full_tuples = 0; GetOptions( - 'output=s' => \$output_path, + 'output=s' => \$output_path, 'full-tuples' => \$full_tuples) || usage(); # Sanity check arguments. @@ -70,7 +70,7 @@ foreach my $datfile (@ARGV) my $catalog = Catalog::ParseHeader($header); my $catname = $catalog->{catname}; - my $schema = $catalog->{columns}; + my $schema = $catalog->{columns}; push @catnames, $catname; $catalogs{$catname} = $catalog; @@ -219,7 +219,7 @@ sub strip_default_values # data files. sub format_hash { - my $data = shift; + my $data = shift; my @orig_attnames = @_; # Copy attname to new array if it has a value, so we can determine @@ -237,7 +237,7 @@ sub format_hash my $char_count = 1; my $threshold; - my $hash_str = ''; + my $hash_str = ''; my $element_count = 0; foreach my $attname (@attnames) @@ -271,7 +271,7 @@ sub format_hash # Include a leading space in the key-value pair, since this will # always go after either a comma or an additional padding space on # the next line. - my $element = " $attname => '$value'"; + my $element = " $attname => '$value'"; my $element_length = length($element); # If adding the element to the current line would expand the line diff --git a/src/include/catalog/renumber_oids.pl b/src/include/catalog/renumber_oids.pl index 1d2e78fc7d..ec09584959 100755 --- a/src/include/catalog/renumber_oids.pl +++ b/src/include/catalog/renumber_oids.pl @@ -32,16 +32,16 @@ my $FirstGenbkiObjectId = Catalog::FindDefinedSymbol('access/transam.h', '..', 'FirstGenbkiObjectId'); # Process command line switches. -my $output_path = ''; +my $output_path = ''; my $first_mapped_oid = 0; -my $last_mapped_oid = $FirstGenbkiObjectId - 1; -my $target_oid = 0; +my $last_mapped_oid = $FirstGenbkiObjectId - 1; +my $target_oid = 0; GetOptions( - 'output=s' => \$output_path, + 'output=s' => \$output_path, 'first-mapped-oid=i' => \$first_mapped_oid, - 'last-mapped-oid=i' => \$last_mapped_oid, - 'target-oid=i' => \$target_oid) || usage(); + 'last-mapped-oid=i' => \$last_mapped_oid, + 'target-oid=i' => \$target_oid) || usage(); # Sanity check arguments. die "Unexpected non-switch arguments.\n" if @ARGV; @@ -62,7 +62,7 @@ if ($output_path ne '' && substr($output_path, -1) ne '/') # Collect all the existing assigned OIDs (including those to be remapped). my @header_files = glob("pg_*.h"); -my $oids = Catalog::FindAllOidsFromHeaders(@header_files); +my $oids = Catalog::FindAllOidsFromHeaders(@header_files); # Hash-ify the existing OIDs for convenient lookup. 
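# The "Hash-ify" step just below uses the standard Perl idiom of building a
# lookup hash so that membership tests become cheap hash probes; a minimal
# sketch, where the %seen name and the literal OID are hypothetical (only
# $oids comes from the surrounding code):
#
#   my %seen = map { $_ => 1 } @$oids;
#   die "OID 16384 is already assigned\n" if $seen{16384};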
my %oidhash; @@ -108,7 +108,7 @@ foreach my $input_file (@header_files) # Write output files to specified directory. # Use a .tmp suffix, then rename into place, in case we're overwriting. - my $output_file = "$output_path$catname.h"; + my $output_file = "$output_path$catname.h"; my $tmp_output_file = "$output_file.tmp"; open my $ofd, '>', $tmp_output_file or die "can't open $tmp_output_file: $!"; @@ -236,7 +236,7 @@ foreach my $input_file (glob("pg_*.dat")) # Write output files to specified directory. # Use a .tmp suffix, then rename into place, in case we're overwriting. - my $output_file = "$output_path$catname.dat"; + my $output_file = "$output_path$catname.dat"; my $tmp_output_file = "$output_file.tmp"; open my $ofd, '>', $tmp_output_file or die "can't open $tmp_output_file: $!"; diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h index 857ca58f6f..cb2a2cde8a 100644 --- a/src/include/executor/hashjoin.h +++ b/src/include/executor/hashjoin.h @@ -372,6 +372,6 @@ typedef struct HashJoinTableData ParallelHashJoinState *parallel_state; ParallelHashJoinBatchAccessor *batches; dsa_pointer current_chunk_shared; -} HashJoinTableData; +} HashJoinTableData; #endif /* HASHJOIN_H */ diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index ff64b7cb98..3d34575a22 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -409,7 +409,7 @@ slot_getattr(TupleTableSlot *slot, int attnum, static inline Datum slot_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull) { - Assert(attnum < 0); /* caller error */ + Assert(attnum < 0); /* caller error */ if (attnum == TableOidAttributeNumber) { diff --git a/src/include/fe_utils/print.h b/src/include/fe_utils/print.h index 54f783c907..cc6652def9 100644 --- a/src/include/fe_utils/print.h +++ b/src/include/fe_utils/print.h @@ -69,10 +69,13 @@ typedef enum printTextLineWrap typedef enum printXheaderWidthType { /* Expanded header line width variants */ - PRINT_XHEADER_FULL, /* do not truncate header line (this is the default) */ - PRINT_XHEADER_COLUMN, /* only print header line above the first column */ - PRINT_XHEADER_PAGE, /* header line must not be longer than terminal width */ - PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */ + PRINT_XHEADER_FULL, /* do not truncate header line (this is the + * default) */ + PRINT_XHEADER_COLUMN, /* only print header line above the first + * column */ + PRINT_XHEADER_PAGE, /* header line must not be longer than + * terminal width */ + PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */ } printXheaderWidthType; typedef struct printTextFormat @@ -110,8 +113,10 @@ typedef struct printTableOpt enum printFormat format; /* see enum above */ unsigned short int expanded; /* expanded/vertical output (if supported * by output format); 0=no, 1=yes, 2=auto */ - printXheaderWidthType expanded_header_width_type; /* width type for header line in expanded mode */ - int expanded_header_exact_width; /* explicit width for header line in expanded mode */ + printXheaderWidthType expanded_header_width_type; /* width type for header + * line in expanded mode */ + int expanded_header_exact_width; /* explicit width for header + * line in expanded mode */ unsigned short int border; /* Print a border around the table. 
0=none, * 1=dividing lines, 2=full */ unsigned short int pager; /* use pager for output (if to stdout and diff --git a/src/include/funcapi.h b/src/include/funcapi.h index 11febb138b..cc0cca3272 100644 --- a/src/include/funcapi.h +++ b/src/include/funcapi.h @@ -231,6 +231,7 @@ HeapTupleGetDatum(const HeapTupleData *tuple) { return HeapTupleHeaderGetDatum(tuple->t_data); } + /* obsolete version of above */ #define TupleGetDatum(_slot, _tuple) HeapTupleGetDatum(_tuple) diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index 08e7dae73f..792a743f72 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -1479,6 +1479,7 @@ typedef struct SQLValueFunction { Expr xpr; SQLValueFunctionOp op; /* which function this is */ + /* * Result type/typmod. Type is fully determined by "op", so no need to * include this Oid in the query jumbling. diff --git a/src/include/port/win32ntdll.h b/src/include/port/win32ntdll.h index 18ff6f4b41..1ce9360ec1 100644 --- a/src/include/port/win32ntdll.h +++ b/src/include/port/win32ntdll.h @@ -21,9 +21,9 @@ #define FLUSH_FLAGS_FILE_DATA_SYNC_ONLY 0x4 #endif -typedef NTSTATUS (__stdcall *RtlGetLastNtStatus_t) (void); -typedef ULONG (__stdcall *RtlNtStatusToDosError_t) (NTSTATUS); -typedef NTSTATUS (__stdcall *NtFlushBuffersFileEx_t) (HANDLE, ULONG, PVOID, ULONG, PIO_STATUS_BLOCK); +typedef NTSTATUS (__stdcall * RtlGetLastNtStatus_t) (void); +typedef ULONG (__stdcall * RtlNtStatusToDosError_t) (NTSTATUS); +typedef NTSTATUS (__stdcall * NtFlushBuffersFileEx_t) (HANDLE, ULONG, PVOID, ULONG, PIO_STATUS_BLOCK); extern PGDLLIMPORT RtlGetLastNtStatus_t pg_RtlGetLastNtStatus; extern PGDLLIMPORT RtlNtStatusToDosError_t pg_RtlNtStatusToDosError; diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index e37f5120eb..1b9db22acb 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -26,7 +26,7 @@ typedef enum { LOGICAL_REP_MODE_BUFFERED, LOGICAL_REP_MODE_IMMEDIATE -} LogicalRepMode; +} LogicalRepMode; /* an individual tuple, stored in one chunk of memory */ typedef struct ReorderBufferTupleBuf diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h index 6ab00daa2e..0f5fb6be00 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h @@ -89,7 +89,7 @@ typedef enum ExtendBufferedFlags /* internal flags follow */ EB_LOCK_TARGET = (1 << 5), -} ExtendBufferedFlags; +} ExtendBufferedFlags; /* * To identify the relation - either relation or smgr + relpersistence has to diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h index 6ae434596a..8575bea25c 100644 --- a/src/include/storage/lock.h +++ b/src/include/storage/lock.h @@ -314,7 +314,7 @@ typedef struct LOCK LOCKMASK grantMask; /* bitmask for lock types already granted */ LOCKMASK waitMask; /* bitmask for lock types awaited */ dlist_head procLocks; /* list of PROCLOCK objects assoc. 
with lock */ - dclist_head waitProcs; /* list of PGPROC objects waiting on lock */ + dclist_head waitProcs; /* list of PGPROC objects waiting on lock */ int requested[MAX_LOCKMODES]; /* counts of requested locks */ int nRequested; /* total of requested[] array */ int granted[MAX_LOCKMODES]; /* counts of granted locks */ diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index d2c7afb8f4..34169e5889 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -26,10 +26,11 @@ struct PGPROC; /* what state of the wait process is a backend in */ typedef enum LWLockWaitState { - LW_WS_NOT_WAITING, /* not currently waiting / woken up */ - LW_WS_WAITING, /* currently waiting */ - LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet signalled */ -} LWLockWaitState; + LW_WS_NOT_WAITING, /* not currently waiting / woken up */ + LW_WS_WAITING, /* currently waiting */ + LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet + * signalled */ +} LWLockWaitState; /* * Code outside of lwlock.c should not manipulate the contents of this diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h index 142a195d0e..93f84500bf 100644 --- a/src/include/storage/predicate_internals.h +++ b/src/include/storage/predicate_internals.h @@ -196,7 +196,7 @@ typedef struct RWConflictData dlist_node inLink; /* link for list of conflicts in to a sxact */ SERIALIZABLEXACT *sxactOut; SERIALIZABLEXACT *sxactIn; -} RWConflictData; +} RWConflictData; typedef struct RWConflictData *RWConflict; diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 4258cd92c9..ef74f32693 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -387,11 +387,11 @@ typedef struct PROC_HDR /* Head of list of free PGPROC structures */ dlist_head freeProcs; /* Head of list of autovacuum's free PGPROC structures */ - dlist_head autovacFreeProcs; + dlist_head autovacFreeProcs; /* Head of list of bgworker free PGPROC structures */ - dlist_head bgworkerFreeProcs; + dlist_head bgworkerFreeProcs; /* Head of list of walsender free PGPROC structures */ - dlist_head walsenderFreeProcs; + dlist_head walsenderFreeProcs; /* First pgproc waiting for group XID clear */ pg_atomic_uint32 procArrayGroupFirst; /* First pgproc waiting for group transaction status update */ diff --git a/src/include/utils/backend_status.h b/src/include/utils/backend_status.h index 9651cb1d0c..cfb26d2bcc 100644 --- a/src/include/utils/backend_status.h +++ b/src/include/utils/backend_status.h @@ -271,13 +271,13 @@ typedef struct LocalPgBackendStatus /* * Number of cached subtransactions in the current session. */ - int backend_subxact_count; + int backend_subxact_count; /* * The number of subtransactions in the current session which exceeded the * cached subtransaction limit. 
*/ - bool backend_subxact_overflowed; + bool backend_subxact_overflowed; } LocalPgBackendStatus; diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h index 03ab598215..e2a7243542 100644 --- a/src/include/utils/pg_locale.h +++ b/src/include/utils/pg_locale.h @@ -40,7 +40,7 @@ extern PGDLLIMPORT char *locale_messages; extern PGDLLIMPORT char *locale_monetary; extern PGDLLIMPORT char *locale_numeric; extern PGDLLIMPORT char *locale_time; -extern PGDLLIMPORT int icu_validation_level; +extern PGDLLIMPORT int icu_validation_level; /* lc_time localization cache */ extern PGDLLIMPORT char *localized_abbrev_days[]; @@ -49,7 +49,7 @@ extern PGDLLIMPORT char *localized_abbrev_months[]; extern PGDLLIMPORT char *localized_full_months[]; /* is the databases's LC_CTYPE the C locale? */ -extern PGDLLIMPORT bool database_ctype_is_c; +extern PGDLLIMPORT bool database_ctype_is_c; extern bool check_locale(int category, const char *locale, char **canonname); extern char *pg_perm_setlocale(int category, const char *locale); @@ -104,9 +104,9 @@ extern bool pg_locale_deterministic(pg_locale_t locale); extern pg_locale_t pg_newlocale_from_collation(Oid collid); extern char *get_collation_actual_version(char collprovider, const char *collcollate); -extern int pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale); -extern int pg_strncoll(const char *arg1, size_t len1, - const char *arg2, size_t len2, pg_locale_t locale); +extern int pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale); +extern int pg_strncoll(const char *arg1, size_t len1, + const char *arg2, size_t len2, pg_locale_t locale); extern bool pg_strxfrm_enabled(pg_locale_t locale); extern size_t pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale); diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 31f84e90eb..1426a353cd 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -161,7 +161,7 @@ typedef struct RelationData Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */ Bitmapset *rd_pkattr; /* cols included in primary key */ Bitmapset *rd_idattr; /* included in replica identity index */ - Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */ + Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */ Bitmapset *rd_summarizedattr; /* cols indexed by summarizing indexes */ PublicationDesc *rd_pubdesc; /* publication descriptor, or NULL */ diff --git a/src/include/utils/varlena.h b/src/include/utils/varlena.h index e72ebaddbf..77f5b24735 100644 --- a/src/include/utils/varlena.h +++ b/src/include/utils/varlena.h @@ -44,7 +44,7 @@ typedef struct ClosestMatchState int min_d; int max_d; const char *match; -} ClosestMatchState; +} ClosestMatchState; extern void initClosestMatch(ClosestMatchState *state, const char *source, int max_d); extern void updateClosestMatch(ClosestMatchState *state, const char *candidate); diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c index 7036e7c48d..fa56276758 100644 --- a/src/interfaces/ecpg/ecpglib/data.c +++ b/src/interfaces/ecpg/ecpglib/data.c @@ -521,7 +521,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case ECPGt_bytea: { struct ECPGgeneric_bytea *variable = - (struct ECPGgeneric_bytea *) (var + offset * act_tuple); + (struct ECPGgeneric_bytea *) (var + offset * act_tuple); long dst_size, src_size, dec_size; @@ -690,7 +690,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case 
ECPGt_varchar: { struct ECPGgeneric_varchar *variable = - (struct ECPGgeneric_varchar *) (var + offset * act_tuple); + (struct ECPGgeneric_varchar *) (var + offset * act_tuple); variable->len = size; if (varcharsize == 0) diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c index 649a71c286..883a210a81 100644 --- a/src/interfaces/ecpg/ecpglib/descriptor.c +++ b/src/interfaces/ecpg/ecpglib/descriptor.c @@ -210,7 +210,7 @@ get_char_item(int lineno, void *var, enum ECPGttype vartype, char *value, int va case ECPGt_varchar: { struct ECPGgeneric_varchar *variable = - (struct ECPGgeneric_varchar *) var; + (struct ECPGgeneric_varchar *) var; if (varcharsize == 0) memcpy(variable->arr, value, strlen(value)); @@ -597,7 +597,7 @@ set_desc_attr(struct descriptor_item *desc_item, struct variable *var, else { struct ECPGgeneric_bytea *variable = - (struct ECPGgeneric_bytea *) (var->value); + (struct ECPGgeneric_bytea *) (var->value); desc_item->is_binary = true; desc_item->data_len = variable->len; diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c index 641851983d..93926fd4fb 100644 --- a/src/interfaces/ecpg/ecpglib/execute.c +++ b/src/interfaces/ecpg/ecpglib/execute.c @@ -820,7 +820,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari case ECPGt_bytea: { struct ECPGgeneric_bytea *variable = - (struct ECPGgeneric_bytea *) (var->value); + (struct ECPGgeneric_bytea *) (var->value); if (!(mallocedval = (char *) ecpg_alloc(variable->len, lineno))) return false; @@ -833,7 +833,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari case ECPGt_varchar: { struct ECPGgeneric_varchar *variable = - (struct ECPGgeneric_varchar *) (var->value); + (struct ECPGgeneric_varchar *) (var->value); if (!(newcopy = (char *) ecpg_alloc(variable->len + 1, lineno))) return false; diff --git a/src/interfaces/ecpg/include/pgtypes_interval.h b/src/interfaces/ecpg/include/pgtypes_interval.h index 8471b609db..2809b356f7 100644 --- a/src/interfaces/ecpg/include/pgtypes_interval.h +++ b/src/interfaces/ecpg/include/pgtypes_interval.h @@ -36,10 +36,10 @@ extern "C" #endif extern interval * PGTYPESinterval_new(void); -extern void PGTYPESinterval_free(interval *intvl); +extern void PGTYPESinterval_free(interval * intvl); extern interval * PGTYPESinterval_from_asc(char *str, char **endptr); -extern char *PGTYPESinterval_to_asc(interval *span); -extern int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest); +extern char *PGTYPESinterval_to_asc(interval * span); +extern int PGTYPESinterval_copy(interval * intvlsrc, interval * intvldest); #ifdef __cplusplus } diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h index 1ec38791f8..00a45799d5 100644 --- a/src/interfaces/ecpg/pgtypeslib/dt.h +++ b/src/interfaces/ecpg/pgtypeslib/dt.h @@ -315,7 +315,7 @@ int DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct tm *tm int DecodeTime(char *str, int *tmask, struct tm *tm, fsec_t *fsec); void EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates); void EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str); -int tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp *result); +int tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp * result); int DecodeUnits(int field, char *lowtoken, int *val); bool CheckDateTokenTables(void); void EncodeDateOnly(struct tm 
*tm, int style, char *str, bool EuroDates); diff --git a/src/interfaces/ecpg/pgtypeslib/interval.c b/src/interfaces/ecpg/pgtypeslib/interval.c index dc083c1327..936a688381 100644 --- a/src/interfaces/ecpg/pgtypeslib/interval.c +++ b/src/interfaces/ecpg/pgtypeslib/interval.c @@ -780,17 +780,17 @@ EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) case INTSTYLE_SQL_STANDARD: { bool has_negative = year < 0 || mon < 0 || - mday < 0 || hour < 0 || - min < 0 || sec < 0 || fsec < 0; + mday < 0 || hour < 0 || + min < 0 || sec < 0 || fsec < 0; bool has_positive = year > 0 || mon > 0 || - mday > 0 || hour > 0 || - min > 0 || sec > 0 || fsec > 0; + mday > 0 || hour > 0 || + min > 0 || sec > 0 || fsec > 0; bool has_year_month = year != 0 || mon != 0; bool has_day_time = mday != 0 || hour != 0 || - min != 0 || sec != 0 || fsec != 0; + min != 0 || sec != 0 || fsec != 0; bool has_day = mday != 0; bool sql_standard_value = !(has_negative && has_positive) && - !(has_year_month && has_day_time); + !(has_year_month && has_day_time); /* * SQL Standard wants only 1 "" preceding the whole diff --git a/src/interfaces/ecpg/pgtypeslib/timestamp.c b/src/interfaces/ecpg/pgtypeslib/timestamp.c index 6185fc1895..f1b143fbd2 100644 --- a/src/interfaces/ecpg/pgtypeslib/timestamp.c +++ b/src/interfaces/ecpg/pgtypeslib/timestamp.c @@ -346,8 +346,8 @@ dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm *tm, break; /* - * The preferred date and time representation for - * the current locale. + * The preferred date and time representation for the + * current locale. */ case 'c': /* XXX */ diff --git a/src/interfaces/ecpg/preproc/check_rules.pl b/src/interfaces/ecpg/preproc/check_rules.pl index f28562bf54..5e823fa30e 100644 --- a/src/interfaces/ecpg/preproc/check_rules.pl +++ b/src/interfaces/ecpg/preproc/check_rules.pl @@ -20,16 +20,16 @@ use strict; use warnings; use Getopt::Long; -my $srcdir = '.'; -my $parser = '../../../backend/parser/gram.y'; -my $stamp = ''; +my $srcdir = '.'; +my $parser = '../../../backend/parser/gram.y'; +my $stamp = ''; my $verbose = 0; GetOptions( 'srcdir=s' => \$srcdir, 'parser=s' => \$parser, - 'stamp=s' => \$stamp, - 'verbose' => \$verbose,) or die "wrong arguments"; + 'stamp=s' => \$stamp, + 'verbose' => \$verbose,) or die "wrong arguments"; my $filename = "$srcdir/ecpg.addons"; if ($verbose) @@ -51,14 +51,14 @@ my %replace_line = ( 'PrepareStmtPREPAREnameprep_type_clauseASPreparableStmt' => 'PREPARE prepared_name prep_type_clause AS PreparableStmt'); -my $block = ''; -my $yaccmode = 0; -my $in_rule = 0; +my $block = ''; +my $yaccmode = 0; +my $in_rule = 0; my $brace_indent = 0; my (@arr, %found); -my $comment = 0; +my $comment = 0; my $non_term_id = ''; -my $cc = 0; +my $cc = 0; open my $parser_fh, '<', $parser or die $!; while (<$parser_fh>) @@ -140,13 +140,14 @@ while (<$parser_fh>) $block = ''; $in_rule = 0 if $arr[$fieldIndexer] eq ';'; } - elsif (($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:') - || ( $fieldIndexer + 1 < $n - && $arr[ $fieldIndexer + 1 ] eq ':')) + elsif ( + ($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:') + || ( $fieldIndexer + 1 < $n + && $arr[ $fieldIndexer + 1 ] eq ':')) { die "unterminated rule at grammar line $.\n" if $in_rule; - $in_rule = 1; + $in_rule = 1; $non_term_id = $arr[$fieldIndexer]; $non_term_id =~ tr/://d; } diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl index faeb460ef5..7574fc3110 100644 --- a/src/interfaces/ecpg/preproc/parse.pl +++ b/src/interfaces/ecpg/preproc/parse.pl @@ -16,9 +16,9 
@@ use strict; use warnings; use Getopt::Long; -my $srcdir = '.'; +my $srcdir = '.'; my $outfile = ''; -my $parser = ''; +my $parser = ''; GetOptions( 'srcdir=s' => \$srcdir, @@ -29,13 +29,13 @@ GetOptions( open(my $parserfh, '<', $parser) or die "could not open parser file $parser"; open(my $outfh, '>', $outfile) or die "could not open output file $outfile"; -my $copymode = 0; -my $brace_indent = 0; -my $yaccmode = 0; -my $in_rule = 0; -my $header_included = 0; +my $copymode = 0; +my $brace_indent = 0; +my $yaccmode = 0; +my $in_rule = 0; +my $header_included = 0; my $feature_not_supported = 0; -my $tokenmode = 0; +my $tokenmode = 0; my (%buff, $infield, $comment, %tokens, %addons); my ($stmt_mode, @fields); @@ -50,67 +50,67 @@ my %replace_token = ( 'FCONST' => 'ecpg_fconst', 'Sconst' => 'ecpg_sconst', 'XCONST' => 'ecpg_xconst', - 'IDENT' => 'ecpg_ident', - 'PARAM' => 'ecpg_param',); + 'IDENT' => 'ecpg_ident', + 'PARAM' => 'ecpg_param',); # or in the block my %replace_string = ( - 'FORMAT_LA' => 'format', - 'NOT_LA' => 'not', - 'NULLS_LA' => 'nulls', - 'WITH_LA' => 'with', - 'WITHOUT_LA' => 'without', - 'TYPECAST' => '::', - 'DOT_DOT' => '..', - 'COLON_EQUALS' => ':=', + 'FORMAT_LA' => 'format', + 'NOT_LA' => 'not', + 'NULLS_LA' => 'nulls', + 'WITH_LA' => 'with', + 'WITHOUT_LA' => 'without', + 'TYPECAST' => '::', + 'DOT_DOT' => '..', + 'COLON_EQUALS' => ':=', 'EQUALS_GREATER' => '=>', - 'LESS_EQUALS' => '<=', + 'LESS_EQUALS' => '<=', 'GREATER_EQUALS' => '>=', - 'NOT_EQUALS' => '<>',); + 'NOT_EQUALS' => '<>',); # specific replace_types for specific non-terminals - never include the ':' # ECPG-only replace_types are defined in ecpg-replace_types my %replace_types = ( - 'PrepareStmt' => '', - 'ExecuteStmt' => '', + 'PrepareStmt' => '', + 'ExecuteStmt' => '', 'opt_array_bounds' => '', # "ignore" means: do not create type and rules for this non-term-id - 'parse_toplevel' => 'ignore', - 'stmtmulti' => 'ignore', - 'CreateAsStmt' => 'ignore', - 'DeallocateStmt' => 'ignore', - 'ColId' => 'ignore', - 'type_function_name' => 'ignore', - 'ColLabel' => 'ignore', - 'Sconst' => 'ignore', + 'parse_toplevel' => 'ignore', + 'stmtmulti' => 'ignore', + 'CreateAsStmt' => 'ignore', + 'DeallocateStmt' => 'ignore', + 'ColId' => 'ignore', + 'type_function_name' => 'ignore', + 'ColLabel' => 'ignore', + 'Sconst' => 'ignore', 'opt_distinct_clause' => 'ignore', - 'PLpgSQL_Expr' => 'ignore', - 'PLAssignStmt' => 'ignore', - 'plassign_target' => 'ignore', - 'plassign_equals' => 'ignore',); + 'PLpgSQL_Expr' => 'ignore', + 'PLAssignStmt' => 'ignore', + 'plassign_target' => 'ignore', + 'plassign_equals' => 'ignore',); # these replace_line commands excise certain keywords from the core keyword # lists. Be sure to account for these in ColLabel and related productions. 
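# A table like %replace_line (below) maps a flattened rule signature -- the
# non-terminal name with the rule's tokens appended, as the entries show --
# to either 'ignore' or replacement production text. A minimal sketch of how
# such a table is typically consulted; the key construction and variable
# names here are illustrative assumptions, not code from parse.pl:
#
#   my $key = $non_term_id . join('', @tokens);
#   if (defined(my $repl = $replace_line{$key}))
#   {
#       next if $repl eq 'ignore';    # drop this production entirely
#       $line = $repl;                # otherwise substitute the new text
#   }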
my %replace_line = ( 'unreserved_keywordCONNECTION' => 'ignore', - 'unreserved_keywordCURRENT_P' => 'ignore', - 'unreserved_keywordDAY_P' => 'ignore', - 'unreserved_keywordHOUR_P' => 'ignore', - 'unreserved_keywordINPUT_P' => 'ignore', - 'unreserved_keywordMINUTE_P' => 'ignore', - 'unreserved_keywordMONTH_P' => 'ignore', - 'unreserved_keywordSECOND_P' => 'ignore', - 'unreserved_keywordYEAR_P' => 'ignore', - 'col_name_keywordCHAR_P' => 'ignore', - 'col_name_keywordINT_P' => 'ignore', - 'col_name_keywordVALUES' => 'ignore', - 'reserved_keywordTO' => 'ignore', - 'reserved_keywordUNION' => 'ignore', + 'unreserved_keywordCURRENT_P' => 'ignore', + 'unreserved_keywordDAY_P' => 'ignore', + 'unreserved_keywordHOUR_P' => 'ignore', + 'unreserved_keywordINPUT_P' => 'ignore', + 'unreserved_keywordMINUTE_P' => 'ignore', + 'unreserved_keywordMONTH_P' => 'ignore', + 'unreserved_keywordSECOND_P' => 'ignore', + 'unreserved_keywordYEAR_P' => 'ignore', + 'col_name_keywordCHAR_P' => 'ignore', + 'col_name_keywordINT_P' => 'ignore', + 'col_name_keywordVALUES' => 'ignore', + 'reserved_keywordTO' => 'ignore', + 'reserved_keywordUNION' => 'ignore', # some other production rules have to be ignored or replaced - 'fetch_argsFORWARDopt_from_incursor_name' => 'ignore', - 'fetch_argsBACKWARDopt_from_incursor_name' => 'ignore', + 'fetch_argsFORWARDopt_from_incursor_name' => 'ignore', + 'fetch_argsBACKWARDopt_from_incursor_name' => 'ignore', "opt_array_boundsopt_array_bounds'['Iconst']'" => 'ignore', 'VariableShowStmtSHOWvar_name' => 'SHOW var_name ecpg_into', 'VariableShowStmtSHOWTIMEZONE' => 'SHOW TIME ZONE ecpg_into', @@ -139,7 +139,7 @@ dump_buffer('tokens'); dump_buffer('types'); dump_buffer('ecpgtype'); dump_buffer('orig_tokens'); -print $outfh '%%', "\n"; +print $outfh '%%', "\n"; print $outfh 'prog: statements;', "\n"; dump_buffer('rules'); include_file('trailer', 'ecpg.trailer'); @@ -177,7 +177,7 @@ sub main if (/^%%/) { $tokenmode = 2; - $copymode = 1; + $copymode = 1; $yaccmode++; $infield = 0; } @@ -212,14 +212,14 @@ sub main } elsif ($arr[0] eq '%type' && $header_included == 0) { - include_file('header', 'ecpg.header'); + include_file('header', 'ecpg.header'); include_file('ecpgtype', 'ecpg.type'); $header_included = 1; } if ($tokenmode == 1) { - my $str = ''; + my $str = ''; my $prior = ''; for my $a (@arr) { @@ -320,9 +320,9 @@ sub main { $copymode = 1; } - @fields = (); + @fields = (); $infield = 0; - $line = ''; + $line = ''; $in_rule = 0; next; } @@ -365,7 +365,7 @@ sub main elsif ($replace_types{$non_term_id} eq 'ignore') { $copymode = 0; - $line = ''; + $line = ''; next line; } $line = $line . ' ' . $arr[$fieldIndexer]; @@ -390,7 +390,7 @@ sub main $stmt_mode = 0; } my $tstr = - '%type ' + '%type ' . $replace_types{$non_term_id} . ' ' . $non_term_id; add_to_buffer('types', $tstr); @@ -399,8 +399,8 @@ sub main { add_to_buffer('rules', $line); } - $line = ''; - @fields = (); + $line = ''; + @fields = (); $infield = 1; die "unterminated rule at grammar line $.\n" if $in_rule; @@ -699,11 +699,11 @@ sub preload_addons { push(@{ $x->{lines} }, @code); } - @code = (); + @code = (); @needsRules = (); } - $record = {}; - $record->{type} = $2; + $record = {}; + $record->{type} = $2; $record->{lines} = []; if (exists $addons{$1}) { die "Ga! 
there are dups!\n"; } $addons{$1} = $record; diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c index 58119d1102..91adb89de9 100644 --- a/src/interfaces/ecpg/preproc/type.c +++ b/src/interfaces/ecpg/preproc/type.c @@ -78,7 +78,7 @@ ECPGmake_struct_member(const char *name, struct ECPGtype *type, struct ECPGstruc { struct ECPGstruct_member *ptr, *ne = - (struct ECPGstruct_member *) mm_alloc(sizeof(struct ECPGstruct_member)); + (struct ECPGstruct_member *) mm_alloc(sizeof(struct ECPGstruct_member)); ne->name = mm_strdup(name); ne->type = type; diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index d44dffdc54..30486c59ba 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -1051,9 +1051,9 @@ libpq_prng_init(PGconn *conn) gettimeofday(&tval, NULL); rseed = ((uintptr_t) conn) ^ - ((uint64) getpid()) ^ - ((uint64) tval.tv_usec) ^ - ((uint64) tval.tv_sec); + ((uint64) getpid()) ^ + ((uint64) tval.tv_usec) ^ + ((uint64) tval.tv_sec); pg_prng_seed(&conn->prng_state, rseed); } diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index a16bbf32ef..14d706efd5 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -1448,7 +1448,7 @@ PQsendQueryInternal(PGconn *conn, const char *query, bool newQuery) if (conn->pipelineStatus != PQ_PIPELINE_OFF) { libpq_append_conn_error(conn, "%s not allowed in pipeline mode", - "PQsendQuery"); + "PQsendQuery"); return 0; } @@ -1516,7 +1516,7 @@ PQsendQueryParams(PGconn *conn, if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT) { libpq_append_conn_error(conn, "number of parameters must be between 0 and %d", - PQ_QUERY_PARAM_MAX_LIMIT); + PQ_QUERY_PARAM_MAX_LIMIT); return 0; } @@ -1562,7 +1562,7 @@ PQsendPrepare(PGconn *conn, if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT) { libpq_append_conn_error(conn, "number of parameters must be between 0 and %d", - PQ_QUERY_PARAM_MAX_LIMIT); + PQ_QUERY_PARAM_MAX_LIMIT); return 0; } @@ -1656,7 +1656,7 @@ PQsendQueryPrepared(PGconn *conn, if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT) { libpq_append_conn_error(conn, "number of parameters must be between 0 and %d", - PQ_QUERY_PARAM_MAX_LIMIT); + PQ_QUERY_PARAM_MAX_LIMIT); return 0; } @@ -2103,10 +2103,9 @@ PQgetResult(PGconn *conn) /* * We're about to return the NULL that terminates the round of - * results from the current query; prepare to send the results - * of the next query, if any, when we're called next. If there's - * no next element in the command queue, this gets us in IDLE - * state. + * results from the current query; prepare to send the results of + * the next query, if any, when we're called next. If there's no + * next element in the command queue, this gets us in IDLE state. */ pqPipelineProcessQueue(conn); res = NULL; /* query is complete */ @@ -3051,6 +3050,7 @@ pqPipelineProcessQueue(PGconn *conn) return; case PGASYNC_IDLE: + /* * If we're in IDLE mode and there's some command in the queue, * get us into PIPELINE_IDLE mode and process normally. 
Otherwise diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c index 4cb6a46859..206266fd04 100644 --- a/src/interfaces/libpq/fe-lobj.c +++ b/src/interfaces/libpq/fe-lobj.c @@ -142,7 +142,7 @@ lo_truncate(PGconn *conn, int fd, size_t len) if (conn->lobjfuncs->fn_lo_truncate == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_truncate"); + "lo_truncate"); return -1; } @@ -205,7 +205,7 @@ lo_truncate64(PGconn *conn, int fd, pg_int64 len) if (conn->lobjfuncs->fn_lo_truncate64 == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_truncate64"); + "lo_truncate64"); return -1; } @@ -395,7 +395,7 @@ lo_lseek64(PGconn *conn, int fd, pg_int64 offset, int whence) if (conn->lobjfuncs->fn_lo_lseek64 == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_lseek64"); + "lo_lseek64"); return -1; } @@ -485,7 +485,7 @@ lo_create(PGconn *conn, Oid lobjId) if (conn->lobjfuncs->fn_lo_create == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_create"); + "lo_create"); return InvalidOid; } @@ -558,7 +558,7 @@ lo_tell64(PGconn *conn, int fd) if (conn->lobjfuncs->fn_lo_tell64 == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_tell64"); + "lo_tell64"); return -1; } @@ -667,7 +667,7 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) if (fd < 0) { /* error */ libpq_append_conn_error(conn, "could not open file \"%s\": %s", - filename, strerror_r(errno, sebuf, sizeof(sebuf))); + filename, strerror_r(errno, sebuf, sizeof(sebuf))); return InvalidOid; } @@ -723,8 +723,8 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) /* deliberately overwrite any error from lo_close */ pqClearConnErrorState(conn); libpq_append_conn_error(conn, "could not read from file \"%s\": %s", - filename, - strerror_r(save_errno, sebuf, sizeof(sebuf))); + filename, + strerror_r(save_errno, sebuf, sizeof(sebuf))); return InvalidOid; } @@ -778,8 +778,8 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) /* deliberately overwrite any error from lo_close */ pqClearConnErrorState(conn); libpq_append_conn_error(conn, "could not open file \"%s\": %s", - filename, - strerror_r(save_errno, sebuf, sizeof(sebuf))); + filename, + strerror_r(save_errno, sebuf, sizeof(sebuf))); return -1; } @@ -799,8 +799,8 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) /* deliberately overwrite any error from lo_close */ pqClearConnErrorState(conn); libpq_append_conn_error(conn, "could not write to file \"%s\": %s", - filename, - strerror_r(save_errno, sebuf, sizeof(sebuf))); + filename, + strerror_r(save_errno, sebuf, sizeof(sebuf))); return -1; } } @@ -822,7 +822,7 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) if (close(fd) != 0 && result >= 0) { libpq_append_conn_error(conn, "could not write to file \"%s\": %s", - filename, strerror_r(errno, sebuf, sizeof(sebuf))); + filename, strerror_r(errno, sebuf, sizeof(sebuf))); result = -1; } @@ -954,56 +954,56 @@ lo_initialize(PGconn *conn) if (lobjfuncs->fn_lo_open == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_open"); + "lo_open"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_close == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_close"); + "lo_close"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_creat == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_creat"); + "lo_creat"); 
free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_unlink == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_unlink"); + "lo_unlink"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_lseek == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_lseek"); + "lo_lseek"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_tell == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lo_tell"); + "lo_tell"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_read == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "loread"); + "loread"); free(lobjfuncs); return -1; } if (lobjfuncs->fn_lo_write == 0) { libpq_append_conn_error(conn, "cannot determine OID of function %s", - "lowrite"); + "lowrite"); free(lobjfuncs); return -1; } diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c index 3653a1a8a6..660cdec93c 100644 --- a/src/interfaces/libpq/fe-misc.c +++ b/src/interfaces/libpq/fe-misc.c @@ -749,8 +749,8 @@ retry4: */ definitelyEOF: libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); /* Come here if lower-level code already set a suitable errorMessage */ definitelyFailed: @@ -1067,7 +1067,7 @@ pqSocketCheck(PGconn *conn, int forRead, int forWrite, time_t end_time) char sebuf[PG_STRERROR_R_BUFLEN]; libpq_append_conn_error(conn, "%s() failed: %s", "select", - SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); + SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); } return result; @@ -1280,7 +1280,7 @@ libpq_ngettext(const char *msgid, const char *msgid_plural, unsigned long n) * newline. */ void -libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) +libpq_append_error(PQExpBuffer errorMessage, const char *fmt,...) { int save_errno = errno; bool done; @@ -1309,7 +1309,7 @@ libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) * format should not end with a newline. */ void -libpq_append_conn_error(PGconn *conn, const char *fmt, ...) +libpq_append_conn_error(PGconn *conn, const char *fmt,...) { int save_errno = errno; bool done; diff --git a/src/interfaces/libpq/fe-print.c b/src/interfaces/libpq/fe-print.c index bd60543c03..40620b47e9 100644 --- a/src/interfaces/libpq/fe-print.c +++ b/src/interfaces/libpq/fe-print.c @@ -124,7 +124,7 @@ PQprint(FILE *fout, const PGresult *res, const PQprintOpt *po) { int len; const char *s = (j < numFieldName && po->fieldName[j][0]) ? - po->fieldName[j] : PQfname(res, j); + po->fieldName[j] : PQfname(res, j); fieldNames[j] = s; len = s ? 
strlen(s) : 0; diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index 698124e887..7222adabba 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -466,7 +466,7 @@ static void handleSyncLoss(PGconn *conn, char id, int msgLength) { libpq_append_conn_error(conn, "lost synchronization with server: got message type \"%c\", length %d", - id, msgLength); + id, msgLength); /* build an error result holding the error message */ pqSaveErrorResult(conn); conn->asyncStatus = PGASYNC_READY; /* drop out of PQgetResult wait loop */ diff --git a/src/interfaces/libpq/fe-secure-common.c b/src/interfaces/libpq/fe-secure-common.c index de115b3764..3ecc7bf615 100644 --- a/src/interfaces/libpq/fe-secure-common.c +++ b/src/interfaces/libpq/fe-secure-common.c @@ -226,7 +226,7 @@ pq_verify_peer_name_matches_certificate_ip(PGconn *conn, * wrong given the subject matter. */ libpq_append_conn_error(conn, "certificate contains IP address with invalid length %zu", - iplen); + iplen); return -1; } @@ -235,7 +235,7 @@ pq_verify_peer_name_matches_certificate_ip(PGconn *conn, if (!addrstr) { libpq_append_conn_error(conn, "could not convert certificate's IP address to string: %s", - strerror_r(errno, sebuf, sizeof(sebuf))); + strerror_r(errno, sebuf, sizeof(sebuf))); return -1; } @@ -292,7 +292,7 @@ pq_verify_peer_name_matches_certificate(PGconn *conn) else if (names_examined == 1) { libpq_append_conn_error(conn, "server certificate for \"%s\" does not match host name \"%s\"", - first_name, host); + first_name, host); } else { diff --git a/src/interfaces/libpq/fe-secure-gssapi.c b/src/interfaces/libpq/fe-secure-gssapi.c index 95ded9eeaa..3b2d0fd140 100644 --- a/src/interfaces/libpq/fe-secure-gssapi.c +++ b/src/interfaces/libpq/fe-secure-gssapi.c @@ -213,8 +213,8 @@ pg_GSS_write(PGconn *conn, const void *ptr, size_t len) if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)) { libpq_append_conn_error(conn, "client tried to send oversize GSSAPI packet (%zu > %zu)", - (size_t) output.length, - PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)); + (size_t) output.length, + PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)); errno = EIO; /* for lack of a better idea */ goto cleanup; } @@ -349,8 +349,8 @@ pg_GSS_read(PGconn *conn, void *ptr, size_t len) if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) { libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)", - (size_t) input.length, - PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); + (size_t) input.length, + PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); errno = EIO; /* for lack of a better idea */ return -1; } @@ -591,8 +591,8 @@ pqsecure_open_gss(PGconn *conn) if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) { libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)", - (size_t) input.length, - PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); + (size_t) input.length, + PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)); return PGRES_POLLING_FAILED; } diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index 470e926540..390c888c96 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -213,12 +213,12 @@ rloop: if (result_errno == EPIPE || result_errno == ECONNRESET) libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis 
probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); else libpq_append_conn_error(conn, "SSL SYSCALL error: %s", - SOCK_STRERROR(result_errno, - sebuf, sizeof(sebuf))); + SOCK_STRERROR(result_errno, + sebuf, sizeof(sebuf))); } else { @@ -313,12 +313,12 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len) result_errno = SOCK_ERRNO; if (result_errno == EPIPE || result_errno == ECONNRESET) libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); else libpq_append_conn_error(conn, "SSL SYSCALL error: %s", - SOCK_STRERROR(result_errno, - sebuf, sizeof(sebuf))); + SOCK_STRERROR(result_errno, + sebuf, sizeof(sebuf))); } else { @@ -415,7 +415,7 @@ pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len) if (algo_type == NULL) { libpq_append_conn_error(conn, "could not find digest for NID %s", - OBJ_nid2sn(algo_nid)); + OBJ_nid2sn(algo_nid)); return NULL; } break; @@ -1000,7 +1000,7 @@ initialize_SSL(PGconn *conn) if (ssl_min_ver == -1) { libpq_append_conn_error(conn, "invalid value \"%s\" for minimum SSL protocol version", - conn->ssl_min_protocol_version); + conn->ssl_min_protocol_version); SSL_CTX_free(SSL_context); return -1; } @@ -1026,7 +1026,7 @@ initialize_SSL(PGconn *conn) if (ssl_max_ver == -1) { libpq_append_conn_error(conn, "invalid value \"%s\" for maximum SSL protocol version", - conn->ssl_max_protocol_version); + conn->ssl_max_protocol_version); SSL_CTX_free(SSL_context); return -1; } @@ -1091,7 +1091,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not read root certificate file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); SSL_CTX_free(SSL_context); return -1; @@ -1161,7 +1161,7 @@ initialize_SSL(PGconn *conn) else fnbuf[0] = '\0'; - if (conn->sslcertmode[0] == 'd') /* disable */ + if (conn->sslcertmode[0] == 'd') /* disable */ { /* don't send a client cert even if we have one */ have_cert = false; @@ -1181,7 +1181,7 @@ initialize_SSL(PGconn *conn) if (errno != ENOENT && errno != ENOTDIR) { libpq_append_conn_error(conn, "could not open certificate file \"%s\": %s", - fnbuf, strerror_r(errno, sebuf, sizeof(sebuf))); + fnbuf, strerror_r(errno, sebuf, sizeof(sebuf))); SSL_CTX_free(SSL_context); return -1; } @@ -1199,7 +1199,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not read certificate file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); SSL_CTX_free(SSL_context); return -1; @@ -1298,7 +1298,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not load SSL engine \"%s\": %s", - engine_str, err); + engine_str, err); SSLerrfree(err); free(engine_str); return -1; @@ -1309,7 +1309,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not initialize SSL engine \"%s\": %s", - engine_str, err); + engine_str, err); SSLerrfree(err); ENGINE_free(conn->engine); conn->engine = NULL; @@ -1324,7 +1324,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not read private SSL key \"%s\" from engine \"%s\": %s", - engine_colon, engine_str, err); + engine_colon, engine_str, 
err); SSLerrfree(err); ENGINE_finish(conn->engine); ENGINE_free(conn->engine); @@ -1337,7 +1337,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "could not load private SSL key \"%s\" from engine \"%s\": %s", - engine_colon, engine_str, err); + engine_colon, engine_str, err); SSLerrfree(err); ENGINE_finish(conn->engine); ENGINE_free(conn->engine); @@ -1374,10 +1374,10 @@ initialize_SSL(PGconn *conn) { if (errno == ENOENT) libpq_append_conn_error(conn, "certificate present, but not private key file \"%s\"", - fnbuf); + fnbuf); else libpq_append_conn_error(conn, "could not stat private key file \"%s\": %m", - fnbuf); + fnbuf); return -1; } @@ -1385,7 +1385,7 @@ initialize_SSL(PGconn *conn) if (!S_ISREG(buf.st_mode)) { libpq_append_conn_error(conn, "private key file \"%s\" is not a regular file", - fnbuf); + fnbuf); return -1; } @@ -1442,7 +1442,7 @@ initialize_SSL(PGconn *conn) if (SSL_use_PrivateKey_file(conn->ssl, fnbuf, SSL_FILETYPE_ASN1) != 1) { libpq_append_conn_error(conn, "could not load private key file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); return -1; } @@ -1458,7 +1458,7 @@ initialize_SSL(PGconn *conn) char *err = SSLerrmessage(ERR_get_error()); libpq_append_conn_error(conn, "certificate does not match private key file \"%s\": %s", - fnbuf, err); + fnbuf, err); SSLerrfree(err); return -1; } @@ -1520,8 +1520,8 @@ open_client_SSL(PGconn *conn) * it means that verification failed due to a missing * system CA pool without it being a protocol error. We * inspect the sslrootcert setting to ensure that the user - * was using the system CA pool. For other errors, log them - * using the normal SYSCALL logging. + * was using the system CA pool. For other errors, log + * them using the normal SYSCALL logging. */ if (!save_errno && vcode == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY && strcmp(conn->sslrootcert, "system") == 0) @@ -1529,7 +1529,7 @@ open_client_SSL(PGconn *conn) X509_verify_cert_error_string(vcode)); else if (r == -1) libpq_append_conn_error(conn, "SSL SYSCALL error: %s", - SOCK_STRERROR(save_errno, sebuf, sizeof(sebuf))); + SOCK_STRERROR(save_errno, sebuf, sizeof(sebuf))); else libpq_append_conn_error(conn, "SSL SYSCALL error: EOF detected"); pgtls_close(conn); @@ -1571,12 +1571,12 @@ open_client_SSL(PGconn *conn) case SSL_R_VERSION_TOO_LOW: #endif libpq_append_conn_error(conn, "This may indicate that the server does not support any SSL protocol version between %s and %s.", - conn->ssl_min_protocol_version ? - conn->ssl_min_protocol_version : - MIN_OPENSSL_TLS_VERSION, - conn->ssl_max_protocol_version ? - conn->ssl_max_protocol_version : - MAX_OPENSSL_TLS_VERSION); + conn->ssl_min_protocol_version ? + conn->ssl_min_protocol_version : + MIN_OPENSSL_TLS_VERSION, + conn->ssl_max_protocol_version ? 
+ conn->ssl_max_protocol_version : + MAX_OPENSSL_TLS_VERSION); break; default: break; diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c index 66e401bf3d..8069e38142 100644 --- a/src/interfaces/libpq/fe-secure.c +++ b/src/interfaces/libpq/fe-secure.c @@ -255,14 +255,14 @@ pqsecure_raw_read(PGconn *conn, void *ptr, size_t len) case EPIPE: case ECONNRESET: libpq_append_conn_error(conn, "server closed the connection unexpectedly\n" - "\tThis probably means the server terminated abnormally\n" - "\tbefore or while processing the request."); + "\tThis probably means the server terminated abnormally\n" + "\tbefore or while processing the request."); break; default: libpq_append_conn_error(conn, "could not receive data from server: %s", - SOCK_STRERROR(result_errno, - sebuf, sizeof(sebuf))); + SOCK_STRERROR(result_errno, + sebuf, sizeof(sebuf))); break; } } diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index 3c9f589278..e985b57cb5 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -919,8 +919,8 @@ extern char *libpq_ngettext(const char *msgid, const char *msgid_plural, unsigne */ #undef _ -extern void libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) pg_attribute_printf(2, 3); -extern void libpq_append_conn_error(PGconn *conn, const char *fmt, ...) pg_attribute_printf(2, 3); +extern void libpq_append_error(PQExpBuffer errorMessage, const char *fmt,...) pg_attribute_printf(2, 3); +extern void libpq_append_conn_error(PGconn *conn, const char *fmt,...) pg_attribute_printf(2, 3); /* * These macros are needed to let error-handling code be portable between diff --git a/src/interfaces/libpq/t/001_uri.pl b/src/interfaces/libpq/t/001_uri.pl index cd659bc1b0..fd062a95c5 100644 --- a/src/interfaces/libpq/t/001_uri.pl +++ b/src/interfaces/libpq/t/001_uri.pl @@ -40,13 +40,13 @@ my @tests = ( q{user='uri-user' host='host' (inet)}, q{}, ], - [ q{postgresql://uri-user@}, q{user='uri-user' (local)}, q{}, ], + [ q{postgresql://uri-user@}, q{user='uri-user' (local)}, q{}, ], [ q{postgresql://host:12345/}, q{host='host' port='12345' (inet)}, q{}, ], - [ q{postgresql://host:12345}, q{host='host' port='12345' (inet)}, q{}, ], - [ q{postgresql://host/db}, q{dbname='db' host='host' (inet)}, q{}, ], - [ q{postgresql://host/}, q{host='host' (inet)}, q{}, ], - [ q{postgresql://host}, q{host='host' (inet)}, q{}, ], - [ q{postgresql://}, q{(local)}, q{}, ], + [ q{postgresql://host:12345}, q{host='host' port='12345' (inet)}, q{}, ], + [ q{postgresql://host/db}, q{dbname='db' host='host' (inet)}, q{}, ], + [ q{postgresql://host/}, q{host='host' (inet)}, q{}, ], + [ q{postgresql://host}, q{host='host' (inet)}, q{}, ], + [ q{postgresql://}, q{(local)}, q{}, ], [ q{postgresql://?hostaddr=127.0.0.1}, q{hostaddr='127.0.0.1' (inet)}, q{}, @@ -101,10 +101,10 @@ my @tests = ( q{postgresql://[200z:db8::1234]/}, q{host='200z:db8::1234' (inet)}, q{}, ], - [ q{postgresql://[::1]}, q{host='::1' (inet)}, q{}, ], - [ q{postgres://}, q{(local)}, q{}, ], - [ q{postgres:///}, q{(local)}, q{}, ], - [ q{postgres:///db}, q{dbname='db' (local)}, q{}, ], + [ q{postgresql://[::1]}, q{host='::1' (inet)}, q{}, ], + [ q{postgres://}, q{(local)}, q{}, ], + [ q{postgres:///}, q{(local)}, q{}, ], + [ q{postgres:///db}, q{dbname='db' (local)}, q{}, ], [ q{postgres://uri-user@/db}, q{user='uri-user' dbname='db' (local)}, q{}, @@ -174,8 +174,8 @@ my @tests = ( q{postgresql://%}, q{}, q{libpq_uri_regress: invalid percent-encoded token: "%"}, ], 
- [ q{postgres://@host}, q{host='host' (inet)}, q{}, ], - [ q{postgres://host:/}, q{host='host' (inet)}, q{}, ], + [ q{postgres://@host}, q{host='host' (inet)}, q{}, ], + [ q{postgres://host:/}, q{host='host' (inet)}, q{}, ], [ q{postgres://:12345/}, q{port='12345' (local)}, q{}, ], [ q{postgres://otheruser@?host=/no/such/directory}, @@ -230,8 +230,7 @@ my @tests = ( [ q{postgresql://host?sslmode=verify-full}, q{host='host' (inet)}, - q{}, - PGSSLROOTCERT => "system", + q{}, PGSSLROOTCERT => "system", ]); # test to run for each of the above test definitions diff --git a/src/interfaces/libpq/t/003_load_balance_host_list.pl b/src/interfaces/libpq/t/003_load_balance_host_list.pl index 7f1bb0c5bc..21c3b8dd33 100644 --- a/src/interfaces/libpq/t/003_load_balance_host_list.pl +++ b/src/interfaces/libpq/t/003_load_balance_host_list.pl @@ -34,7 +34,8 @@ $node1->connect_fails( expected_stderr => qr/invalid load_balance_hosts value: "doesnotexist"/); # load_balance_hosts=disable should always choose the first one. -$node1->connect_ok("host=$hostlist port=$portlist load_balance_hosts=disable", +$node1->connect_ok( + "host=$hostlist port=$portlist load_balance_hosts=disable", "load_balance_hosts=disable connects to the first node", sql => "SELECT 'connect1'", log_like => [qr/statement: SELECT 'connect1'/]); @@ -42,17 +43,23 @@ $node1->connect_ok("host=$hostlist port=$portlist load_balance_hosts=disable", # Statistically the following loop with load_balance_hosts=random will almost # certainly connect at least once to each of the nodes. The chance of that not # happening is so small that it's negligible: (2/3)^50 = 1.56832855e-9 -foreach my $i (1 .. 50) { - $node1->connect_ok("host=$hostlist port=$portlist load_balance_hosts=random", +foreach my $i (1 .. 50) +{ + $node1->connect_ok( + "host=$hostlist port=$portlist load_balance_hosts=random", "repeated connections with random load balancing", sql => "SELECT 'connect2'"); } -my $node1_occurences = () = $node1->log_content() =~ /statement: SELECT 'connect2'/g; -my $node2_occurences = () = $node2->log_content() =~ /statement: SELECT 'connect2'/g; -my $node3_occurences = () = $node3->log_content() =~ /statement: SELECT 'connect2'/g; +my $node1_occurences = () = + $node1->log_content() =~ /statement: SELECT 'connect2'/g; +my $node2_occurences = () = + $node2->log_content() =~ /statement: SELECT 'connect2'/g; +my $node3_occurences = () = + $node3->log_content() =~ /statement: SELECT 'connect2'/g; -my $total_occurences = $node1_occurences + $node2_occurences + $node3_occurences; +my $total_occurences = + $node1_occurences + $node2_occurences + $node3_occurences; ok($node1_occurences > 1, "received at least one connection on node1"); ok($node2_occurences > 1, "received at least one connection on node2"); @@ -64,15 +71,18 @@ $node2->stop(); # load_balance_hosts=disable should continue trying hosts until it finds a # working one. -$node3->connect_ok("host=$hostlist port=$portlist load_balance_hosts=disable", +$node3->connect_ok( + "host=$hostlist port=$portlist load_balance_hosts=disable", "load_balance_hosts=disable continues until it connects to the a working node", sql => "SELECT 'connect3'", log_like => [qr/statement: SELECT 'connect3'/]); # Also with load_balance_hosts=random we continue to the next nodes if previous # ones are down. Connect a few times to make sure it's not just lucky. -foreach my $i (1 .. 5) { - $node3->connect_ok("host=$hostlist port=$portlist load_balance_hosts=random", +foreach my $i (1 .. 
5) +{ + $node3->connect_ok( + "host=$hostlist port=$portlist load_balance_hosts=random", "load_balance_hosts=random continues until it connects to the a working node", sql => "SELECT 'connect4'", log_like => [qr/statement: SELECT 'connect4'/]); diff --git a/src/interfaces/libpq/t/004_load_balance_dns.pl b/src/interfaces/libpq/t/004_load_balance_dns.pl index c66ee2461a..875070e212 100644 --- a/src/interfaces/libpq/t/004_load_balance_dns.pl +++ b/src/interfaces/libpq/t/004_load_balance_dns.pl @@ -34,15 +34,18 @@ if ($ENV{PG_TEST_EXTRA} !~ /\bload_balance\b/) # load balancing method is tested. # Cluster setup which is shared for testing both load balancing methods -my $can_bind_to_127_0_0_2 = $Config{osname} eq 'linux' || $PostgreSQL::Test::Utils::windows_os; +my $can_bind_to_127_0_0_2 = + $Config{osname} eq 'linux' || $PostgreSQL::Test::Utils::windows_os; # Checks for the requirements for testing load balancing method 2 -if (!$can_bind_to_127_0_0_2) { +if (!$can_bind_to_127_0_0_2) +{ plan skip_all => 'load_balance test only supported on Linux and Windows'; } my $hosts_path; -if ($windows_os) { +if ($windows_os) +{ $hosts_path = 'c:\Windows\System32\Drivers\etc\hosts'; } else @@ -52,18 +55,22 @@ else my $hosts_content = PostgreSQL::Test::Utils::slurp_file($hosts_path); -my $hosts_count = () = $hosts_content =~ /127\.0\.0\.[1-3] pg-loadbalancetest/g; -if ($hosts_count != 3) { +my $hosts_count = () = + $hosts_content =~ /127\.0\.0\.[1-3] pg-loadbalancetest/g; +if ($hosts_count != 3) +{ # Host file is not prepared for this test - plan skip_all => "hosts file was not prepared for DNS load balance test" + plan skip_all => "hosts file was not prepared for DNS load balance test"; } $PostgreSQL::Test::Cluster::use_tcp = 1; $PostgreSQL::Test::Cluster::test_pghost = '127.0.0.1'; my $port = PostgreSQL::Test::Cluster::get_free_port(); my $node1 = PostgreSQL::Test::Cluster->new('node1', port => $port); -my $node2 = PostgreSQL::Test::Cluster->new('node2', port => $port, own_host => 1); -my $node3 = PostgreSQL::Test::Cluster->new('node3', port => $port, own_host => 1); +my $node2 = + PostgreSQL::Test::Cluster->new('node2', port => $port, own_host => 1); +my $node3 = + PostgreSQL::Test::Cluster->new('node3', port => $port, own_host => 1); # Create a data directory with initdb $node1->init(); @@ -76,7 +83,8 @@ $node2->start(); $node3->start(); # load_balance_hosts=disable should always choose the first one. -$node1->connect_ok("host=pg-loadbalancetest port=$port load_balance_hosts=disable", +$node1->connect_ok( + "host=pg-loadbalancetest port=$port load_balance_hosts=disable", "load_balance_hosts=disable connects to the first node", sql => "SELECT 'connect1'", log_like => [qr/statement: SELECT 'connect1'/]); @@ -85,17 +93,23 @@ $node1->connect_ok("host=pg-loadbalancetest port=$port load_balance_hosts=disabl # Statistically the following loop with load_balance_hosts=random will almost # certainly connect at least once to each of the nodes. The chance of that not # happening is so small that it's negligible: (2/3)^50 = 1.56832855e-9 -foreach my $i (1 .. 50) { - $node1->connect_ok("host=pg-loadbalancetest port=$port load_balance_hosts=random", +foreach my $i (1 .. 
50) +{ + $node1->connect_ok( + "host=pg-loadbalancetest port=$port load_balance_hosts=random", "repeated connections with random load balancing", sql => "SELECT 'connect2'"); } -my $node1_occurences = () = $node1->log_content() =~ /statement: SELECT 'connect2'/g; -my $node2_occurences = () = $node2->log_content() =~ /statement: SELECT 'connect2'/g; -my $node3_occurences = () = $node3->log_content() =~ /statement: SELECT 'connect2'/g; +my $node1_occurences = () = + $node1->log_content() =~ /statement: SELECT 'connect2'/g; +my $node2_occurences = () = + $node2->log_content() =~ /statement: SELECT 'connect2'/g; +my $node3_occurences = () = + $node3->log_content() =~ /statement: SELECT 'connect2'/g; -my $total_occurences = $node1_occurences + $node2_occurences + $node3_occurences; +my $total_occurences = + $node1_occurences + $node2_occurences + $node3_occurences; ok($node1_occurences > 1, "received at least one connection on node1"); ok($node2_occurences > 1, "received at least one connection on node2"); @@ -107,15 +121,18 @@ $node2->stop(); # load_balance_hosts=disable should continue trying hosts until it finds a # working one. -$node3->connect_ok("host=pg-loadbalancetest port=$port load_balance_hosts=disable", +$node3->connect_ok( + "host=pg-loadbalancetest port=$port load_balance_hosts=disable", "load_balance_hosts=disable continues until it connects to the a working node", sql => "SELECT 'connect3'", log_like => [qr/statement: SELECT 'connect3'/]); # Also with load_balance_hosts=random we continue to the next nodes if previous # ones are down. Connect a few times to make sure it's not just lucky. -foreach my $i (1 .. 5) { - $node3->connect_ok("host=pg-loadbalancetest port=$port load_balance_hosts=random", +foreach my $i (1 .. 5) +{ + $node3->connect_ok( + "host=pg-loadbalancetest port=$port load_balance_hosts=random", "load_balance_hosts=random continues until it connects to the a working node", sql => "SELECT 'connect4'", log_like => [qr/statement: SELECT 'connect4'/]); diff --git a/src/pl/plperl/plc_perlboot.pl b/src/pl/plperl/plc_perlboot.pl index 8106c2ce5a..13298013d3 100644 --- a/src/pl/plperl/plc_perlboot.pl +++ b/src/pl/plperl/plc_perlboot.pl @@ -110,7 +110,7 @@ sub ::encode_array_constructor use warnings; use overload - '""' => \&to_str, + '""' => \&to_str, '@{}' => \&to_arr; sub to_str diff --git a/src/pl/plperl/text2macro.pl b/src/pl/plperl/text2macro.pl index 1de4afadbb..933632c0df 100644 --- a/src/pl/plperl/text2macro.pl +++ b/src/pl/plperl/text2macro.pl @@ -32,9 +32,9 @@ use warnings; use Getopt::Long; GetOptions( - 'prefix=s' => \my $opt_prefix, - 'name=s' => \my $opt_name, - 'strip=s' => \my $opt_strip, + 'prefix=s' => \my $opt_prefix, + 'name=s' => \my $opt_name, + 'strip=s' => \my $opt_strip, 'selftest!' => sub { exit selftest() },) or exit 1; die "No text files specified" @@ -80,7 +80,7 @@ exit 0; sub selftest { - my $tmp = "text2macro_tmp"; + my $tmp = "text2macro_tmp"; my $string = q{a '' '\\'' "" "\\"" "\\\\" "\\\\n" b}; open my $fh, '>', "$tmp.pl" or die; diff --git a/src/port/dirmod.c b/src/port/dirmod.c index 6557cf8785..07dd190cbc 100644 --- a/src/port/dirmod.c +++ b/src/port/dirmod.c @@ -145,10 +145,10 @@ pgunlink(const char *path) * the retry loop, but that seems like over-engineering for now. * * In the special case of a STATUS_DELETE_PENDING error (file already - * unlinked, but someone still has it open), we don't want to report ENOENT - * to the caller immediately, because rmdir(parent) would probably fail. 
- * We want to wait until the file truly goes away so that simple recursive - * directory unlink algorithms work. + * unlinked, but someone still has it open), we don't want to report + * ENOENT to the caller immediately, because rmdir(parent) would probably + * fail. We want to wait until the file truly goes away so that simple + * recursive directory unlink algorithms work. */ if (lstat(path, &st) < 0) { diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index 0680f8b07c..12552837a8 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -23,9 +23,9 @@ if (!$use_unix_sockets) # and then execute a reload to refresh it. sub reset_pg_hba { - my $node = shift; - my $database = shift; - my $role = shift; + my $node = shift; + my $database = shift; + my $role = shift; my $hba_method = shift; unlink($node->data_dir . '/pg_hba.conf'); @@ -95,7 +95,8 @@ $node->safe_psql( RESET scram_iterations;" ); -my $res = $node->safe_psql('postgres', +my $res = $node->safe_psql( + 'postgres', "SELECT substr(rolpassword,1,19) FROM pg_authid WHERE rolname = 'scram_role_iter'"); @@ -106,8 +107,8 @@ is($res, 'SCRAM-SHA-256$1024:', 'scram_iterations in server side ROLE'); # as earlier version cause the session to time out. SKIP: { - skip "IO::Pty and IPC::Run >= 0.98 required", 1 unless - eval { require IO::Pty; IPC::Run->VERSION('0.98'); }; + skip "IO::Pty and IPC::Run >= 0.98 required", 1 + unless eval { require IO::Pty; IPC::Run->VERSION('0.98'); }; # Alter the password on the created role using \password in psql to ensure # that clientside password changes use the scram_iterations value when @@ -117,16 +118,19 @@ SKIP: $session->set_query_timer_restart(); $session->query("SET password_encryption='scram-sha-256';"); $session->query("SET scram_iterations=42;"); - $session->query_until(qr/Enter new password/, "\\password scram_role_iter\n"); + $session->query_until(qr/Enter new password/, + "\\password scram_role_iter\n"); $session->query_until(qr/Enter it again/, "pass\n"); $session->query_until(qr/postgres=# /, "pass\n"); $session->quit; - $res = $node->safe_psql('postgres', + $res = $node->safe_psql( + 'postgres', "SELECT substr(rolpassword,1,17) FROM pg_authid WHERE rolname = 'scram_role_iter'"); - is($res, 'SCRAM-SHA-256$42:', 'scram_iterations in psql \password command'); + is($res, 'SCRAM-SHA-256$42:', + 'scram_iterations in psql \password command'); } # Create a database to test regular expression. @@ -482,7 +486,7 @@ chmod 0600, $pgpassfile or die; reset_pg_hba($node, 'all', 'all', 'password'); test_conn($node, 'user=scram_role', 'password from pgpass', 0); -test_conn($node, 'user=md5_role', 'password from pgpass', 2); +test_conn($node, 'user=md5_role', 'password from pgpass', 2); append_to_file( $pgpassfile, qq! diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl index c00f4e1b32..ef15831166 100644 --- a/src/test/authentication/t/002_saslprep.pl +++ b/src/test/authentication/t/002_saslprep.pl @@ -20,7 +20,7 @@ if (!$use_unix_sockets) # and then execute a reload to refresh it. sub reset_pg_hba { - my $node = shift; + my $node = shift; my $hba_method = shift; unlink($node->data_dir . 
'/pg_hba.conf'); @@ -34,10 +34,10 @@ sub test_login { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $node = shift; - my $role = shift; - my $password = shift; - my $expected_res = shift; + my $node = shift; + my $role = shift; + my $password = shift; + my $expected_res = shift; my $status_string = 'failed'; $status_string = 'success' if ($expected_res eq 0); @@ -93,25 +93,25 @@ CREATE ROLE saslpreptest7_role LOGIN PASSWORD E'foo\\u0627\\u0031bar'; reset_pg_hba($node, 'scram-sha-256'); # Check that #1 and #5 are treated the same as just 'IX' -test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0); +test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0); test_login($node, 'saslpreptest1_role', "\xe2\x85\xa8", 0); # but different from lower case 'ix' test_login($node, 'saslpreptest1_role', "ix", 2); # Check #4 -test_login($node, 'saslpreptest4a_role', "a", 0); +test_login($node, 'saslpreptest4a_role', "a", 0); test_login($node, 'saslpreptest4a_role', "\xc2\xaa", 0); -test_login($node, 'saslpreptest4b_role', "a", 0); +test_login($node, 'saslpreptest4b_role', "a", 0); test_login($node, 'saslpreptest4b_role', "\xc2\xaa", 0); # Check #6 and #7 - In PostgreSQL, contrary to the spec, if the password # contains prohibited characters, we use it as is, without normalization. test_login($node, 'saslpreptest6_role', "foo\x07bar", 0); -test_login($node, 'saslpreptest6_role', "foobar", 2); +test_login($node, 'saslpreptest6_role', "foobar", 2); test_login($node, 'saslpreptest7_role', "foo\xd8\xa71bar", 0); test_login($node, 'saslpreptest7_role', "foo1\xd8\xa7bar", 2); -test_login($node, 'saslpreptest7_role', "foobar", 2); +test_login($node, 'saslpreptest7_role', "foobar", 2); done_testing(); diff --git a/src/test/authentication/t/003_peer.pl b/src/test/authentication/t/003_peer.pl index a6be651ea7..3272e52cae 100644 --- a/src/test/authentication/t/003_peer.pl +++ b/src/test/authentication/t/003_peer.pl @@ -20,7 +20,7 @@ if (!$use_unix_sockets) # and then execute a reload to refresh it. sub reset_pg_hba { - my $node = shift; + my $node = shift; my $hba_method = shift; unlink($node->data_dir . '/pg_hba.conf'); @@ -33,10 +33,10 @@ sub reset_pg_hba # and then execute a reload to refresh it. sub reset_pg_ident { - my $node = shift; - my $map_name = shift; + my $node = shift; + my $map_name = shift; my $system_user = shift; - my $pg_user = shift; + my $pg_user = shift; unlink($node->data_dir . '/pg_ident.conf'); $node->append_conf('pg_ident.conf', "$map_name $system_user $pg_user"); diff --git a/src/test/authentication/t/004_file_inclusion.pl b/src/test/authentication/t/004_file_inclusion.pl index 8cd2a8dae4..55d28ad586 100644 --- a/src/test/authentication/t/004_file_inclusion.pl +++ b/src/test/authentication/t/004_file_inclusion.pl @@ -37,9 +37,9 @@ my %line_counters = ('hba_rule' => 0, 'ident_rule' => 0); # is loaded by the backend. sub add_hba_line { - my $node = shift; + my $node = shift; my $filename = shift; - my $entry = shift; + my $entry = shift; my $globline; my $fileline; my @tokens; @@ -64,7 +64,7 @@ sub add_hba_line $globline = ++$line_counters{'hba_rule'}; # Generate the expected pg_hba_file_rules line - @tokens = split(/ /, $entry); + @tokens = split(/ /, $entry); $tokens[1] = '{' . $tokens[1] . '}'; # database $tokens[2] = '{' . $tokens[2] . '}'; # user_name @@ -95,9 +95,9 @@ sub add_hba_line # returns an entry to match with pg_ident_file_mappings. 
sub add_ident_line { - my $node = shift; + my $node = shift; my $filename = shift; - my $entry = shift; + my $entry = shift; my $globline; my $fileline; my @tokens; @@ -136,7 +136,7 @@ sub add_ident_line } # Locations for the entry points of the HBA and ident files. -my $hba_file = 'subdir1/pg_hba_custom.conf'; +my $hba_file = 'subdir1/pg_hba_custom.conf'; my $ident_file = 'subdir2/pg_ident_custom.conf'; my $node = PostgreSQL::Test::Cluster->new('primary'); @@ -147,7 +147,7 @@ my $data_dir = $node->data_dir; note "Generating HBA structure with include directives"; -my $hba_expected = ''; +my $hba_expected = ''; my $ident_expected = ''; # customise main auth file names @@ -230,7 +230,7 @@ mkdir("$data_dir/ident_pos"); $ident_expected .= add_ident_line($node, "$ident_file", "include ../pg_ident_pre.conf"); $ident_expected .= add_ident_line($node, 'pg_ident_pre.conf', "pre foo bar"); -$ident_expected .= add_ident_line($node, "$ident_file", "test a b"); +$ident_expected .= add_ident_line($node, "$ident_file", "test a b"); $ident_expected .= add_ident_line($node, "$ident_file", "include ../ident_pos/pg_ident_pos.conf"); $ident_expected .= diff --git a/src/test/icu/t/010_database.pl b/src/test/icu/t/010_database.pl index 715b1bffd6..d3901f5d3f 100644 --- a/src/test/icu/t/010_database.pl +++ b/src/test/icu/t/010_database.pl @@ -54,7 +54,8 @@ b), # Test error cases in CREATE DATABASE involving locale-related options my ($ret, $stdout, $stderr) = $node1->psql('postgres', - q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' TEMPLATE template0 ENCODING UTF8}); + q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' TEMPLATE template0 ENCODING UTF8} +); isnt($ret, 0, "ICU locale must be specified for ICU provider: exit code not 0"); like( diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl index e2c928349f..39c035de32 100644 --- a/src/test/kerberos/t/001_auth.pl +++ b/src/test/kerberos/t/001_auth.pl @@ -30,26 +30,27 @@ if ($ENV{with_gssapi} ne 'yes') } elsif ($ENV{PG_TEST_EXTRA} !~ /\bkerberos\b/) { - plan skip_all => 'Potentially unsafe test GSSAPI/Kerberos not enabled in PG_TEST_EXTRA'; + plan skip_all => + 'Potentially unsafe test GSSAPI/Kerberos not enabled in PG_TEST_EXTRA'; } my ($krb5_bin_dir, $krb5_sbin_dir); -if ($^O eq 'darwin' && -d "/opt/homebrew" ) +if ($^O eq 'darwin' && -d "/opt/homebrew") { # typical paths for Homebrew on ARM - $krb5_bin_dir = '/opt/homebrew/opt/krb5/bin'; + $krb5_bin_dir = '/opt/homebrew/opt/krb5/bin'; $krb5_sbin_dir = '/opt/homebrew/opt/krb5/sbin'; } elsif ($^O eq 'darwin') { # typical paths for Homebrew on Intel - $krb5_bin_dir = '/usr/local/opt/krb5/bin'; + $krb5_bin_dir = '/usr/local/opt/krb5/bin'; $krb5_sbin_dir = '/usr/local/opt/krb5/sbin'; } elsif ($^O eq 'freebsd') { - $krb5_bin_dir = '/usr/local/bin'; + $krb5_bin_dir = '/usr/local/bin'; $krb5_sbin_dir = '/usr/local/sbin'; } elsif ($^O eq 'linux') @@ -57,44 +58,44 @@ elsif ($^O eq 'linux') $krb5_sbin_dir = '/usr/sbin'; } -my $krb5_config = 'krb5-config'; -my $kinit = 'kinit'; -my $klist = 'klist'; -my $kdb5_util = 'kdb5_util'; +my $krb5_config = 'krb5-config'; +my $kinit = 'kinit'; +my $klist = 'klist'; +my $kdb5_util = 'kdb5_util'; my $kadmin_local = 'kadmin.local'; -my $krb5kdc = 'krb5kdc'; +my $krb5kdc = 'krb5kdc'; if ($krb5_bin_dir && -d $krb5_bin_dir) { $krb5_config = $krb5_bin_dir . '/' . $krb5_config; - $kinit = $krb5_bin_dir . '/' . $kinit; - $klist = $krb5_bin_dir . '/' . $klist; + $kinit = $krb5_bin_dir . '/' . $kinit; + $klist = $krb5_bin_dir . '/' . 
$klist; } if ($krb5_sbin_dir && -d $krb5_sbin_dir) { - $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; + $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; $kadmin_local = $krb5_sbin_dir . '/' . $kadmin_local; - $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; + $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; } -my $host = 'auth-test-localhost.postgresql.example.com'; +my $host = 'auth-test-localhost.postgresql.example.com'; my $hostaddr = '127.0.0.1'; -my $realm = 'EXAMPLE.COM'; +my $realm = 'EXAMPLE.COM'; -my $krb5_conf = "${PostgreSQL::Test::Utils::tmp_check}/krb5.conf"; -my $kdc_conf = "${PostgreSQL::Test::Utils::tmp_check}/kdc.conf"; -my $krb5_cache = "${PostgreSQL::Test::Utils::tmp_check}/krb5cc"; -my $krb5_log = "${PostgreSQL::Test::Utils::log_path}/krb5libs.log"; -my $kdc_log = "${PostgreSQL::Test::Utils::log_path}/krb5kdc.log"; -my $kdc_port = PostgreSQL::Test::Cluster::get_free_port(); +my $krb5_conf = "${PostgreSQL::Test::Utils::tmp_check}/krb5.conf"; +my $kdc_conf = "${PostgreSQL::Test::Utils::tmp_check}/kdc.conf"; +my $krb5_cache = "${PostgreSQL::Test::Utils::tmp_check}/krb5cc"; +my $krb5_log = "${PostgreSQL::Test::Utils::log_path}/krb5libs.log"; +my $kdc_log = "${PostgreSQL::Test::Utils::log_path}/krb5kdc.log"; +my $kdc_port = PostgreSQL::Test::Cluster::get_free_port(); my $kdc_datadir = "${PostgreSQL::Test::Utils::tmp_check}/krb5kdc"; my $kdc_pidfile = "${PostgreSQL::Test::Utils::tmp_check}/krb5kdc.pid"; -my $keytab = "${PostgreSQL::Test::Utils::tmp_check}/krb5.keytab"; +my $keytab = "${PostgreSQL::Test::Utils::tmp_check}/krb5.keytab"; -my $pgpass = "${PostgreSQL::Test::Utils::tmp_check}/.pgpass"; +my $pgpass = "${PostgreSQL::Test::Utils::tmp_check}/.pgpass"; -my $dbname = 'postgres'; -my $username = 'test1'; +my $dbname = 'postgres'; +my $username = 'test1'; my $application = '001_auth.pl'; note "setting up Kerberos"; @@ -108,10 +109,7 @@ $stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/ $krb5_version = $1; # Construct a pgpass file to make sure we don't use it -append_to_file( - $pgpass, - '*:*:*:*:abc123' -); +append_to_file($pgpass, '*:*:*:*:abc123'); chmod 0600, $pgpass; @@ -187,9 +185,9 @@ $realm = { mkdir $kdc_datadir or die; # Ensure that we use test's config and cache files, not global ones. 
-$ENV{'KRB5_CONFIG'} = $krb5_conf; +$ENV{'KRB5_CONFIG'} = $krb5_conf; $ENV{'KRB5_KDC_PROFILE'} = $kdc_conf; -$ENV{'KRB5CCNAME'} = $krb5_cache; +$ENV{'KRB5CCNAME'} = $krb5_cache; my $service_principal = "$ENV{with_krb_srvnam}/$host"; @@ -224,24 +222,35 @@ $node->start; my $port = $node->port(); $node->safe_psql('postgres', 'CREATE USER test1;'); -$node->safe_psql('postgres', "CREATE USER test2 WITH ENCRYPTED PASSWORD 'abc123';"); +$node->safe_psql('postgres', + "CREATE USER test2 WITH ENCRYPTED PASSWORD 'abc123';"); $node->safe_psql('postgres', 'CREATE EXTENSION postgres_fdw;'); $node->safe_psql('postgres', 'CREATE EXTENSION dblink;'); -$node->safe_psql('postgres', "CREATE SERVER s1 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host '$host', hostaddr '$hostaddr', port '$port', dbname 'postgres');"); -$node->safe_psql('postgres', "CREATE SERVER s2 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (port '$port', dbname 'postgres', passfile '$pgpass');"); +$node->safe_psql('postgres', + "CREATE SERVER s1 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host '$host', hostaddr '$hostaddr', port '$port', dbname 'postgres');" +); +$node->safe_psql('postgres', + "CREATE SERVER s2 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (port '$port', dbname 'postgres', passfile '$pgpass');" +); $node->safe_psql('postgres', 'GRANT USAGE ON FOREIGN SERVER s1 TO test1;'); -$node->safe_psql('postgres', "CREATE USER MAPPING FOR test1 SERVER s1 OPTIONS (user 'test1');"); -$node->safe_psql('postgres', "CREATE USER MAPPING FOR test1 SERVER s2 OPTIONS (user 'test2');"); +$node->safe_psql('postgres', + "CREATE USER MAPPING FOR test1 SERVER s1 OPTIONS (user 'test1');"); +$node->safe_psql('postgres', + "CREATE USER MAPPING FOR test1 SERVER s2 OPTIONS (user 'test2');"); $node->safe_psql('postgres', "CREATE TABLE t1 (c1 int);"); $node->safe_psql('postgres', "INSERT INTO t1 VALUES (1);"); -$node->safe_psql('postgres', "CREATE FOREIGN TABLE tf1 (c1 int) SERVER s1 OPTIONS (schema_name 'public', table_name 't1');"); +$node->safe_psql('postgres', + "CREATE FOREIGN TABLE tf1 (c1 int) SERVER s1 OPTIONS (schema_name 'public', table_name 't1');" +); $node->safe_psql('postgres', "GRANT SELECT ON t1 TO test1;"); $node->safe_psql('postgres', "GRANT SELECT ON tf1 TO test1;"); -$node->safe_psql('postgres', "CREATE FOREIGN TABLE tf2 (c1 int) SERVER s2 OPTIONS (schema_name 'public', table_name 't1');"); +$node->safe_psql('postgres', + "CREATE FOREIGN TABLE tf2 (c1 int) SERVER s2 OPTIONS (schema_name 'public', table_name 't1');" +); $node->safe_psql('postgres', "GRANT SELECT ON tf2 TO test1;"); # Set up a table for SYSTEM_USER parallel worker testing. @@ -302,13 +311,14 @@ sub test_query $node->connect_ok( $connstr, $test_name, - sql => $query, + sql => $query, expected_stdout => $expected); return; } unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 host all all $hostaddr/32 gss map=mymap @@ -453,7 +463,8 @@ test_query( 'testing system_user with parallel workers'); unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 hostgssenc all all $hostaddr/32 gss map=mymap @@ -485,8 +496,7 @@ test_access( "connection authorized: user=$username database=$dbname application_name=$application GSS (authenticated=yes, encrypted=yes, deleg_credentials=no, principal=test1\@$realm)" ); -$node->append_conf('postgresql.conf', - qq{gss_accept_deleg=off}); +$node->append_conf('postgresql.conf', qq{gss_accept_deleg=off}); $node->restart; test_access( @@ -510,8 +520,7 @@ test_access( "connection authorized: user=$username database=$dbname application_name=$application GSS (authenticated=yes, encrypted=yes, deleg_credentials=no, principal=test1\@$realm)" ); -$node->append_conf('postgresql.conf', - qq{gss_accept_deleg=on}); +$node->append_conf('postgresql.conf', qq{gss_accept_deleg=on}); $node->restart; test_access( @@ -560,57 +569,77 @@ my $psql_stderr = ''; my $psql_rc = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "SELECT * FROM dblink('user=test1 dbname=$dbname host=$host hostaddr=$hostaddr port=$port','select 1') as t1(c1 int);", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','dblink attempt fails without delegated credentials'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'dblink does not work without delegated credentials'); -like($psql_out, qr/^$/,'dblink does not work without delegated credentials'); + stderr => \$psql_stderr); +is($psql_rc, '3', 'dblink attempt fails without delegated credentials'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'dblink does not work without delegated credentials'); +like($psql_out, qr/^$/, 'dblink does not work without delegated credentials'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "SELECT * FROM dblink('user=test2 dbname=$dbname port=$port passfile=$pgpass','select 1') as t1(c1 int);", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','dblink does not work without delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'dblink does not work without delegated credentials and with passfile'); -like($psql_out, qr/^$/,'dblink does not work without delegated credentials and with passfile'); + stderr => \$psql_stderr); +is($psql_rc, '3', + 'dblink does not work without delegated credentials and with passfile'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'dblink does not work without delegated credentials and with passfile'); +like($psql_out, qr/^$/, + 'dblink does not work without delegated credentials and with passfile'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "TABLE tf1;", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','postgres_fdw does not work without delegated credentials'); 
-like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'postgres_fdw does not work without delegated credentials'); -like($psql_out, qr/^$/,'postgres_fdw does not work without delegated credentials'); + stderr => \$psql_stderr); +is($psql_rc, '3', 'postgres_fdw does not work without delegated credentials'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'postgres_fdw does not work without delegated credentials'); +like($psql_out, qr/^$/, + 'postgres_fdw does not work without delegated credentials'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "TABLE tf2;", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr + stderr => \$psql_stderr); +is($psql_rc, '3', + 'postgres_fdw does not work without delegated credentials and with passfile' +); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'postgres_fdw does not work without delegated credentials and with passfile' +); +like($psql_out, qr/^$/, + 'postgres_fdw does not work without delegated credentials and with passfile' ); -is($psql_rc,'3','postgres_fdw does not work without delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'postgres_fdw does not work without delegated credentials and with passfile'); -like($psql_out, qr/^$/,'postgres_fdw does not work without delegated credentials and with passfile'); test_access($node, 'test1', 'SELECT true', 2, 'gssencmode=disable', 'fails with GSS encryption disabled and hostgssenc hba'); @@ -626,7 +655,8 @@ $node->connect_ok( "multiple authentication types requested, works with GSS encryption"); unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 hostnogssenc all all $hostaddr/32 gss map=mymap @@ -662,7 +692,8 @@ test_query( "SELECT * FROM dblink('user=test1 dbname=$dbname host=$host hostaddr=$hostaddr port=$port','select 1') as t1(c1 int);", qr/^1$/s, 'gssencmode=prefer gssdeleg=enable', - 'dblink works not-encrypted (server not configured to accept encrypted GSSAPI connections)'); + 'dblink works not-encrypted (server not configured to accept encrypted GSSAPI connections)' +); test_query( $node, @@ -670,39 +701,54 @@ test_query( "TABLE tf1;", qr/^1$/s, 'gssencmode=prefer gssdeleg=enable', - 'postgres_fdw works not-encrypted (server not configured to accept encrypted GSSAPI connections)'); + 'postgres_fdw works not-encrypted (server not configured to accept encrypted GSSAPI connections)' +); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "SELECT * FROM dblink('user=test2 dbname=$dbname port=$port passfile=$pgpass','select 1') as t1(c1 int);", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','dblink does not work with delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'dblink does not work with delegated credentials and with passfile'); -like($psql_out, qr/^$/,'dblink does not work with delegated credentials and with passfile'); + stderr => \$psql_stderr); +is($psql_rc, '3', + 'dblink does not work with delegated credentials and with passfile'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'dblink does not work with delegated credentials and with passfile'); +like($psql_out, qr/^$/, + 'dblink does not work with delegated credentials and with passfile'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "TABLE tf2;", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", stdout => \$psql_out, - stderr => \$psql_stderr + stderr => \$psql_stderr); +is($psql_rc, '3', + 'postgres_fdw does not work with delegated credentials and with passfile' +); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'postgres_fdw does not work with delegated credentials and with passfile' +); +like($psql_out, qr/^$/, + 'postgres_fdw does not work with delegated credentials and with passfile' ); -is($psql_rc,'3','postgres_fdw does not work with delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'postgres_fdw does not work with delegated credentials and with passfile'); -like($psql_out, qr/^$/,'postgres_fdw does not work with delegated credentials and with passfile'); truncate($node->data_dir . '/pg_ident.conf', 0); unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 host all all $hostaddr/32 gss include_realm=0 @@ -729,17 +775,15 @@ test_query( 'dblink works encrypted'); test_query( - $node, - 'test1', - "TABLE tf1;", - qr/^1$/s, + $node, 'test1', "TABLE tf1;", qr/^1$/s, 'gssencmode=require gssdeleg=enable', 'postgres_fdw works encrypted'); # Reset pg_hba.conf, and cause a usermap failure with an authentication # that has passed. unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 host all all $hostaddr/32 gss include_realm=0 krb_realm=EXAMPLE.ORG diff --git a/src/test/ldap/LdapServer.pm b/src/test/ldap/LdapServer.pm index 3cc05b8255..a4c1a1843c 100644 --- a/src/test/ldap/LdapServer.pm +++ b/src/test/ldap/LdapServer.pm @@ -66,36 +66,36 @@ INIT if ($^O eq 'darwin' && -d '/opt/homebrew/opt/openldap') { # typical paths for Homebrew on ARM - $slapd = '/opt/homebrew/opt/openldap/libexec/slapd'; + $slapd = '/opt/homebrew/opt/openldap/libexec/slapd'; $ldap_schema_dir = '/opt/homebrew/etc/openldap/schema'; } elsif ($^O eq 'darwin' && -d '/usr/local/opt/openldap') { # typical paths for Homebrew on Intel - $slapd = '/usr/local/opt/openldap/libexec/slapd'; + $slapd = '/usr/local/opt/openldap/libexec/slapd'; $ldap_schema_dir = '/usr/local/etc/openldap/schema'; } elsif ($^O eq 'darwin' && -d '/opt/local/etc/openldap') { # typical paths for MacPorts - $slapd = '/opt/local/libexec/slapd'; + $slapd = '/opt/local/libexec/slapd'; $ldap_schema_dir = '/opt/local/etc/openldap/schema'; } elsif ($^O eq 'linux') { - $slapd = '/usr/sbin/slapd'; + $slapd = '/usr/sbin/slapd'; $ldap_schema_dir = '/etc/ldap/schema' if -d '/etc/ldap/schema'; $ldap_schema_dir = '/etc/openldap/schema' if -d '/etc/openldap/schema'; } elsif ($^O eq 'freebsd') { - $slapd = '/usr/local/libexec/slapd'; + $slapd = '/usr/local/libexec/slapd'; $ldap_schema_dir = '/usr/local/etc/openldap/schema'; } elsif ($^O eq 'openbsd') { - $slapd = '/usr/local/libexec/slapd'; + $slapd = '/usr/local/libexec/slapd'; $ldap_schema_dir = '/usr/local/share/examples/openldap/schema'; } else @@ -137,25 +137,25 @@ sub new { die "no suitable binaries found" unless $setup; - my $class = shift; - my $rootpw = shift; - my $authtype = shift; # 'users' or 'anonymous' + my $class = shift; + my $rootpw = shift; + my $authtype = shift; # 'users' or 'anonymous' my $testname = basename((caller)[1], '.pl'); - my $self = {}; + my $self = {}; my $test_temp = PostgreSQL::Test::Utils::tempdir("ldap-$testname"); - my $ldap_datadir = "$test_temp/openldap-data"; - my $slapd_certs = "$test_temp/slapd-certs"; + my $ldap_datadir = "$test_temp/openldap-data"; + my $slapd_certs = "$test_temp/slapd-certs"; my $slapd_pidfile = "$test_temp/slapd.pid"; - my $slapd_conf = "$test_temp/slapd.conf"; + my $slapd_conf = "$test_temp/slapd.conf"; my $slapd_logfile = "${PostgreSQL::Test::Utils::log_path}/slapd-$testname.log"; my $ldap_server = 'localhost'; - my $ldap_port = PostgreSQL::Test::Cluster::get_free_port(); - my $ldaps_port = PostgreSQL::Test::Cluster::get_free_port(); - my $ldap_url = "ldap://$ldap_server:$ldap_port"; - my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; + my $ldap_port = PostgreSQL::Test::Cluster::get_free_port(); + my $ldaps_port = PostgreSQL::Test::Cluster::get_free_port(); + my $ldap_url = "ldap://$ldap_server:$ldap_port"; + my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; my $ldap_basedn = 'dc=example,dc=net'; my 
$ldap_rootdn = 'cn=Manager,dc=example,dc=net'; my $ldap_rootpw = $rootpw; @@ -188,7 +188,7 @@ EOC append_to_file($slapd_conf, $conf); mkdir $ldap_datadir or die "making $ldap_datadir: $!"; - mkdir $slapd_certs or die "making $slapd_certs: $!"; + mkdir $slapd_certs or die "making $slapd_certs: $!"; my $certdir = dirname(__FILE__) . "/../ssl/ssl"; @@ -205,7 +205,8 @@ EOC chmod 0600, $ldap_pwfile or die "chmod on $ldap_pwfile"; # -s0 prevents log messages ending up in syslog - system_or_bail $slapd, '-f', $slapd_conf, '-s0', '-h', "$ldap_url $ldaps_url"; + system_or_bail $slapd, '-f', $slapd_conf, '-s0', '-h', + "$ldap_url $ldaps_url"; # wait until slapd accepts requests my $retries = 0; @@ -215,25 +216,25 @@ EOC if ( system_log( "ldapsearch", "-sbase", - "-H", $ldap_url, - "-b", $ldap_basedn, - "-D", $ldap_rootdn, - "-y", $ldap_pwfile, - "-n", "'objectclass=*'") == 0); + "-H", $ldap_url, + "-b", $ldap_basedn, + "-D", $ldap_rootdn, + "-y", $ldap_pwfile, + "-n", "'objectclass=*'") == 0); die "cannot connect to slapd" if ++$retries >= 300; note "waiting for slapd to accept requests..."; Time::HiRes::usleep(1000000); } $self->{pidfile} = $slapd_pidfile; - $self->{pwfile} = $ldap_pwfile; - $self->{url} = $ldap_url; - $self->{s_url} = $ldaps_url; - $self->{server} = $ldap_server; - $self->{port} = $ldap_port; - $self->{s_port} = $ldaps_port; - $self->{basedn} = $ldap_basedn; - $self->{rootdn} = $ldap_rootdn; + $self->{pwfile} = $ldap_pwfile; + $self->{url} = $ldap_url; + $self->{s_url} = $ldaps_url; + $self->{server} = $ldap_server; + $self->{port} = $ldap_port; + $self->{s_port} = $ldaps_port; + $self->{basedn} = $ldap_basedn; + $self->{rootdn} = $ldap_rootdn; bless $self, $class; push @servers, $self; @@ -244,8 +245,8 @@ EOC sub _ldapenv { my $self = shift; - my %env = %ENV; - $env{'LDAPURI'} = $self->{url}; + my %env = %ENV; + $env{'LDAPURI'} = $self->{url}; $env{'LDAPBINDDN'} = $self->{rootdn}; return %env; } @@ -287,8 +288,8 @@ Set the user's password in the LDAP server sub ldapsetpw { - my $self = shift; - my $user = shift; + my $self = shift; + my $user = shift; my $password = shift; local %ENV = $self->_ldapenv; diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl index 1e027ced01..3e113fd6eb 100644 --- a/src/test/ldap/t/001_auth.pl +++ b/src/test/ldap/t/001_auth.pl @@ -37,8 +37,8 @@ $ldap->ldapadd_file('authdata.ldif'); $ldap->ldapsetpw('uid=test1,dc=example,dc=net', 'secret1'); $ldap->ldapsetpw('uid=test2,dc=example,dc=net', 'secret2'); -my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url, - $ldaps_url, $ldap_basedn, $ldap_rootdn +my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url, + $ldaps_url, $ldap_basedn, $ldap_rootdn ) = $ldap->prop(qw(server port s_port url s_url basedn rootdn)); # don't bother to check the server's cert (though perhaps we should) diff --git a/src/test/modules/commit_ts/t/002_standby.pl b/src/test/modules/commit_ts/t/002_standby.pl index 8a3a6b40e6..59cc2b1244 100644 --- a/src/test/modules/commit_ts/t/002_standby.pl +++ b/src/test/modules/commit_ts/t/002_standby.pl @@ -11,7 +11,7 @@ use Test::More; use PostgreSQL::Test::Cluster; my $bkplabel = 'backup'; -my $primary = PostgreSQL::Test::Cluster->new('primary'); +my $primary = PostgreSQL::Test::Cluster->new('primary'); $primary->init(allows_streaming => 1); $primary->append_conf( diff --git a/src/test/modules/commit_ts/t/003_standby_2.pl b/src/test/modules/commit_ts/t/003_standby_2.pl index f750a8896a..5af511e369 100644 --- a/src/test/modules/commit_ts/t/003_standby_2.pl +++ 
b/src/test/modules/commit_ts/t/003_standby_2.pl @@ -11,7 +11,7 @@ use Test::More; use PostgreSQL::Test::Cluster; my $bkplabel = 'backup'; -my $primary = PostgreSQL::Test::Cluster->new('primary'); +my $primary = PostgreSQL::Test::Cluster->new('primary'); $primary->init(allows_streaming => 1); $primary->append_conf( 'postgresql.conf', qq{ diff --git a/src/test/modules/commit_ts/t/004_restart.pl b/src/test/modules/commit_ts/t/004_restart.pl index 20865217d9..8fe4bedb14 100644 --- a/src/test/modules/commit_ts/t/004_restart.pl +++ b/src/test/modules/commit_ts/t/004_restart.pl @@ -25,12 +25,12 @@ like( ($ret, $stdout, $stderr) = $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]); -is($ret, 0, 'getting ts of BootstrapTransactionId succeeds'); +is($ret, 0, 'getting ts of BootstrapTransactionId succeeds'); is($stdout, '', 'timestamp of BootstrapTransactionId is null'); ($ret, $stdout, $stderr) = $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]); -is($ret, 0, 'getting ts of FrozenTransactionId succeeds'); +is($ret, 0, 'getting ts of FrozenTransactionId succeeds'); is($stdout, '', 'timestamp of FrozenTransactionId is null'); # Since FirstNormalTransactionId will've occurred during initdb, long before we diff --git a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl index 4174292d2d..c96c8d7a4d 100644 --- a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl +++ b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl @@ -34,7 +34,7 @@ elsif (!$LdapServer::setup) my $clear_ldap_rootpw = "FooBaR1"; my $rot13_ldap_rootpw = "SbbOnE1"; -my $ldap = LdapServer->new($clear_ldap_rootpw, 'users'); # no anonymous auth +my $ldap = LdapServer->new($clear_ldap_rootpw, 'users'); # no anonymous auth $ldap->ldapadd_file("$FindBin::RealBin/../../../ldap/authdata.ldif"); $ldap->ldapsetpw('uid=test1,dc=example,dc=net', 'secret1'); @@ -47,7 +47,8 @@ note "setting up PostgreSQL instance"; my $node = PostgreSQL::Test::Cluster->new('node'); $node->init; $node->append_conf('postgresql.conf', "log_connections = on\n"); -$node->append_conf('postgresql.conf', "shared_preload_libraries = 'ldap_password_func'"); +$node->append_conf('postgresql.conf', + "shared_preload_libraries = 'ldap_password_func'"); $node->start; $node->safe_psql('postgres', 'CREATE USER test1;'); @@ -82,7 +83,8 @@ $node->append_conf('pg_hba.conf', ); $node->restart; -test_access($node, 'test1', 2, 'search+bind authentication fails with wrong ldapbindpasswd'); +test_access($node, 'test1', 2, + 'search+bind authentication fails with wrong ldapbindpasswd'); unlink($node->data_dir . '/pg_hba.conf'); $node->append_conf('pg_hba.conf', @@ -90,7 +92,8 @@ $node->append_conf('pg_hba.conf', ); $node->restart; -test_access($node, 'test1', 2, 'search+bind authentication fails with clear password'); +test_access($node, 'test1', 2, + 'search+bind authentication fails with clear password'); unlink($node->data_dir . 
'/pg_hba.conf'); $node->append_conf('pg_hba.conf', @@ -98,6 +101,7 @@ $node->append_conf('pg_hba.conf', ); $node->restart; -test_access($node, 'test1', 0, 'search+bind authentication succeeds with rot13ed password'); +test_access($node, 'test1', 0, + 'search+bind authentication succeeds with rot13ed password'); done_testing(); diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c index f48da7d963..f5b4d4d1ff 100644 --- a/src/test/modules/libpq_pipeline/libpq_pipeline.c +++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c @@ -985,7 +985,7 @@ test_prepared(PGconn *conn) static void notice_processor(void *arg, const char *message) { - int *n_notices = (int *) arg; + int *n_notices = (int *) arg; (*n_notices)++; fprintf(stderr, "NOTICE %d: %s", *n_notices, message); diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl index 7560439fec..056fa5c6d2 100644 --- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl +++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl @@ -53,7 +53,7 @@ for my $testname (@tests) $node->command_ok( [ 'libpq_pipeline', @extraargs, - $testname, $node->connstr('postgres') + $testname, $node->connstr('postgres') ], "libpq_pipeline $testname"); diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl index 378d7b4fc7..2b2c144ee2 100644 --- a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl +++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl @@ -69,7 +69,7 @@ my $ret = # with a bad passphrase the server should not start -ok($ret, "pg_ctl fails with bad passphrase"); +ok($ret, "pg_ctl fails with bad passphrase"); ok(!-e "$ddir/postmaster.pid", "postgres not started with bad passphrase"); # just in case diff --git a/src/test/modules/test_custom_rmgrs/t/001_basic.pl b/src/test/modules/test_custom_rmgrs/t/001_basic.pl index 347a001823..50655d3788 100644 --- a/src/test/modules/test_custom_rmgrs/t/001_basic.pl +++ b/src/test/modules/test_custom_rmgrs/t/001_basic.pl @@ -27,7 +27,8 @@ $node->safe_psql('postgres', 'CREATE EXTENSION pg_walinspect'); # make sure checkpoints don't interfere with the test. 
my $start_lsn = $node->safe_psql('postgres', - qq[SELECT lsn FROM pg_create_physical_replication_slot('regress_test_slot1', true, false);]); + qq[SELECT lsn FROM pg_create_physical_replication_slot('regress_test_slot1', true, false);] +); # write and save the WAL record's returned end LSN for verifying it later my $record_end_lsn = $node->safe_psql('postgres', @@ -36,11 +37,12 @@ my $record_end_lsn = $node->safe_psql('postgres', # ensure the WAL is written and flushed to disk $node->safe_psql('postgres', 'SELECT pg_switch_wal()'); -my $end_lsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_flush_lsn()'); +my $end_lsn = + $node->safe_psql('postgres', 'SELECT pg_current_wal_flush_lsn()'); # check if our custom WAL resource manager has successfully registered with the server -my $row_count = - $node->safe_psql('postgres', +my $row_count = $node->safe_psql( + 'postgres', qq[SELECT count(*) FROM pg_get_wal_resource_managers() WHERE rm_name = 'test_custom_rmgrs';]); is($row_count, '1', @@ -48,14 +50,14 @@ is($row_count, '1', ); # check if our custom WAL resource manager has successfully written a WAL record -my $expected = qq($record_end_lsn|test_custom_rmgrs|TEST_CUSTOM_RMGRS_MESSAGE|0|payload (10 bytes): payload123); -my $result = - $node->safe_psql('postgres', +my $expected = + qq($record_end_lsn|test_custom_rmgrs|TEST_CUSTOM_RMGRS_MESSAGE|0|payload (10 bytes): payload123); +my $result = $node->safe_psql( + 'postgres', qq[SELECT end_lsn, resource_manager, record_type, fpi_length, description FROM pg_get_wal_records_info('$start_lsn', '$end_lsn') WHERE resource_manager = 'test_custom_rmgrs';]); is($result, $expected, - 'custom WAL resource manager has successfully written a WAL record' -); + 'custom WAL resource manager has successfully written a WAL record'); $node->stop; done_testing(); diff --git a/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c b/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c index 1727910ce7..a304ba54bb 100644 --- a/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c +++ b/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c @@ -31,7 +31,7 @@ PG_MODULE_MAGIC; */ typedef struct xl_testcustomrmgrs_message { - Size message_size; /* size of the message */ + Size message_size; /* size of the message */ char message[FLEXIBLE_ARRAY_MEMBER]; /* payload */ } xl_testcustomrmgrs_message; diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index b7c6f98577..82f937fca4 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -318,6 +318,7 @@ get_altertable_subcmdinfo(PG_FUNCTION_ARGS) if (OidIsValid(sub->address.objectId)) { char *objdesc; + objdesc = getObjectDescription((const ObjectAddress *) &sub->address, false); values[1] = CStringGetTextDatum(objdesc); } diff --git a/src/test/modules/test_misc/t/001_constraint_validation.pl b/src/test/modules/test_misc/t/001_constraint_validation.pl index 64766c1e33..5a07a5d36d 100644 --- a/src/test/modules/test_misc/t/001_constraint_validation.pl +++ b/src/test/modules/test_misc/t/001_constraint_validation.pl @@ -25,8 +25,8 @@ sub run_sql_command $node->psql( 'postgres', $sql, - stderr => \$stderr, - on_error_die => 1, + stderr => \$stderr, + on_error_die => 1, on_error_stop => 1); return $stderr; } diff --git a/src/test/modules/test_misc/t/002_tablespace.pl b/src/test/modules/test_misc/t/002_tablespace.pl index 95cd2b7b65..f774a021a8 100644 --- 
a/src/test/modules/test_misc/t/002_tablespace.pl +++ b/src/test/modules/test_misc/t/002_tablespace.pl @@ -13,7 +13,7 @@ $node->init; $node->start; # Create a couple of directories to use as tablespaces. -my $basedir = $node->basedir(); +my $basedir = $node->basedir(); my $TS1_LOCATION = "$basedir/ts1"; $TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar mkdir($TS1_LOCATION); diff --git a/src/test/modules/test_misc/t/003_check_guc.pl b/src/test/modules/test_misc/t/003_check_guc.pl index e9f33f3c77..4fd6d03b9e 100644 --- a/src/test/modules/test_misc/t/003_check_guc.pl +++ b/src/test/modules/test_misc/t/003_check_guc.pl @@ -73,8 +73,8 @@ close $contents; # Cross-check that all the GUCs found in the sample file match the ones # fetched above. This maps the arrays to a hash, making the creation of # each exclude and intersection list easier. -my %gucs_in_file_hash = map { $_ => 1 } @gucs_in_file; -my %all_params_hash = map { $_ => 1 } @all_params_array; +my %gucs_in_file_hash = map { $_ => 1 } @gucs_in_file; +my %all_params_hash = map { $_ => 1 } @all_params_array; my %not_in_sample_hash = map { $_ => 1 } @not_in_sample_array; my @missing_from_file = grep(!$gucs_in_file_hash{$_}, @all_params_array); @@ -91,7 +91,9 @@ is(scalar(@sample_intersect), # These would log some information only on errors. foreach my $param (@missing_from_file) { - print("found GUC $param in guc_tables.c, missing from postgresql.conf.sample\n"); + print( + "found GUC $param in guc_tables.c, missing from postgresql.conf.sample\n" + ); } foreach my $param (@missing_from_list) { diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl index 78e7677423..d00c3544e9 100644 --- a/src/test/modules/test_pg_dump/t/001_base.pl +++ b/src/test/modules/test_pg_dump/t/001_base.pl @@ -46,15 +46,15 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir; my %pgdump_runs = ( binary_upgrade => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/binary_upgrade.sql", '--schema-only', - '--binary-upgrade', '--dbname=postgres', + '--binary-upgrade', '--dbname=postgres', ], }, clean => { dump_cmd => [ 'pg_dump', "--file=$tempdir/clean.sql", - '-c', '--no-sync', + '-c', '--no-sync', '--dbname=postgres', ], }, @@ -151,26 +151,26 @@ my %pgdump_runs = ( }, extension_schema => { dump_cmd => [ - 'pg_dump', '--schema=public', + 'pg_dump', '--schema=public', "--file=$tempdir/extension_schema.sql", 'postgres', ], }, pg_dumpall_globals => { dump_cmd => [ - 'pg_dumpall', '--no-sync', + 'pg_dumpall', '--no-sync', "--file=$tempdir/pg_dumpall_globals.sql", '-g', ], }, no_privs => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/no_privs.sql", '-x', 'postgres', ], }, no_owner => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/no_owner.sql", '-O', 'postgres', ], @@ -183,14 +183,14 @@ my %pgdump_runs = ( }, section_pre_data => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/section_pre_data.sql", '--section=pre-data', 'postgres', ], }, section_data => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/section_data.sql", '--section=data', 'postgres', ], @@ -276,15 +276,15 @@ my %pgdump_runs = ( # Tests which are considered 'full' dumps by pg_dump, but there # are flags used to exclude specific items (ACLs, LOs, etc). 
my %full_runs = ( - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - exclude_table => 1, - no_privs => 1, - no_owner => 1, - with_extension => 1, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + createdb => 1, + defaults => 1, + exclude_table => 1, + no_privs => 1, + no_owner => 1, + with_extension => 1, without_extension => 1); my %tests = ( @@ -302,13 +302,13 @@ my %tests = ( 'CREATE EXTENSION test_pg_dump' => { create_order => 2, - create_sql => 'CREATE EXTENSION test_pg_dump;', - regexp => qr/^ + create_sql => 'CREATE EXTENSION test_pg_dump;', + regexp => qr/^ \QCREATE EXTENSION IF NOT EXISTS test_pg_dump WITH SCHEMA public;\E \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { binary_upgrade => 1, without_extension => 1 }, @@ -316,9 +316,9 @@ my %tests = ( 'CREATE ROLE regress_dump_test_role' => { create_order => 1, - create_sql => 'CREATE ROLE regress_dump_test_role;', - regexp => qr/^CREATE ROLE regress_dump_test_role;\n/m, - like => { pg_dumpall_globals => 1, }, + create_sql => 'CREATE ROLE regress_dump_test_role;', + regexp => qr/^CREATE ROLE regress_dump_test_role;\n/m, + like => { pg_dumpall_globals => 1, }, }, 'GRANT ALTER SYSTEM ON PARAMETER full_page_writes TO regress_dump_test_role' @@ -355,8 +355,8 @@ my %tests = ( 'CREATE SCHEMA public' => { regexp => qr/^CREATE SCHEMA public;/m, - like => { - extension_schema => 1, + like => { + extension_schema => 1, without_extension_explicit_schema => 1, }, }, @@ -400,14 +400,14 @@ my %tests = ( 'SETVAL SEQUENCE regress_seq_dumpable' => { create_order => 6, - create_sql => qq{SELECT nextval('regress_seq_dumpable');}, - regexp => qr/^ + create_sql => qq{SELECT nextval('regress_seq_dumpable');}, + regexp => qr/^ \QSELECT pg_catalog.setval('public.regress_seq_dumpable', 1, true);\E \n/xm, like => { %full_runs, - data_only => 1, - section_data => 1, + data_only => 1, + section_data => 1, extension_schema => 1, }, unlike => { without_extension => 1, }, @@ -429,20 +429,20 @@ my %tests = ( \n/xm, like => { %full_runs, - data_only => 1, - section_data => 1, + data_only => 1, + section_data => 1, extension_schema => 1, }, unlike => { - binary_upgrade => 1, - exclude_table => 1, + binary_upgrade => 1, + exclude_table => 1, without_extension => 1, }, }, 'REVOKE ALL ON FUNCTION wgo_then_no_access' => { create_order => 3, - create_sql => q{ + create_sql => q{ DO $$BEGIN EXECUTE format( 'REVOKE ALL ON FUNCTION wgo_then_no_access() FROM pg_signal_backend, public, %I', @@ -456,7 +456,7 @@ my %tests = ( /xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -464,7 +464,7 @@ my %tests = ( 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE wgo_then_regular' => { create_order => 3, - create_sql => 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE + create_sql => 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE wgo_then_regular FROM pg_signal_backend;', regexp => qr/^ \QREVOKE ALL ON SEQUENCE public.wgo_then_regular FROM pg_signal_backend;\E @@ -473,7 +473,7 @@ my %tests = ( /xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -493,7 +493,7 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { without_extension => 1, }, @@ -518,7 +518,7 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + 
schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -545,14 +545,14 @@ my %tests = ( 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' => { create_order => 4, - create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table + create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role;', regexp => qr/^ \QGRANT SELECT(col2) ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1 }, @@ -568,7 +568,7 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -583,14 +583,14 @@ my %tests = ( 'REVOKE SELECT(col1) ON regress_pg_dump_table' => { create_order => 3, - create_sql => 'REVOKE SELECT(col1) ON regress_pg_dump_table + create_sql => 'REVOKE SELECT(col1) ON regress_pg_dump_table FROM PUBLIC;', regexp => qr/^ \QREVOKE SELECT(col1) ON TABLE public.regress_pg_dump_table FROM PUBLIC;\E \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -699,13 +699,13 @@ my %tests = ( regexp => qr/^ \QALTER INDEX regress_pg_dump_schema.extdependtab_pkey DEPENDS ON EXTENSION test_pg_dump;\E\n /xms, - like => {%pgdump_runs}, + like => {%pgdump_runs}, unlike => { - data_only => 1, - extension_schema => 1, + data_only => 1, + extension_schema => 1, pg_dumpall_globals => 1, - section_data => 1, - section_pre_data => 1, + section_data => 1, + section_pre_data => 1, # Excludes this schema as extension is not listed. without_extension_explicit_schema => 1, }, @@ -715,13 +715,13 @@ my %tests = ( regexp => qr/^ \QALTER INDEX regress_pg_dump_schema.extdependtab_col2_idx DEPENDS ON EXTENSION test_pg_dump;\E\n /xms, - like => {%pgdump_runs}, + like => {%pgdump_runs}, unlike => { - data_only => 1, - extension_schema => 1, + data_only => 1, + extension_schema => 1, pg_dumpall_globals => 1, - section_data => 1, - section_pre_data => 1, + section_data => 1, + section_pre_data => 1, # Excludes this schema as extension is not listed. without_extension_explicit_schema => 1, }, @@ -730,7 +730,7 @@ my %tests = ( # Objects not included in extension, part of schema created by extension 'CREATE TABLE regress_pg_dump_schema.external_tab' => { create_order => 4, - create_sql => 'CREATE TABLE regress_pg_dump_schema.external_tab + create_sql => 'CREATE TABLE regress_pg_dump_schema.external_tab (col1 int);', regexp => qr/^ \QCREATE TABLE regress_pg_dump_schema.external_tab (\E @@ -738,7 +738,7 @@ my %tests = ( \n\);\n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, # Excludes the extension and keeps the schema's data. 
without_extension_internal_schema => 1, diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm index 349bebeaea..843f65b448 100644 --- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm +++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm @@ -329,13 +329,13 @@ sub adjust_old_dumpfile # adjust some places where we don't print so many parens anymore my $prefix = "CONSTRAINT (?:sequence|copy)_con CHECK [(][(]"; - my $orig = "((x > 3) AND (y <> 'check failed'::text))"; - my $repl = "(x > 3) AND (y <> 'check failed'::text)"; + my $orig = "((x > 3) AND (y <> 'check failed'::text))"; + my $repl = "(x > 3) AND (y <> 'check failed'::text)"; $dump =~ s/($prefix)\Q$orig\E/$1$repl/mg; $prefix = "CONSTRAINT insert_con CHECK [(][(]"; - $orig = "((x >= 3) AND (y <> 'check failed'::text))"; - $repl = "(x >= 3) AND (y <> 'check failed'::text)"; + $orig = "((x >= 3) AND (y <> 'check failed'::text))"; + $repl = "(x >= 3) AND (y <> 'check failed'::text)"; $dump =~ s/($prefix)\Q$orig\E/$1$repl/mg; $orig = "DEFAULT ((-1) * currval('public.insert_seq'::regclass))"; @@ -406,78 +406,78 @@ sub adjust_old_dumpfile # Data for _mash_view_qualifiers my @_unused_view_qualifiers = ( # Present at least since 9.2 - { obj => 'VIEW public.trigger_test_view', qual => 'trigger_test' }, - { obj => 'VIEW public.domview', qual => 'domtab' }, + { obj => 'VIEW public.trigger_test_view', qual => 'trigger_test' }, + { obj => 'VIEW public.domview', qual => 'domtab' }, { obj => 'VIEW public.my_property_normal', qual => 'customer' }, { obj => 'VIEW public.my_property_secure', qual => 'customer' }, - { obj => 'VIEW public.pfield_v1', qual => 'pf' }, - { obj => 'VIEW public.rtest_v1', qual => 'rtest_t1' }, - { obj => 'VIEW public.rtest_vview1', qual => 'x' }, - { obj => 'VIEW public.rtest_vview2', qual => 'rtest_view1' }, - { obj => 'VIEW public.rtest_vview3', qual => 'x' }, - { obj => 'VIEW public.rtest_vview5', qual => 'rtest_view1' }, - { obj => 'VIEW public.shoelace_obsolete', qual => 'shoelace' }, + { obj => 'VIEW public.pfield_v1', qual => 'pf' }, + { obj => 'VIEW public.rtest_v1', qual => 'rtest_t1' }, + { obj => 'VIEW public.rtest_vview1', qual => 'x' }, + { obj => 'VIEW public.rtest_vview2', qual => 'rtest_view1' }, + { obj => 'VIEW public.rtest_vview3', qual => 'x' }, + { obj => 'VIEW public.rtest_vview5', qual => 'rtest_view1' }, + { obj => 'VIEW public.shoelace_obsolete', qual => 'shoelace' }, { obj => 'VIEW public.shoelace_candelete', qual => 'shoelace_obsolete' }, - { obj => 'VIEW public.toyemp', qual => 'emp' }, - { obj => 'VIEW public.xmlview4', qual => 'emp' }, + { obj => 'VIEW public.toyemp', qual => 'emp' }, + { obj => 'VIEW public.xmlview4', qual => 'emp' }, # Since 9.3 (some of these were removed in 9.6) - { obj => 'VIEW public.tv', qual => 't' }, + { obj => 'VIEW public.tv', qual => 't' }, { obj => 'MATERIALIZED VIEW mvschema.tvm', qual => 'tv' }, - { obj => 'VIEW public.tvv', qual => 'tv' }, - { obj => 'MATERIALIZED VIEW public.tvvm', qual => 'tvv' }, - { obj => 'VIEW public.tvvmv', qual => 'tvvm' }, - { obj => 'MATERIALIZED VIEW public.bb', qual => 'tvvmv' }, - { obj => 'VIEW public.nums', qual => 'nums' }, - { obj => 'VIEW public.sums_1_100', qual => 't' }, - { obj => 'MATERIALIZED VIEW public.tm', qual => 't' }, - { obj => 'MATERIALIZED VIEW public.tmm', qual => 'tm' }, - { obj => 'MATERIALIZED VIEW public.tvmm', qual => 'tvm' }, + { obj => 'VIEW public.tvv', qual => 'tv' }, + { obj => 'MATERIALIZED VIEW public.tvvm', qual => 'tvv' }, + { obj => 'VIEW 
public.tvvmv', qual => 'tvvm' }, + { obj => 'MATERIALIZED VIEW public.bb', qual => 'tvvmv' }, + { obj => 'VIEW public.nums', qual => 'nums' }, + { obj => 'VIEW public.sums_1_100', qual => 't' }, + { obj => 'MATERIALIZED VIEW public.tm', qual => 't' }, + { obj => 'MATERIALIZED VIEW public.tmm', qual => 'tm' }, + { obj => 'MATERIALIZED VIEW public.tvmm', qual => 'tvm' }, # Since 9.4 { - obj => 'MATERIALIZED VIEW public.citext_matview', + obj => 'MATERIALIZED VIEW public.citext_matview', qual => 'citext_table' }, { - obj => 'OR REPLACE VIEW public.key_dependent_view', + obj => 'OR REPLACE VIEW public.key_dependent_view', qual => 'view_base_table' }, { - obj => 'OR REPLACE VIEW public.key_dependent_view_no_cols', + obj => 'OR REPLACE VIEW public.key_dependent_view_no_cols', qual => 'view_base_table' }, # Since 9.5 { - obj => 'VIEW public.dummy_seclabel_view1', + obj => 'VIEW public.dummy_seclabel_view1', qual => 'dummy_seclabel_tbl2' }, - { obj => 'VIEW public.vv', qual => 'test_tablesample' }, + { obj => 'VIEW public.vv', qual => 'test_tablesample' }, { obj => 'VIEW public.test_tablesample_v1', qual => 'test_tablesample' }, { obj => 'VIEW public.test_tablesample_v2', qual => 'test_tablesample' }, # Since 9.6 { - obj => 'MATERIALIZED VIEW public.test_pg_dump_mv1', + obj => 'MATERIALIZED VIEW public.test_pg_dump_mv1', qual => 'test_pg_dump_t1' }, { obj => 'VIEW public.test_pg_dump_v1', qual => 'test_pg_dump_t1' }, - { obj => 'VIEW public.mvtest_tv', qual => 'mvtest_t' }, + { obj => 'VIEW public.mvtest_tv', qual => 'mvtest_t' }, { - obj => 'MATERIALIZED VIEW mvtest_mvschema.mvtest_tvm', + obj => 'MATERIALIZED VIEW mvtest_mvschema.mvtest_tvm', qual => 'mvtest_tv' }, - { obj => 'VIEW public.mvtest_tvv', qual => 'mvtest_tv' }, + { obj => 'VIEW public.mvtest_tvv', qual => 'mvtest_tv' }, { obj => 'MATERIALIZED VIEW public.mvtest_tvvm', qual => 'mvtest_tvv' }, - { obj => 'VIEW public.mvtest_tvvmv', qual => 'mvtest_tvvm' }, - { obj => 'MATERIALIZED VIEW public.mvtest_bb', qual => 'mvtest_tvvmv' }, - { obj => 'MATERIALIZED VIEW public.mvtest_tm', qual => 'mvtest_t' }, - { obj => 'MATERIALIZED VIEW public.mvtest_tmm', qual => 'mvtest_tm' }, + { obj => 'VIEW public.mvtest_tvvmv', qual => 'mvtest_tvvm' }, + { obj => 'MATERIALIZED VIEW public.mvtest_bb', qual => 'mvtest_tvvmv' }, + { obj => 'MATERIALIZED VIEW public.mvtest_tm', qual => 'mvtest_t' }, + { obj => 'MATERIALIZED VIEW public.mvtest_tmm', qual => 'mvtest_tm' }, { obj => 'MATERIALIZED VIEW public.mvtest_tvmm', qual => 'mvtest_tvm' }, # Since 10 (some removed in 12) - { obj => 'VIEW public.itestv10', qual => 'itest10' }, - { obj => 'VIEW public.itestv11', qual => 'itest11' }, + { obj => 'VIEW public.itestv10', qual => 'itest10' }, + { obj => 'VIEW public.itestv11', qual => 'itest11' }, { obj => 'VIEW public.xmltableview2', qual => '"xmltable"' }, # Since 12 { - obj => 'MATERIALIZED VIEW public.tableam_tblmv_heap2', + obj => 'MATERIALIZED VIEW public.tableam_tblmv_heap2', qual => 'tableam_tbl_heap2' }, # Since 13 @@ -496,7 +496,7 @@ sub _mash_view_qualifiers for my $uvq (@_unused_view_qualifiers) { - my $leader = "CREATE $uvq->{obj} "; + my $leader = "CREATE $uvq->{obj} "; my $qualifier = $uvq->{qual}; # Note: we loop because there are presently some cases where the same # view name appears in multiple databases. 
Fortunately, the same @@ -507,8 +507,8 @@ _mash_view_qualifiers foreach my $chunk (@splitchunks) { my @thischunks = split /;/, $chunk, 2; - my $stmt = shift(@thischunks); - my $ostmt = $stmt; + my $stmt = shift(@thischunks); + my $ostmt = $stmt; # now $stmt is just the body of the CREATE [MATERIALIZED] VIEW $stmt =~ s/$qualifier\.//g; diff --git a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm index a9c102949f..924b57ab21 100644 --- a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm +++ b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm @@ -82,20 +82,28 @@ sub new { my $class = shift; my ($interactive, $psql_params) = @_; - my $psql = {'stdin' => '', 'stdout' => '', 'stderr' => '', 'query_timer_restart' => undef}; + my $psql = { + 'stdin' => '', + 'stdout' => '', + 'stderr' => '', + 'query_timer_restart' => undef + }; my $run; # This constructor should only be called from PostgreSQL::Test::Cluster - my ($package, $file, $line) = caller; - die "Forbidden caller of constructor: package: $package, file: $file:$line" + my ($package, $file, $line) = caller; + die + "Forbidden caller of constructor: package: $package, file: $file:$line" unless $package->isa('PostgreSQL::Test::Cluster'); - $psql->{timeout} = IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); + $psql->{timeout} = + IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); if ($interactive) { $run = IPC::Run::start $psql_params, - '<pty<', \$psql->{stdin}, '>pty>', \$psql->{stdout}, '2>', \$psql->{stderr}, + '<pty<', \$psql->{stdin}, '>pty>', \$psql->{stdout}, '2>', + \$psql->{stderr}, $psql->{timeout}; } else @@ -126,8 +134,9 @@ sub _wait_connect # errors anyway, but that might be added later.) my $banner = "background_psql: ready"; $self->{stdin} .= "\\echo $banner\n"; - $self->{run}->pump() until $self->{stdout} =~ /$banner/ || $self->{timeout}->is_expired; - $self->{stdout} = ''; # clear out banner + $self->{run}->pump() + until $self->{stdout} =~ /$banner/ || $self->{timeout}->is_expired; + $self->{stdout} = ''; # clear out banner die "psql startup timed out" if $self->{timeout}->is_expired; } @@ -173,10 +182,10 @@ sub reconnect_and_clear # restart $self->{run}->run(); - $self->{stdin} = ''; + $self->{stdin} = ''; $self->{stdout} = ''; - $self->_wait_connect() + $self->_wait_connect(); } =pod @@ -219,7 +228,7 @@ sub query $ret = $self->{stderr} eq "" ? 0 : 1; - return wantarray ? ( $output, $ret ) : $output; + return wantarray ? ($output, $ret) : $output; } =pod diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index bc9b5dc644..baea0fcd1c 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -112,10 +112,10 @@ use PostgreSQL::Version; use PostgreSQL::Test::RecursiveCopy; use Socket; use Test::More; -use PostgreSQL::Test::Utils (); +use PostgreSQL::Test::Utils (); use PostgreSQL::Test::BackgroundPsql (); -use Time::HiRes qw(usleep); -use Scalar::Util qw(blessed); +use Time::HiRes qw(usleep); +use Scalar::Util qw(blessed); our ($use_tcp, $test_localhost, $test_pghost, $last_host_assigned, $last_port_assigned, @all_nodes, $died, $portdir); @@ -132,8 +132,8 @@ INIT # Set PGHOST for backward compatibility. This doesn't work for own_host # nodes, so prefer to not rely on this when writing new tests.
- $use_tcp = !$PostgreSQL::Test::Utils::use_unix_sockets; - $test_localhost = "127.0.0.1"; + $use_tcp = !$PostgreSQL::Test::Utils::use_unix_sockets; + $test_localhost = "127.0.0.1"; $last_host_assigned = 1; if ($use_tcp) { @@ -147,7 +147,7 @@ INIT $test_pghost = PostgreSQL::Test::Utils::tempdir_short; $test_pghost =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os; } - $ENV{PGHOST} = $test_pghost; + $ENV{PGHOST} = $test_pghost; $ENV{PGDATABASE} = 'postgres'; # Tracking of last port value assigned to accelerate free port lookup. @@ -160,9 +160,10 @@ INIT $portdir = $ENV{PG_TEST_PORT_DIR}; # Otherwise, try to use a directory at the top of the build tree # or as a last resort use the tmp_check directory - my $build_dir = $ENV{MESON_BUILD_ROOT} + my $build_dir = + $ENV{MESON_BUILD_ROOT} || $ENV{top_builddir} - || $PostgreSQL::Test::Utils::tmp_check ; + || $PostgreSQL::Test::Utils::tmp_check; $portdir ||= "$build_dir/portlock"; $portdir =~ s!\\!/!g; # Make sure the directory exists @@ -408,7 +409,7 @@ sub config_data my @map; foreach my $line (@lines) { - my ($k,$v) = split (/ = /,$line,2); + my ($k, $v) = split(/ = /, $line, 2); push(@map, $k, $v); } return @map; @@ -509,14 +510,14 @@ disabled. sub init { my ($self, %params) = @_; - my $port = $self->port; + my $port = $self->port; my $pgdata = $self->data_dir; - my $host = $self->host; + my $host = $self->host; local %ENV = $self->_get_env(); $params{allows_streaming} = 0 unless defined $params{allows_streaming}; - $params{has_archiving} = 0 unless defined $params{has_archiving}; + $params{has_archiving} = 0 unless defined $params{has_archiving}; mkdir $self->backup_dir; mkdir $self->archive_dir; @@ -585,7 +586,7 @@ sub init or die("unable to set permissions for $pgdata/postgresql.conf"); $self->set_replication_conf if $params{allows_streaming}; - $self->enable_archiving if $params{has_archiving}; + $self->enable_archiving if $params{has_archiving}; return; } @@ -680,17 +681,17 @@ sub backup { my ($self, $backup_name, %params) = @_; my $backup_path = $self->backup_dir . '/' . $backup_name; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); print "# Taking pg_basebackup $backup_name from node \"$name\"\n"; PostgreSQL::Test::Utils::system_or_bail( 'pg_basebackup', '-D', - $backup_path, '-h', - $self->host, '-p', - $self->port, '--checkpoint', - 'fast', '--no-sync', + $backup_path, '-h', + $self->host, '-p', + $self->port, '--checkpoint', + 'fast', '--no-sync', @{ $params{backup_options} }); print "# Backup finished\n"; return; @@ -755,14 +756,14 @@ sub init_from_backup { my ($self, $root_node, $backup_name, %params) = @_; my $backup_path = $root_node->backup_dir . '/' . $backup_name; - my $host = $self->host; - my $port = $self->port; - my $node_name = $self->name; - my $root_name = $root_node->name; + my $host = $self->host; + my $port = $self->port; + my $node_name = $self->name; + my $root_name = $root_node->name; $params{has_streaming} = 0 unless defined $params{has_streaming}; $params{has_restoring} = 0 unless defined $params{has_restoring}; - $params{standby} = 1 unless defined $params{standby}; + $params{standby} = 1 unless defined $params{standby}; print "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n"; @@ -780,7 +781,7 @@ sub init_from_backup $backup_path . '/base.tar', '-C', $data_path); PostgreSQL::Test::Utils::system_or_bail( - $params{tar_program}, 'xf', + $params{tar_program}, 'xf', $backup_path . '/pg_wal.tar', '-C', $data_path . 
'/pg_wal'); } @@ -853,9 +854,9 @@ instead return a true or false value to indicate success or failure. sub start { my ($self, %params) = @_; - my $port = $self->port; + my $port = $self->port; my $pgdata = $self->data_dir; - my $name = $self->name; + my $name = $self->name; my $ret; BAIL_OUT("node \"$name\" is already running") if defined $self->{_pid}; @@ -872,8 +873,8 @@ sub start # -w is now the default but having it here does no harm and helps # compatibility with older versions. $ret = PostgreSQL::Test::Utils::system_log( - 'pg_ctl', '-w', '-D', $self->data_dir, - '-l', $self->logfile, '-o', "--cluster-name=$name", + 'pg_ctl', '-w', '-D', $self->data_dir, + '-l', $self->logfile, '-o', "--cluster-name=$name", 'start'); if ($ret != 0) @@ -938,7 +939,7 @@ sub stop { my ($self, $mode, %params) = @_; my $pgdata = $self->data_dir; - my $name = $self->name; + my $name = $self->name; my $ret; local %ENV = $self->_get_env(); @@ -976,9 +977,9 @@ Reload configuration parameters on the node. sub reload { my ($self) = @_; - my $port = $self->port; + my $port = $self->port; my $pgdata = $self->data_dir; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); @@ -998,11 +999,11 @@ Wrapper for pg_ctl restart sub restart { - my ($self) = @_; - my $port = $self->port; - my $pgdata = $self->data_dir; + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; my $logfile = $self->logfile; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(PGAPPNAME => undef); @@ -1027,11 +1028,11 @@ Wrapper for pg_ctl promote sub promote { - my ($self) = @_; - my $port = $self->port; - my $pgdata = $self->data_dir; + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; my $logfile = $self->logfile; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); @@ -1051,11 +1052,11 @@ Wrapper for pg_ctl logrotate sub logrotate { - my ($self) = @_; - my $port = $self->port; - my $pgdata = $self->data_dir; + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; my $logfile = $self->logfile; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); @@ -1070,7 +1071,7 @@ sub enable_streaming { my ($self, $root_node) = @_; my $root_connstr = $root_node->connstr; - my $name = $self->name; + my $name = $self->name; print "### Enabling streaming replication for node \"$name\"\n"; $self->append_conf( @@ -1155,8 +1156,8 @@ sub set_standby_mode sub enable_archiving { my ($self) = @_; - my $path = $self->archive_dir; - my $name = $self->name; + my $path = $self->archive_dir; + my $name = $self->name; print "### Enabling WAL archiving for node \"$name\"\n"; @@ -1301,7 +1302,7 @@ sub new _host => $host, _basedir => "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data", - _name => $name, + _name => $name, _logfile_generation => 0, _logfile_base => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}", @@ -1354,8 +1355,8 @@ sub new # sub _set_pg_version { - my ($self) = @_; - my $inst = $self->{_install_path}; + my ($self) = @_; + my $inst = $self->{_install_path}; my $pg_config = "pg_config"; if (defined $inst) @@ -1509,7 +1510,7 @@ called from outside the module as C. 
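The Cluster.pm hunks just below touch get_free_port() and its helper _reserve_port(). As a reading aid, here is a condensed, hypothetical sketch of the reservation idea those hunks reformat: take an exclusive lock on a per-port file, check whether the pid recorded there is still alive, and if not record our own pid. The sub name try_reserve_port and its $portdir argument are illustrative only, not the module's API.

    use Fcntl qw(O_RDWR O_CREAT SEEK_SET :flock);

    sub try_reserve_port
    {
        my ($portdir, $port) = @_;
        my $filename = "$portdir/$port.rsv";

        # open in rw mode so we don't have to reopen it and lose the lock
        sysopen(my $portfile, $filename, O_RDWR | O_CREAT)
          || die "opening port file $filename: $!";

        # take an exclusive lock to avoid concurrent access
        flock($portfile, LOCK_EX) || die "locking port file $filename: $!";

        # see if someone else has or had a reservation of this port
        my $pid = <$portfile> || "0";
        chomp $pid;
        if ($pid + 0 > 0 && kill(0, $pid))
        {
            # previous reserver is still running; try another port
            close($portfile);
            return 0;
        }

        # all good, go ahead and reserve the port
        seek($portfile, 0, SEEK_SET);
        # print the pid with a fixed width so we don't leave trailing junk
        print $portfile sprintf("%10d\n", $$);
        flock($portfile, LOCK_UN);
        close($portfile);
        return 1;
    }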
sub get_free_port { my $found = 0; - my $port = $last_port_assigned; + my $port = $last_port_assigned; while ($found == 0) { @@ -1589,14 +1590,14 @@ sub _reserve_port my $port = shift; # open in rw mode so we don't have to reopen it and lose the lock my $filename = "$portdir/$port.rsv"; - sysopen(my $portfile, $filename, O_RDWR|O_CREAT) + sysopen(my $portfile, $filename, O_RDWR | O_CREAT) || die "opening port file $filename: $!"; # take an exclusive lock to avoid concurrent access flock($portfile, LOCK_EX) || die "locking port file $filename: $!"; # see if someone else has or had a reservation of this port my $pid = <$portfile> || "0"; chomp $pid; - if ($pid +0 > 0) + if ($pid + 0 > 0) { if (kill 0, $pid) { @@ -1609,7 +1610,7 @@ sub _reserve_port # All good, go ahead and reserve the port seek($portfile, 0, SEEK_SET); # print the pid with a fixed width so we don't leave any trailing junk - print $portfile sprintf("%10d\n",$$); + print $portfile sprintf("%10d\n", $$); flock($portfile, LOCK_UN); close($portfile); push(@port_reservation_files, $filename); @@ -1705,9 +1706,9 @@ sub safe_psql my $ret = $self->psql( $dbname, $sql, %params, - stdout => \$stdout, - stderr => \$stderr, - on_error_die => 1, + stdout => \$stdout, + stderr => \$stderr, + on_error_die => 1, on_error_stop => 1); # psql can emit stderr from NOTICEs etc @@ -1819,10 +1820,10 @@ sub psql local %ENV = $self->_get_env(); - my $stdout = $params{stdout}; - my $stderr = $params{stderr}; - my $replication = $params{replication}; - my $timeout = undef; + my $stdout = $params{stdout}; + my $stderr = $params{stderr}; + my $replication = $params{replication}; + my $timeout = undef; my $timeout_exception = 'psql timed out'; # Build the connection string. @@ -1859,7 +1860,7 @@ sub psql } $params{on_error_stop} = 1 unless defined $params{on_error_stop}; - $params{on_error_die} = 0 unless defined $params{on_error_die}; + $params{on_error_die} = 0 unless defined $params{on_error_die}; push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop}; push @psql_params, @{ $params{extra_params} } @@ -1888,7 +1889,7 @@ sub psql local $@; eval { my @ipcrun_opts = (\@psql_params, '<', \$sql); - push @ipcrun_opts, '>', $stdout if defined $stdout; + push @ipcrun_opts, '>', $stdout if defined $stdout; push @ipcrun_opts, '2>', $stderr if defined $stderr; push @ipcrun_opts, $timeout if defined $timeout; @@ -2231,8 +2232,8 @@ sub connect_ok my ($ret, $stdout, $stderr) = $self->psql( 'postgres', $sql, - extra_params => ['-w'], - connstr => "$connstr", + extra_params => ['-w'], + connstr => "$connstr", on_error_stop => 0); is($ret, 0, $test_name); @@ -2306,7 +2307,7 @@ sub connect_fails 'postgres', undef, extra_params => ['-w'], - connstr => "$connstr"); + connstr => "$connstr"); isnt($ret, 0, $test_name); @@ -2353,11 +2354,11 @@ sub poll_query_until my $cmd = [ $self->installed_command('psql'), '-XAt', - '-d', $self->connstr($dbname) + '-d', $self->connstr($dbname) ]; my ($stdout, $stderr); my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { @@ -2531,8 +2532,7 @@ Returns the contents of log of the node sub log_content { my ($self) = @_; - return - PostgreSQL::Test::Utils::slurp_file($self->logfile); + return PostgreSQL::Test::Utils::slurp_file($self->logfile); } @@ -2574,11 +2574,11 @@ sub lsn { my ($self, $mode) = @_; my %modes = ( - 'insert' => 'pg_current_wal_insert_lsn()', - 'flush' => 'pg_current_wal_flush_lsn()', - 'write' => 'pg_current_wal_lsn()', + 
'insert' => 'pg_current_wal_insert_lsn()', + 'flush' => 'pg_current_wal_flush_lsn()', + 'write' => 'pg_current_wal_lsn()', 'receive' => 'pg_last_wal_receive_lsn()', - 'replay' => 'pg_last_wal_replay_lsn()'); + 'replay' => 'pg_last_wal_replay_lsn()'); $mode = '' if !defined($mode); croak "unknown mode for 'lsn': '$mode', valid modes are " @@ -2650,7 +2650,8 @@ sub wait_for_catchup } if (!defined($target_lsn)) { - my $isrecovery = $self->safe_psql('postgres', "SELECT pg_is_in_recovery()"); + my $isrecovery = + $self->safe_psql('postgres', "SELECT pg_is_in_recovery()"); chomp($isrecovery); if ($isrecovery eq 't') { @@ -2788,7 +2789,7 @@ sub wait_for_subscription_sync # Wait for all tables to finish initial sync. print "Waiting for all subscriptions in \"$name\" to synchronize data\n"; my $query = - qq[SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');]; + qq[SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');]; $self->poll_query_until($dbname, $query) or croak "timed out waiting for subscriber to synchronize data"; @@ -2821,7 +2822,7 @@ sub wait_for_log $offset = 0 unless defined $offset; my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { @@ -2903,8 +2904,8 @@ sub slot { my ($self, $slot_name) = @_; my @columns = ( - 'plugin', 'slot_type', 'datoid', 'database', - 'active', 'active_pid', 'xmin', 'catalog_xmin', + 'plugin', 'slot_type', 'datoid', 'database', + 'active', 'active_pid', 'xmin', 'catalog_xmin', 'restart_lsn'); return $self->query_hash( 'postgres', @@ -2943,7 +2944,7 @@ sub pg_recvlogical_upto my $timeout_exception = 'pg_recvlogical timed out'; croak 'slot name must be specified' unless defined($slot_name); - croak 'endpos must be specified' unless defined($endpos); + croak 'endpos must be specified' unless defined($endpos); my @cmd = ( $self->installed_command('pg_recvlogical'), @@ -3057,7 +3058,17 @@ sub create_logical_slot_on_standby my $handle; - $handle = IPC::Run::start(['pg_recvlogical', '-d', $self->connstr($dbname), '-P', 'test_decoding', '-S', $slot_name, '--create-slot'], '>', \$stdout, '2>', \$stderr); + $handle = IPC::Run::start( + [ + 'pg_recvlogical', '-d', + $self->connstr($dbname), '-P', + 'test_decoding', '-S', + $slot_name, '--create-slot' + ], + '>', + \$stdout, + '2>', + \$stderr); # Once the slot's restart_lsn is determined, the standby looks for # xl_running_xacts WAL record from the restart_lsn onwards. First wait @@ -3067,7 +3078,9 @@ sub create_logical_slot_on_standby 'postgres', qq[ SELECT restart_lsn IS NOT NULL FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name' - ]) or die "timed out waiting for logical slot to calculate its restart_lsn"; + ]) + or die + "timed out waiting for logical slot to calculate its restart_lsn"; # Then arrange for the xl_running_xacts record for which pg_recvlogical is # waiting. @@ -3075,8 +3088,9 @@ sub create_logical_slot_on_standby $handle->finish(); - is($self->slot($slot_name)->{'slot_type'}, 'logical', $slot_name . ' on standby created') - or die "could not create slot" . $slot_name; + is($self->slot($slot_name)->{'slot_type'}, + 'logical', $slot_name . ' on standby created') + or die "could not create slot" . 
$slot_name; } =pod diff --git a/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm b/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm index b4a441d6f4..15964e6217 100644 --- a/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm +++ b/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm @@ -93,7 +93,7 @@ sub copypath sub _copypath_recurse { my ($base_src_dir, $base_dest_dir, $curr_path, $filterfn) = @_; - my $srcpath = "$base_src_dir/$curr_path"; + my $srcpath = "$base_src_dir/$curr_path"; my $destpath = "$base_dest_dir/$curr_path"; # invoke the filter and skip all further operation if it returns false diff --git a/src/test/perl/PostgreSQL/Test/SimpleTee.pm b/src/test/perl/PostgreSQL/Test/SimpleTee.pm index 029a888023..82099bf503 100644 --- a/src/test/perl/PostgreSQL/Test/SimpleTee.pm +++ b/src/test/perl/PostgreSQL/Test/SimpleTee.pm @@ -27,7 +27,7 @@ BEGIN { $last_time = time; } sub _time_str { - my $tm = time; + my $tm = time; my $diff = $tm - $last_time; $last_time = $tm; my ($sec, $min, $hour) = localtime($tm); @@ -45,12 +45,12 @@ sub TIEHANDLE sub PRINT { my $self = shift; - my $ok = 1; + my $ok = 1; # The first file argument passed to tiehandle in PostgreSQL::Test::Utils is # the original stdout, which is what PROVE sees. Additional decorations # confuse it, so only put out the time string on files after the first. my $skip = 1; - my $ts = _time_str; + my $ts = _time_str; for my $fh (@$self) { print $fh ($skip ? "" : $ts), @_ or $ok = 0; diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index f03d29309d..38cd7d830d 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -146,7 +146,7 @@ BEGIN $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys'; # Check if this environment is MSYS2. $is_msys2 = - $windows_os + $windows_os && -x '/usr/bin/uname' && `uname -or` =~ /^[2-9].*Msys/; @@ -211,15 +211,15 @@ INIT # Hijack STDOUT and STDERR to the log file open(my $orig_stdout, '>&', \*STDOUT); open(my $orig_stderr, '>&', \*STDERR); - open(STDOUT, '>&', $testlog); - open(STDERR, '>&', $testlog); + open(STDOUT, '>&', $testlog); + open(STDERR, '>&', $testlog); # The test output (ok ...) needs to be printed to the original STDOUT so # that the 'prove' program can parse it, and display it to the user in # real time. But also copy it to the log file, to provide more context # in the log. my $builder = Test::More->builder; - my $fh = $builder->output; + my $fh = $builder->output; tie *$fh, "PostgreSQL::Test::SimpleTee", $orig_stdout, $testlog; $fh = $builder->failure_output; tie *$fh, "PostgreSQL::Test::SimpleTee", $orig_stderr, $testlog; @@ -284,7 +284,7 @@ sub tempdir $prefix = "tmp_test" unless defined $prefix; return File::Temp::tempdir( $prefix . '_XXXX', - DIR => $tmp_check, + DIR => $tmp_check, CLEANUP => 1); } @@ -321,7 +321,7 @@ https://postgr.es/m/20220116210241.GC756210@rfd.leadboat.com for details. sub has_wal_read_bug { return - $Config{osname} eq 'linux' + $Config{osname} eq 'linux' && $Config{archname} =~ /^sparc/ && !run_log([ qw(df -x ext4), $tmp_check ], '>', '/dev/null', '2>&1'); } @@ -563,10 +563,10 @@ sub string_replace_file my ($filename, $find, $replace) = @_; open(my $in, '<', $filename); my $content; - while(<$in>) + while (<$in>) { $_ =~ s/$find/$replace/; - $content = $content.$_; + $content = $content . $_; } close $in; open(my $out, '>', $filename); @@ -595,7 +595,7 @@ sub check_mode_recursive find( { follow_fast => 1, - wanted => sub { + wanted => sub { # Is file in the ignore list? 
foreach my $ignore ($ignore_list ? @{$ignore_list} : []) { @@ -611,7 +611,7 @@ sub check_mode_recursive unless (defined($file_stat)) { my $is_ENOENT = $!{ENOENT}; - my $msg = "unable to stat $File::Find::name: $!"; + my $msg = "unable to stat $File::Find::name: $!"; if ($is_ENOENT) { warn $msg; @@ -682,7 +682,7 @@ sub chmod_recursive find( { follow_fast => 1, - wanted => sub { + wanted => sub { my $file_stat = stat($File::Find::name); if (defined($file_stat)) diff --git a/src/test/perl/PostgreSQL/Version.pm b/src/test/perl/PostgreSQL/Version.pm index 7e5f5faba5..3705c1bdaf 100644 --- a/src/test/perl/PostgreSQL/Version.pm +++ b/src/test/perl/PostgreSQL/Version.pm @@ -52,7 +52,7 @@ use Scalar::Util qw(blessed); use overload '<=>' => \&_version_cmp, 'cmp' => \&_version_cmp, - '""' => \&_stringify; + '""' => \&_stringify; =pod @@ -74,7 +74,7 @@ of a Postgres command like `psql --version` or `pg_config --version`; sub new { my $class = shift; - my $arg = shift; + my $arg = shift; chomp $arg; diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl index 76846905a7..0c72ba0944 100644 --- a/src/test/recovery/t/001_stream_rep.pl +++ b/src/test/recovery/t/001_stream_rep.pl @@ -14,7 +14,7 @@ my $node_primary = PostgreSQL::Test::Cluster->new('primary'); # and it needs proper authentication configuration. $node_primary->init( allows_streaming => 1, - auth_extra => [ '--create-role', 'repl_role' ]); + auth_extra => [ '--create-role', 'repl_role' ]); $node_primary->start; my $backup_name = 'my_backup'; @@ -91,18 +91,18 @@ sub test_target_session_attrs { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $node1 = shift; - my $node2 = shift; + my $node1 = shift; + my $node2 = shift; my $target_node = shift; - my $mode = shift; - my $status = shift; + my $mode = shift; + my $status = shift; - my $node1_host = $node1->host; - my $node1_port = $node1->port; - my $node1_name = $node1->name; - my $node2_host = $node2->host; - my $node2_port = $node2->port; - my $node2_name = $node2->name; + my $node1_host = $node1->host; + my $node1_port = $node1->port; + my $node1_name = $node1->name; + my $node2_host = $node2->host; + my $node2_port = $node2->port; + my $node2_name = $node2->name; my $target_port = undef; $target_port = $target_node->port if (defined $target_node); my $target_name = undef; @@ -218,11 +218,11 @@ $node_primary->psql( 'postgres', " CREATE ROLE repl_role REPLICATION LOGIN; GRANT pg_read_all_settings TO repl_role;"); -my $primary_host = $node_primary->host; -my $primary_port = $node_primary->port; +my $primary_host = $node_primary->host; +my $primary_port = $node_primary->port; my $connstr_common = "host=$primary_host port=$primary_port user=repl_role"; -my $connstr_rep = "$connstr_common replication=1"; -my $connstr_db = "$connstr_common replication=database dbname=postgres"; +my $connstr_rep = "$connstr_common replication=1"; +my $connstr_db = "$connstr_common replication=database dbname=postgres"; # Test SHOW ALL my ($ret, $stdout, $stderr) = $node_primary->psql( @@ -534,8 +534,8 @@ my $connstr = $node_primary->connstr('postgres') . " replication=database"; # a replication command and a SQL command. 
$node_primary->command_fails_like( [ - 'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)", - '-c', 'BASE_BACKUP', '-d', $connstr + 'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)", + '-c', 'BASE_BACKUP', '-d', $connstr ], qr/a backup is already in progress in this session/, 'BASE_BACKUP cannot run in session already running backup'); @@ -553,8 +553,8 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', ''); my $sigchld_bb = IPC::Run::start( [ 'psql', '-X', '-c', "BASE_BACKUP (CHECKPOINT 'fast', MAX_RATE 32);", - '-c', 'SELECT pg_backup_stop()', - '-d', $connstr + '-c', 'SELECT pg_backup_stop()', + '-d', $connstr ], '<', \$sigchld_bb_stdin, @@ -577,7 +577,7 @@ is( $node_primary->poll_query_until( # The psql command should fail on pg_backup_stop(). ok( pump_until( - $sigchld_bb, $sigchld_bb_timeout, + $sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr, qr/backup is not in progress/), 'base backup cleanly cancelled'); $sigchld_bb->finish(); diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl index cccf2677e3..48e00f9e29 100644 --- a/src/test/recovery/t/002_archiving.pl +++ b/src/test/recovery/t/002_archiving.pl @@ -12,7 +12,7 @@ use File::Copy; # Initialize primary node, doing archives my $node_primary = PostgreSQL::Test::Cluster->new('primary'); $node_primary->init( - has_archiving => 1, + has_archiving => 1, allows_streaming => 1); my $backup_name = 'my_backup'; @@ -33,9 +33,9 @@ $node_standby->append_conf('postgresql.conf', # Set archive_cleanup_command and recovery_end_command, checking their # execution by the backend with dummy commands. -my $data_dir = $node_standby->data_dir; +my $data_dir = $node_standby->data_dir; my $archive_cleanup_command_file = "archive_cleanup_command.done"; -my $recovery_end_command_file = "recovery_end_command.done"; +my $recovery_end_command_file = "recovery_end_command.done"; $node_standby->append_conf( 'postgresql.conf', qq( archive_cleanup_command = 'echo archive_cleanup_done > $archive_cleanup_command_file' diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index 84d06096f6..e882ce2077 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -16,12 +16,12 @@ sub test_recovery_standby { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $test_name = shift; - my $node_name = shift; - my $node_primary = shift; + my $test_name = shift; + my $node_name = shift; + my $node_primary = shift; my $recovery_params = shift; - my $num_rows = shift; - my $until_lsn = shift; + my $num_rows = shift; + my $until_lsn = shift; my $node_standby = PostgreSQL::Test::Cluster->new($node_name); $node_standby->init_from_backup($node_primary, 'my_backup', @@ -147,7 +147,7 @@ recovery_target_time = '$recovery_time'"); my $res = run_log( [ - 'pg_ctl', '-D', $node_standby->data_dir, '-l', + 'pg_ctl', '-D', $node_standby->data_dir, '-l', $node_standby->logfile, 'start' ]); ok(!$res, 'invalid recovery startup fails'); @@ -162,13 +162,13 @@ $node_standby = PostgreSQL::Test::Cluster->new('standby_8'); $node_standby->init_from_backup( $node_primary, 'my_backup', has_restoring => 1, - standby => 0); + standby => 0); $node_standby->append_conf('postgresql.conf', "recovery_target_name = 'does_not_exist'"); run_log( [ - 'pg_ctl', '-D', $node_standby->data_dir, '-l', + 'pg_ctl', '-D', $node_standby->data_dir, '-l', $node_standby->logfile, 'start' ]); diff --git 
a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl index bc1793ca94..8fadca4204 100644 --- a/src/test/recovery/t/005_replay_delay.pl +++ b/src/test/recovery/t/005_replay_delay.pl @@ -24,7 +24,7 @@ $node_primary->backup($backup_name); # Create streaming standby from backup my $node_standby = PostgreSQL::Test::Cluster->new('standby'); -my $delay = 3; +my $delay = 3; $node_standby->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby->append_conf( diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index fe0319009b..5025d65b1b 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -109,7 +109,7 @@ $node_primary->safe_psql('postgres', my $stdout_recv = $node_primary->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, $PostgreSQL::Test::Utils::timeout_default, - 'include-xids' => '0', + 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, $expected, @@ -122,7 +122,7 @@ $node_primary->poll_query_until('postgres', $stdout_recv = $node_primary->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, $PostgreSQL::Test::Utils::timeout_default, - 'include-xids' => '0', + 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, '', 'pg_recvlogical acknowledged changes'); diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl index 900b03421f..e1273fd0f1 100644 --- a/src/test/recovery/t/009_twophase.pl +++ b/src/test/recovery/t/009_twophase.pl @@ -10,7 +10,7 @@ use PostgreSQL::Test::Utils; use Test::More; my $psql_out = ''; -my $psql_rc = ''; +my $psql_rc = ''; sub configure_and_reload { @@ -49,7 +49,7 @@ $node_paris->start; # Switch to synchronous replication in both directions configure_and_reload($node_london, "synchronous_standby_names = 'paris'"); -configure_and_reload($node_paris, "synchronous_standby_names = 'london'"); +configure_and_reload($node_paris, "synchronous_standby_names = 'london'"); # Set up nonce names for current primary and standby nodes note "Initially, london is primary and paris is standby"; diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl index 993f654a9b..6fbbeedde3 100644 --- a/src/test/recovery/t/010_logical_decoding_timelines.pl +++ b/src/test/recovery/t/010_logical_decoding_timelines.pl @@ -187,8 +187,8 @@ my $endpos = $node_replica->safe_psql('postgres', $stdout = $node_replica->pg_recvlogical_upto( 'postgres', 'before_basebackup', - $endpos, $PostgreSQL::Test::Utils::timeout_default, - 'include-xids' => '0', + $endpos, $PostgreSQL::Test::Utils::timeout_default, + 'include-xids' => '0', 'skip-empty-xacts' => '1'); # walsender likes to add a newline diff --git a/src/test/recovery/t/012_subtransactions.pl b/src/test/recovery/t/012_subtransactions.pl index 177ab9bc58..91ae79dd51 100644 --- a/src/test/recovery/t/012_subtransactions.pl +++ b/src/test/recovery/t/012_subtransactions.pl @@ -35,7 +35,7 @@ $node_primary->append_conf( $node_primary->psql('postgres', "SELECT pg_reload_conf()"); my $psql_out = ''; -my $psql_rc = ''; +my $psql_rc = ''; ############################################################################### # Check that replay will correctly set SUBTRANS and properly advance nextXid diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index 92e7b367df..ce57792f31 100644 --- 
a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -80,7 +80,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status; ]; ok( pump_until( - $killme, $psql_timeout, + $killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m), 'inserted in-progress-before-sigquit'); $killme_stdout = ''; @@ -164,7 +164,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status; ]; ok( pump_until( - $killme, $psql_timeout, + $killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m), 'inserted in-progress-before-sigkill'); $killme_stdout = ''; diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl index d1e1811fe1..3591b3309e 100644 --- a/src/test/recovery/t/014_unlogged_reinit.pl +++ b/src/test/recovery/t/014_unlogged_reinit.pl @@ -30,9 +30,9 @@ my $seqUnloggedPath = $node->safe_psql('postgres', # Test that main and init forks exist. ok(-f "$pgdata/${baseUnloggedPath}_init", 'table init fork exists'); -ok(-f "$pgdata/$baseUnloggedPath", 'table main fork exists'); -ok(-f "$pgdata/${seqUnloggedPath}_init", 'sequence init fork exists'); -ok(-f "$pgdata/$seqUnloggedPath", 'sequence main fork exists'); +ok(-f "$pgdata/$baseUnloggedPath", 'table main fork exists'); +ok(-f "$pgdata/${seqUnloggedPath}_init", 'sequence init fork exists'); +ok(-f "$pgdata/$seqUnloggedPath", 'sequence main fork exists'); # Test the sequence is($node->safe_psql('postgres', "SELECT nextval('seq_unlogged')"), @@ -54,7 +54,7 @@ my $ts1UnloggedPath = $node->safe_psql('postgres', # Test that main and init forks exist. ok(-f "$pgdata/${ts1UnloggedPath}_init", 'init fork in tablespace exists'); -ok(-f "$pgdata/$ts1UnloggedPath", 'main fork in tablespace exists'); +ok(-f "$pgdata/$ts1UnloggedPath", 'main fork in tablespace exists'); # Create more unlogged sequences for testing. $node->safe_psql('postgres', 'CREATE UNLOGGED SEQUENCE seq_unlogged2'); @@ -73,7 +73,7 @@ $node->safe_psql('postgres', 'INSERT INTO tab_seq_unlogged3 DEFAULT VALUES'); $node->stop('immediate'); # Write fake forks to test that they are removed during recovery. -append_to_file("$pgdata/${baseUnloggedPath}_vm", 'TEST_VM'); +append_to_file("$pgdata/${baseUnloggedPath}_vm", 'TEST_VM'); append_to_file("$pgdata/${baseUnloggedPath}_fsm", 'TEST_FSM'); # Remove main fork to test that it is recopied from init. 
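The 014_unlogged_reinit.pl hunks around this point only re-align whitespace, but the paths they probe deserve a note: they come from pg_relation_filepath(), are relative to the data directory, and unlogged relations carry an extra "_init" fork that recovery uses to reset the main fork. A hypothetical fragment of that pattern in a TAP test (node and table names are illustrative; done_testing() is omitted):

    use PostgreSQL::Test::Cluster;
    use Test::More;

    my $node = PostgreSQL::Test::Cluster->new('node');
    $node->init;
    $node->start;
    $node->safe_psql('postgres',
        'CREATE UNLOGGED TABLE tab_unlogged (i int)');

    # pg_relation_filepath() returns e.g. base/<dboid>/<relfilenode>
    my $pgdata = $node->data_dir;
    my $path = $node->safe_psql('postgres',
        q{SELECT pg_relation_filepath('tab_unlogged')});

    ok(-f "$pgdata/${path}_init", 'table init fork exists');
    ok(-f "$pgdata/$path", 'table main fork exists');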
@@ -83,7 +83,7 @@ unlink("$pgdata/${seqUnloggedPath}") or BAIL_OUT("could not remove \"${seqUnloggedPath}\": $!"); # the same for the tablespace -append_to_file("$pgdata/${ts1UnloggedPath}_vm", 'TEST_VM'); +append_to_file("$pgdata/${ts1UnloggedPath}_vm", 'TEST_VM'); append_to_file("$pgdata/${ts1UnloggedPath}_fsm", 'TEST_FSM'); unlink("$pgdata/${ts1UnloggedPath}") or BAIL_OUT("could not remove \"${ts1UnloggedPath}\": $!"); diff --git a/src/test/recovery/t/016_min_consistency.pl b/src/test/recovery/t/016_min_consistency.pl index a7e709315f..81f7a43c07 100644 --- a/src/test/recovery/t/016_min_consistency.pl +++ b/src/test/recovery/t/016_min_consistency.pl @@ -20,7 +20,7 @@ use Test::More; sub find_largest_lsn { my $blocksize = int(shift); - my $filename = shift; + my $filename = shift; my ($max_hi, $max_lo) = (0, 0); open(my $fh, "<:raw", $filename) or die "failed to open $filename: $!"; diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl index 13ee7e194c..74359e0e38 100644 --- a/src/test/recovery/t/017_shm.pl +++ b/src/test/recovery/t/017_shm.pl @@ -147,7 +147,7 @@ log_ipcs(); my $pre_existing_msg = qr/pre-existing shared memory block/; { my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { last @@ -194,7 +194,7 @@ sub poll_start my ($node) = @_; my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { diff --git a/src/test/recovery/t/018_wal_optimize.pl b/src/test/recovery/t/018_wal_optimize.pl index 866259580a..1d613eaede 100644 --- a/src/test/recovery/t/018_wal_optimize.pl +++ b/src/test/recovery/t/018_wal_optimize.pl @@ -24,7 +24,7 @@ sub check_orphan_relfilenodes my $db_oid = $node->safe_psql('postgres', "SELECT oid FROM pg_database WHERE datname = 'postgres'"); - my $prefix = "base/$db_oid/"; + my $prefix = "base/$db_oid/"; my $filepaths_referenced = $node->safe_psql( 'postgres', " SELECT pg_relation_filepath(oid) FROM pg_class @@ -145,7 +145,7 @@ wal_skip_threshold = 0 is($result, qq(20000), "wal_level = $wal_level, end-of-xact WAL"); # Data file for COPY query in subsequent tests - my $basedir = $node->basedir; + my $basedir = $node->basedir; my $copy_file = "$basedir/copy_data.txt"; PostgreSQL::Test::Utils::append_to_file( $copy_file, qq(20000,30000 diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl index cb047bf77d..a1aba16e14 100644 --- a/src/test/recovery/t/019_replslot_limit.pl +++ b/src/test/recovery/t/019_replslot_limit.pl @@ -377,7 +377,7 @@ $logstart = get_log_size($node_primary3); kill 'STOP', $senderpid, $receiverpid; advance_wal($node_primary3, 2); -my $msg_logged = 0; +my $msg_logged = 0; my $max_attempts = $PostgreSQL::Test::Utils::timeout_default; while ($max_attempts-- >= 0) { @@ -402,7 +402,7 @@ $node_primary3->poll_query_until('postgres', "lost") or die "timed out waiting for slot to be lost"; -$msg_logged = 0; +$msg_logged = 0; $max_attempts = $PostgreSQL::Test::Utils::timeout_default; while ($max_attempts-- >= 0) { diff --git a/src/test/recovery/t/020_archive_status.pl b/src/test/recovery/t/020_archive_status.pl index 13ada994db..fa24153d4b 100644 --- a/src/test/recovery/t/020_archive_status.pl +++ b/src/test/recovery/t/020_archive_status.pl @@ -12,7 +12,7 @@ use Test::More; my $primary = PostgreSQL::Test::Cluster->new('primary'); $primary->init( - has_archiving => 1, + has_archiving => 1, allows_streaming => 1); 
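The 017_shm.pl hunks above re-align the counters of its poll_start() retry loop; roughly, the pattern is the condensed sketch below, under the assumption of the usual PostgreSQL::Test imports (the attempt limit and sleep interval mirror the visible context):

    use Time::HiRes qw(usleep);
    use PostgreSQL::Test::Utils;

    sub poll_start
    {
        my ($node) = @_;

        my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
        my $attempts = 0;
        while ($attempts < $max_attempts)
        {
            # fail_ok => 1 makes start() return false on failure instead
            # of bailing out of the whole test
            return 1 if $node->start(fail_ok => 1);

            usleep(100_000);    # wait 0.1s before retrying
            $attempts++;
        }

        # out of attempts; one last try without fail_ok, so a persistent
        # failure bails out with a useful error
        $node->start;
        return 1;
    }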
 $primary->append_conf('postgresql.conf', 'autovacuum = off');
 $primary->start;
@@ -39,9 +39,9 @@ $primary->safe_psql(
 # This will be used to track the activity of the archiver.
 my $segment_name_1 = $primary->safe_psql('postgres',
 	q{SELECT pg_walfile_name(pg_current_wal_lsn())});
-my $segment_path_1       = "pg_wal/archive_status/$segment_name_1";
+my $segment_path_1 = "pg_wal/archive_status/$segment_name_1";
 my $segment_path_1_ready = "$segment_path_1.ready";
-my $segment_path_1_done  = "$segment_path_1.done";
+my $segment_path_1_done = "$segment_path_1.done";
 $primary->safe_psql(
 	'postgres', q{
 	CREATE TABLE mine AS SELECT generate_series(1,10) AS x;
@@ -115,9 +115,9 @@ is( $primary->safe_psql(
 # with existing status files.
 my $segment_name_2 = $primary->safe_psql('postgres',
 	q{SELECT pg_walfile_name(pg_current_wal_lsn())});
-my $segment_path_2       = "pg_wal/archive_status/$segment_name_2";
+my $segment_path_2 = "pg_wal/archive_status/$segment_name_2";
 my $segment_path_2_ready = "$segment_path_2.ready";
-my $segment_path_2_done  = "$segment_path_2.done";
+my $segment_path_2_done = "$segment_path_2.done";
 $primary->safe_psql(
 	'postgres', q{
 	INSERT INTO mine SELECT generate_series(10,20) AS x;
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 03c8efdfb5..14fd8bfc7f 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -98,7 +98,7 @@ SELECT $$in-progress-before-sigkill$$;
 INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
 ];
 ok( pump_until(
-		$killme,         $psql_timeout,
+		$killme, $psql_timeout,
 		\$killme_stdout, qr/in-progress-before-sigkill/m),
 	'insert in-progress-before-sigkill');
 $killme_stdout = '';
@@ -205,7 +205,7 @@ SELECT $$in-progress-before-sigkill$$;
 INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
 ];
 ok( pump_until(
-		$killme,         $psql_timeout,
+		$killme, $psql_timeout,
 		\$killme_stdout, qr/in-progress-before-sigkill/m),
 	'insert in-progress-before-sigkill');
 $killme_stdout = '';
diff --git a/src/test/recovery/t/023_pitr_prepared_xact.pl b/src/test/recovery/t/023_pitr_prepared_xact.pl
index e55098ef7f..a8cdf4efdd 100644
--- a/src/test/recovery/t/023_pitr_prepared_xact.pl
+++ b/src/test/recovery/t/023_pitr_prepared_xact.pl
@@ -27,7 +27,7 @@ $node_primary->backup($backup_name);
 my $node_pitr = PostgreSQL::Test::Cluster->new('node_pitr');
 $node_pitr->init_from_backup(
 	$node_primary, $backup_name,
-	standby       => 0,
+	standby => 0,
 	has_restoring => 1);
 $node_pitr->append_conf(
 	'postgresql.conf', qq{
diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl
index 43eb421321..d594332b18 100644
--- a/src/test/recovery/t/024_archive_recovery.pl
+++ b/src/test/recovery/t/024_archive_recovery.pl
@@ -70,15 +70,15 @@ sub test_recovery_wal_level_minimal
 	$recovery_node->init_from_backup(
 		$node, $backup_name,
 		has_restoring => 1,
-		standby       => $standby_setting);
+		standby => $standby_setting);
 
 	# Use run_log instead of recovery_node->start because this test expects
 	# that the server ends with an error during recovery.
 	run_log(
 		[
-			'pg_ctl',                 '-D',
+			'pg_ctl', '-D',
 			$recovery_node->data_dir, '-l',
-			$recovery_node->logfile,  'start'
+			$recovery_node->logfile, 'start'
 		]);
 
 	# wait for postgres to terminate
diff --git a/src/test/recovery/t/025_stuck_on_old_timeline.pl b/src/test/recovery/t/025_stuck_on_old_timeline.pl
index fc88ceff9d..91309030df 100644
--- a/src/test/recovery/t/025_stuck_on_old_timeline.pl
+++ b/src/test/recovery/t/025_stuck_on_old_timeline.pl
@@ -51,8 +51,8 @@ my $node_standby = PostgreSQL::Test::Cluster->new('standby');
 $node_standby->init_from_backup(
 	$node_primary, $backup_name,
 	allows_streaming => 1,
-	has_streaming    => 1,
-	has_archiving    => 1);
+	has_streaming => 1,
+	has_archiving => 1);
 $node_standby->start;
 
 # Take backup of standby, use -Xnone so that pg_wal is empty.
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index 255c45a4ff..f2f4e77626 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -60,7 +60,7 @@ $node_standby_1->append_conf('postgresql.conf',
 	'max_standby_streaming_delay = 600s');
 $node_standby_1->start;
 
-my $dlpath    = dirname($ENV{REGRESS_SHLIB});
+my $dlpath = dirname($ENV{REGRESS_SHLIB});
 my $outputdir = $PostgreSQL::Test::Utils::tmp_check;
 
 # Run the regression tests against the primary.
@@ -103,7 +103,7 @@ $node_primary->wait_for_replay_catchup($node_standby_1);
 command_ok(
 	[
 		'pg_dumpall', '-f', $outputdir . '/primary.dump',
-		'--no-sync',  '-p', $node_primary->port,
+		'--no-sync', '-p', $node_primary->port,
 		'--no-unlogged-table-data'    # if unlogged, standby has schema only
 	],
 	'dump primary server');
diff --git a/src/test/recovery/t/028_pitr_timelines.pl b/src/test/recovery/t/028_pitr_timelines.pl
index b32a12968a..bb29a2d378 100644
--- a/src/test/recovery/t/028_pitr_timelines.pl
+++ b/src/test/recovery/t/028_pitr_timelines.pl
@@ -64,7 +64,7 @@ INSERT INTO foo VALUES(2);
 my $node_standby = PostgreSQL::Test::Cluster->new('standby');
 $node_standby->init_from_backup(
 	$node_primary, $backup_name,
-	standby       => 1,
+	standby => 1,
 	has_streaming => 1,
 	has_archiving => 1,
 	has_restoring => 0);
@@ -118,7 +118,7 @@ $node_standby->stop;
 my $node_pitr = PostgreSQL::Test::Cluster->new('node_pitr');
 $node_pitr->init_from_backup(
 	$node_primary, $backup_name,
-	standby       => 0,
+	standby => 0,
 	has_restoring => 1);
 $node_pitr->append_conf(
 	'postgresql.conf', qq{
@@ -156,7 +156,7 @@ $node_pitr->stop();
 my $node_pitr2 = PostgreSQL::Test::Cluster->new('node_pitr2');
 $node_pitr2->init_from_backup(
 	$node_primary, $backup_name,
-	standby       => 0,
+	standby => 0,
 	has_restoring => 1);
 $node_pitr2->append_conf(
 	'postgresql.conf', qq{
diff --git a/src/test/recovery/t/029_stats_restart.pl b/src/test/recovery/t/029_stats_restart.pl
index 83d6647d32..742bd57e28 100644
--- a/src/test/recovery/t/029_stats_restart.pl
+++ b/src/test/recovery/t/029_stats_restart.pl
@@ -15,7 +15,7 @@ $node->init(allows_streaming => 1);
 $node->append_conf('postgresql.conf', "track_functions = 'all'");
 $node->start;
 
-my $connect_db    = 'postgres';
+my $connect_db = 'postgres';
 my $db_under_test = 'test';
 
 # create test objects
@@ -53,7 +53,7 @@ $node->stop();
 my $statsfile = $PostgreSQL::Test::Utils::tmp_check . '/' .
"discard_stats1"; ok(!-f "$statsfile", "backup statsfile cannot already exist"); -my $datadir = $node->data_dir(); +my $datadir = $node->data_dir(); my $og_stats = "$datadir/pg_stat/pgstat.stat"; ok(-f "$og_stats", "origin stats file must exist"); copy($og_stats, $statsfile) or die "Copy failed: $!"; @@ -147,12 +147,12 @@ $node->safe_psql($connect_db, "CHECKPOINT; CHECKPOINT;"); ## check checkpoint and wal stats are incremented due to restart my $ckpt_start = checkpoint_stats(); -my $wal_start = wal_stats(); +my $wal_start = wal_stats(); $node->restart; $sect = "post restart"; my $ckpt_restart = checkpoint_stats(); -my $wal_restart = wal_stats(); +my $wal_restart = wal_stats(); cmp_ok( $ckpt_start->{count}, '<', @@ -176,7 +176,7 @@ is($wal_start->{reset}, $wal_restart->{reset}, $node->safe_psql($connect_db, "SELECT pg_stat_reset_shared('bgwriter')"); $sect = "post ckpt reset"; -my $ckpt_reset = checkpoint_stats(); +my $ckpt_reset = checkpoint_stats(); my $wal_ckpt_reset = wal_stats(); cmp_ok($ckpt_restart->{count}, @@ -200,7 +200,7 @@ $node->restart; $sect = "post ckpt reset & restart"; my $ckpt_restart_reset = checkpoint_stats(); -my $wal_restart2 = wal_stats(); +my $wal_restart2 = wal_stats(); # made sure above there's enough checkpoints that this will be stable even on slow machines cmp_ok( diff --git a/src/test/recovery/t/031_recovery_conflict.pl b/src/test/recovery/t/031_recovery_conflict.pl index e29bc6c181..05e83fa854 100644 --- a/src/test/recovery/t/031_recovery_conflict.pl +++ b/src/test/recovery/t/031_recovery_conflict.pl @@ -67,8 +67,8 @@ $node_primary->wait_for_replay_catchup($node_standby); # a longrunning psql that we can use to trigger conflicts -my $psql_standby = $node_standby->background_psql($test_db, - on_error_stop => 0); +my $psql_standby = + $node_standby->background_psql($test_db, on_error_stop => 0); my $expected_conflicts = 0; @@ -96,7 +96,8 @@ my $cursor1 = "test_recovery_conflict_cursor"; # DECLARE and use a cursor on standby, causing buffer with the only block of # the relation to be pinned on the standby -my $res = $psql_standby->query_safe(qq[ +my $res = $psql_standby->query_safe( + qq[ BEGIN; DECLARE $cursor1 CURSOR FOR SELECT b FROM $table1; FETCH FORWARD FROM $cursor1; @@ -131,7 +132,8 @@ $node_primary->safe_psql($test_db, $node_primary->wait_for_replay_catchup($node_standby); # DECLARE and FETCH from cursor on the standby -$res = $psql_standby->query_safe(qq[ +$res = $psql_standby->query_safe( + qq[ BEGIN; DECLARE $cursor1 CURSOR FOR SELECT b FROM $table1; FETCH FORWARD FROM $cursor1; @@ -159,7 +161,8 @@ $sect = "lock conflict"; $expected_conflicts++; # acquire lock to conflict with -$res = $psql_standby->query_safe(qq[ +$res = $psql_standby->query_safe( + qq[ BEGIN; LOCK TABLE $table1 IN ACCESS SHARE MODE; SELECT 1; @@ -183,7 +186,8 @@ $expected_conflicts++; # DECLARE a cursor for a query which, with sufficiently low work_mem, will # spill tuples into temp files in the temporary tablespace created during # setup. 
-$res = $psql_standby->query_safe(qq[
+$res = $psql_standby->query_safe(
+	qq[
 	BEGIN;
 	SET work_mem = '64kB';
 	DECLARE $cursor1 CURSOR FOR
@@ -240,7 +244,8 @@ SELECT txid_current();
 
 $node_primary->wait_for_replay_catchup($node_standby);
 
-$res = $psql_standby->query_until(qr/^1$/m, qq[
+$res = $psql_standby->query_until(
+	qr/^1$/m, qq[
 	BEGIN;
 	-- hold pin
 	DECLARE $cursor1 CURSOR FOR SELECT a FROM $table1;
@@ -248,7 +253,9 @@ $res = $psql_standby->query_until(qr/^1$/m, qq[
 	-- wait for lock held by prepared transaction
 	SELECT * FROM $table2;
 ]);
-ok( 1, "$sect: cursor holding conflicting pin, also waiting for lock, established");
+ok(1,
+	"$sect: cursor holding conflicting pin, also waiting for lock, established"
+);
 
 # just to make sure we're waiting for lock already
 ok( $node_standby->poll_query_until(
@@ -305,7 +312,7 @@ done_testing();
 
 sub check_conflict_log
 {
-	my $message          = shift;
+	my $message = shift;
 	my $old_log_location = $log_location;
 
 	$log_location = $node_standby->wait_for_log(qr/$message/, $log_location);
@@ -318,7 +325,7 @@ sub check_conflict_log
 sub check_conflict_stat
 {
 	my $conflict_type = shift;
-	my $count         = $node_standby->safe_psql($test_db,
+	my $count = $node_standby->safe_psql($test_db,
 		qq[SELECT confl_$conflict_type FROM pg_stat_database_conflicts WHERE datname='$test_db';]
 	);
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index 92ec510037..3bc2db1a4f 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -141,8 +141,8 @@ $node_primary->safe_psql('postgres',
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 7;");
 cause_eviction(\%psql_primary, \%psql_standby);
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 8;");
-$node_primary->safe_psql('postgres',  'DROP DATABASE conflict_db');
-$node_primary->safe_psql('postgres',  'DROP TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
+$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
 
 $node_primary->safe_psql('postgres', 'REINDEX TABLE pg_database');
diff --git a/src/test/recovery/t/033_replay_tsp_drops.pl b/src/test/recovery/t/033_replay_tsp_drops.pl
index 42a6e69332..0a35a7bda6 100644
--- a/src/test/recovery/t/033_replay_tsp_drops.pl
+++ b/src/test/recovery/t/033_replay_tsp_drops.pl
@@ -37,8 +37,7 @@ sub test_tablespace
 		has_streaming => 1);
 	$node_standby->append_conf('postgresql.conf',
 		"allow_in_place_tablespaces = on");
-	$node_standby->append_conf('postgresql.conf',
-		"primary_slot_name = slot");
+	$node_standby->append_conf('postgresql.conf', "primary_slot_name = slot");
 	$node_standby->start;
 
 	# Make sure the connection is made
@@ -137,7 +136,8 @@ while ($max_attempts-- >= 0)
 	last
 	  if (
 		find_in_log(
-			$node_standby, qr!WARNING: ( [A-Z0-9]+:)? creating missing directory: pg_tblspc/!,
+			$node_standby,
+			qr!WARNING: ( [A-Z0-9]+:)? creating missing directory: pg_tblspc/!,
 			$logstart));
 	usleep(100_000);
 }
diff --git a/src/test/recovery/t/034_create_database.pl b/src/test/recovery/t/034_create_database.pl
index 4698cbc391..ed562bba25 100644
--- a/src/test/recovery/t/034_create_database.pl
+++ b/src/test/recovery/t/034_create_database.pl
@@ -17,7 +17,7 @@ $node->start;
 # are persisted after creating a database from it using the WAL_LOG strategy,
 # as a direct copy of the template database's pg_class is used in this case.
 my $db_template = "template1";
-my $db_new      = "test_db_1";
+my $db_new = "test_db_1";
 
 # Create table. It should persist on the template database.
 $node->safe_psql("postgres",
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index 2b4a688330..64beec4bd3 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -10,17 +10,18 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my ($stdin,             $stdout,            $stderr,
-	$cascading_stdout,  $cascading_stderr,  $subscriber_stdin,
+my ($stdin, $stdout, $stderr,
+	$cascading_stdout, $cascading_stderr, $subscriber_stdin,
 	$subscriber_stdout, $subscriber_stderr, $ret,
-	$handle,            $slot);
+	$handle, $slot);
 
 my $node_primary = PostgreSQL::Test::Cluster->new('primary');
 my $node_standby = PostgreSQL::Test::Cluster->new('standby');
-my $node_cascading_standby = PostgreSQL::Test::Cluster->new('cascading_standby');
+my $node_cascading_standby =
+  PostgreSQL::Test::Cluster->new('cascading_standby');
 my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 my $default_timeout = $PostgreSQL::Test::Utils::timeout_default;
-my $psql_timeout    = IPC::Run::timer($default_timeout);
+my $psql_timeout = IPC::Run::timer($default_timeout);
 my $res;
 
 # Name for the physical slot on primary
@@ -62,8 +63,10 @@ sub create_logical_slots
 	my $active_slot = $slot_prefix . 'activeslot';
 	my $inactive_slot = $slot_prefix . 'inactiveslot';
 
-	$node->create_logical_slot_on_standby($node_primary, qq($inactive_slot), 'testdb');
-	$node->create_logical_slot_on_standby($node_primary, qq($active_slot), 'testdb');
+	$node->create_logical_slot_on_standby($node_primary, qq($inactive_slot),
+		'testdb');
+	$node->create_logical_slot_on_standby($node_primary, qq($active_slot),
+		'testdb');
 }
 
 # Drop the logical slots on standby.
@@ -73,8 +76,10 @@ sub drop_logical_slots
 	my $active_slot = $slot_prefix . 'activeslot';
 	my $inactive_slot = $slot_prefix . 'inactiveslot';
 
-	$node_standby->psql('postgres', qq[SELECT pg_drop_replication_slot('$inactive_slot')]);
-	$node_standby->psql('postgres', qq[SELECT pg_drop_replication_slot('$active_slot')]);
+	$node_standby->psql('postgres',
+		qq[SELECT pg_drop_replication_slot('$inactive_slot')]);
+	$node_standby->psql('postgres',
+		qq[SELECT pg_drop_replication_slot('$active_slot')]);
 }
 
 # Acquire one of the standby logical slots created by create_logical_slots().
@@ -86,7 +91,20 @@ sub make_slot_active
 	my $slot_user_handle;
 	my $active_slot = $slot_prefix . 'activeslot';
 
-	$slot_user_handle = IPC::Run::start(['pg_recvlogical', '-d', $node->connstr('testdb'), '-S', qq($active_slot), '-o', 'include-xids=0', '-o', 'skip-empty-xacts=1', '--no-loop', '--start', '-f', '-'], '>', $to_stdout, '2>', $to_stderr);
+	$slot_user_handle = IPC::Run::start(
+		[
+			'pg_recvlogical', '-d',
+			$node->connstr('testdb'), '-S',
+			qq($active_slot), '-o',
+			'include-xids=0', '-o',
+			'skip-empty-xacts=1', '--no-loop',
+			'--start', '-f',
+			'-'
+		],
+		'>',
+		$to_stdout,
+		'2>',
+		$to_stderr);
 
 	if ($wait)
 	{
@@ -108,7 +126,8 @@ sub check_pg_recvlogical_stderr
 	$slot_user_handle->finish;
 	$return = $?;
 	cmp_ok($return, "!=", 0, "pg_recvlogical exited non-zero");
-	if ($return) {
+	if ($return)
+	{
 		like($stderr, qr/$check_stderr/, 'slot has been invalidated');
 	}
 
@@ -121,8 +140,10 @@ sub check_slots_dropped
 {
 	my ($slot_prefix, $slot_user_handle) = @_;
 
-	is($node_standby->slot($slot_prefix . 'inactiveslot')->{'slot_type'}, '', 'inactiveslot on standby dropped');
-	is($node_standby->slot($slot_prefix . 'activeslot')->{'slot_type'}, '', 'activeslot on standby dropped');
+	is($node_standby->slot($slot_prefix . 'inactiveslot')->{'slot_type'},
+		'', 'inactiveslot on standby dropped');
+	is($node_standby->slot($slot_prefix . 'activeslot')->{'slot_type'},
+		'', 'activeslot on standby dropped');
 
 	check_pg_recvlogical_stderr($slot_user_handle, "conflict with recovery");
 }
@@ -132,7 +153,8 @@ sub change_hot_standby_feedback_and_wait_for_xmins
 {
 	my ($hsf, $invalidated) = @_;
 
-	$node_standby->append_conf('postgresql.conf',qq[
+	$node_standby->append_conf(
+		'postgresql.conf', qq[
 	hot_standby_feedback = $hsf
 	]);
@@ -143,19 +165,19 @@ sub change_hot_standby_feedback_and_wait_for_xmins
 		# With hot_standby_feedback on, xmin should advance,
 		# but catalog_xmin should still remain NULL since there is no logical slot.
 		wait_for_xmins($node_primary, $primary_slotname,
-				"xmin IS NOT NULL AND catalog_xmin IS NULL");
+			"xmin IS NOT NULL AND catalog_xmin IS NULL");
 	}
 	elsif ($hsf)
 	{
 		# With hot_standby_feedback on, xmin and catalog_xmin should advance.
 		wait_for_xmins($node_primary, $primary_slotname,
-				"xmin IS NOT NULL AND catalog_xmin IS NOT NULL");
+			"xmin IS NOT NULL AND catalog_xmin IS NOT NULL");
 	}
 	else
 	{
 		# Both should be NULL since hs_feedback is off
 		wait_for_xmins($node_primary, $primary_slotname,
-				"xmin IS NULL AND catalog_xmin IS NULL");
+			"xmin IS NULL AND catalog_xmin IS NULL");
 	}
 }
@@ -168,20 +190,18 @@ sub check_slots_conflicting_status
 	if ($conflicting)
 	{
 		$res = $node_standby->safe_psql(
-				'postgres', qq(
+			'postgres', qq(
 				 select bool_and(conflicting) from pg_replication_slots;));
 
-		is($res, 't',
-			"Logical slots are reported as conflicting");
+		is($res, 't', "Logical slots are reported as conflicting");
 	}
 	else
 	{
 		$res = $node_standby->safe_psql(
-				'postgres', qq(
+			'postgres', qq(
 				select bool_or(conflicting) from pg_replication_slots;));
 
-		is($res, 'f',
-			"Logical slots are reported as non conflicting");
+		is($res, 'f', "Logical slots are reported as non conflicting");
 	}
 }
@@ -199,7 +219,8 @@ sub reactive_slots_change_hfs_and_wait_for_xmins
 
 	change_hot_standby_feedback_and_wait_for_xmins($hsf, $invalidated);
 
-	$handle = make_slot_active($node_standby, $slot_prefix, 1, \$stdout, \$stderr);
+	$handle =
+	  make_slot_active($node_standby, $slot_prefix, 1, \$stdout, \$stderr);
 
 	# reset stat: easier to check for confl_active_logicalslot in pg_stat_database_conflicts
 	$node_standby->psql('testdb', q[select pg_stat_reset();]);
@@ -215,20 +236,24 @@ sub check_for_invalidation
 	# message should be issued
 	ok( find_in_log(
-		$node_standby,
-		"invalidating obsolete replication slot \"$inactive_slot\"", $log_start),
+			$node_standby,
+			"invalidating obsolete replication slot \"$inactive_slot\"",
+			$log_start),
 		"inactiveslot slot invalidation is logged $test_name");
 
 	ok( find_in_log(
-		$node_standby,
-		"invalidating obsolete replication slot \"$active_slot\"", $log_start),
+			$node_standby,
+			"invalidating obsolete replication slot \"$active_slot\"",
+			$log_start),
 		"activeslot slot invalidation is logged $test_name");
 
 	# Verify that pg_stat_database_conflicts.confl_active_logicalslot has been updated
 	ok( $node_standby->poll_query_until(
-		'postgres',
-		"select (confl_active_logicalslot = 1) from pg_stat_database_conflicts where datname = 'testdb'", 't'),
-		'confl_active_logicalslot updated') or die "Timed out waiting confl_active_logicalslot to be updated";
+			'postgres',
+			"select (confl_active_logicalslot = 1) from pg_stat_database_conflicts where datname = 'testdb'",
+			't'),
+		'confl_active_logicalslot updated'
+	) or die "Timed out waiting confl_active_logicalslot to be updated";
die "Timed out waiting confl_active_logicalslot to be updated"; } ######################## @@ -236,7 +261,8 @@ sub check_for_invalidation ######################## $node_primary->init(allows_streaming => 1, has_archiving => 1); -$node_primary->append_conf('postgresql.conf', q{ +$node_primary->append_conf( + 'postgresql.conf', q{ wal_level = 'logical' max_replication_slots = 4 max_wal_senders = 4 @@ -246,15 +272,17 @@ $node_primary->start; $node_primary->psql('postgres', q[CREATE DATABASE testdb]); -$node_primary->safe_psql('testdb', qq[SELECT * FROM pg_create_physical_replication_slot('$primary_slotname');]); +$node_primary->safe_psql('testdb', + qq[SELECT * FROM pg_create_physical_replication_slot('$primary_slotname');] +); # Check conflicting is NULL for physical slot $res = $node_primary->safe_psql( - 'postgres', qq[ - SELECT conflicting is null FROM pg_replication_slots where slot_name = '$primary_slotname';]); + 'postgres', qq[ + SELECT conflicting is null FROM pg_replication_slots where slot_name = '$primary_slotname';] +); -is($res, 't', - "Physical slot reports conflicting as NULL"); +is($res, 't', "Physical slot reports conflicting as NULL"); my $backup_name = 'b1'; $node_primary->backup($backup_name); @@ -271,7 +299,8 @@ $node_standby->init_from_backup( $node_primary, $backup_name, has_streaming => 1, has_restoring => 1); -$node_standby->append_conf('postgresql.conf', +$node_standby->append_conf( + 'postgresql.conf', qq[primary_slot_name = '$primary_slotname' max_replication_slots = 5]); $node_standby->start; @@ -284,7 +313,7 @@ $node_subscriber->init(allows_streaming => 'logical'); $node_subscriber->start; my %psql_subscriber = ( - 'subscriber_stdin' => '', + 'subscriber_stdin' => '', 'subscriber_stdout' => '', 'subscriber_stderr' => ''); $psql_subscriber{run} = IPC::Run::start( @@ -305,13 +334,17 @@ $psql_subscriber{run} = IPC::Run::start( # create the logical slots create_logical_slots($node_standby, 'behaves_ok_'); -$node_primary->safe_psql('testdb', qq[CREATE TABLE decoding_test(x integer, y text);]); -$node_primary->safe_psql('testdb', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]); +$node_primary->safe_psql('testdb', + qq[CREATE TABLE decoding_test(x integer, y text);]); +$node_primary->safe_psql('testdb', + qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;] +); $node_primary->wait_for_replay_catchup($node_standby); my $result = $node_standby->safe_psql('testdb', - qq[SELECT pg_logical_slot_get_changes('behaves_ok_activeslot', NULL, NULL);]); + qq[SELECT pg_logical_slot_get_changes('behaves_ok_activeslot', NULL, NULL);] +); # test if basic decoding works is(scalar(my @foobar = split /^/m, $result), @@ -350,21 +383,21 @@ $node_primary->safe_psql('testdb', $node_primary->wait_for_replay_catchup($node_standby); my $stdout_recv = $node_standby->pg_recvlogical_upto( - 'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout, - 'include-xids' => '0', - 'skip-empty-xacts' => '1'); + 'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout, + 'include-xids' => '0', + 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, $expected, - 'got same expected output from pg_recvlogical decoding session'); + 'got same expected output from pg_recvlogical decoding session'); $node_standby->poll_query_until('testdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'behaves_ok_activeslot' AND active_pid IS NULL)" ) or die "slot never became inactive"; $stdout_recv = 
-	'testdb',          'behaves_ok_activeslot', $endpos, $default_timeout,
-	'include-xids'     => '0',
-	'skip-empty-xacts' => '1');
+	'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout,
+	'include-xids' => '0',
+	'skip-empty-xacts' => '1');
 chomp($stdout_recv);
 is($stdout_recv, '', 'pg_recvlogical acknowledged changes');
 
@@ -374,10 +407,9 @@ $node_primary->safe_psql('postgres', 'CREATE DATABASE otherdb');
 # on the standby.
 $node_primary->wait_for_replay_catchup($node_standby);
 
-($result, $stdout, $stderr) = $node_standby->psql(
-	'otherdb',
-	"SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
-	);
+($result, $stdout, $stderr) = $node_standby->psql('otherdb',
+	"SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+);
 ok( $stderr =~
 	  m/replication slot "behaves_ok_activeslot" was not created in this database/,
 	"replaying logical slot from another database fails");
@@ -408,8 +440,7 @@ my $standby_connstr = $node_standby->connstr . ' dbname=postgres';
 # and we wouldn't be able to launch pg_log_standby_snapshot() on the primary
 # while waiting.
 # psql_subscriber() allows to not wait synchronously.
-$psql_subscriber{subscriber_stdin} .=
-  qq[CREATE SUBSCRIPTION tap_sub
+$psql_subscriber{subscriber_stdin} .= qq[CREATE SUBSCRIPTION tap_sub
 CONNECTION '$standby_connstr'
 PUBLICATION tap_pub
 WITH (copy_data = off);];
@@ -451,10 +482,12 @@ $node_subscriber->stop;
 # One way to produce recovery conflict is to create/drop a relation and
 # launch a vacuum full on pg_class with hot_standby_feedback turned off on
 # the standby.
-reactive_slots_change_hfs_and_wait_for_xmins('behaves_ok_', 'vacuum_full_', 0, 1);
+reactive_slots_change_hfs_and_wait_for_xmins('behaves_ok_', 'vacuum_full_',
+	0, 1);
 
 # This should trigger the conflict
-$node_primary->safe_psql('testdb', qq[
+$node_primary->safe_psql(
+	'testdb', qq[
   CREATE TABLE conflict_test(x integer, y text);
   DROP TABLE conflict_test;
   VACUUM full pg_class;
@@ -469,13 +502,16 @@ check_for_invalidation('vacuum_full_', 1, 'with vacuum FULL on pg_class');
 # Verify slots are reported as conflicting in pg_replication_slots
 check_slots_conflicting_status(1);
 
-$handle = make_slot_active($node_standby, 'vacuum_full_', 0, \$stdout, \$stderr);
+$handle =
+  make_slot_active($node_standby, 'vacuum_full_', 0, \$stdout, \$stderr);
 
 # We are not able to read from the slot as it has been invalidated
-check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"vacuum_full_activeslot\"");
+check_pg_recvlogical_stderr($handle,
+	"can no longer get changes from replication slot \"vacuum_full_activeslot\""
+);
 
 # Turn hot_standby_feedback back on
-change_hot_standby_feedback_and_wait_for_xmins(1,1);
+change_hot_standby_feedback_and_wait_for_xmins(1, 1);
 
 ##################################################
 # Verify that invalidated logical slots stay invalidated across a restart.
@@ -531,10 +567,12 @@ my $logstart = -s $node_standby->logfile;
 
 # One way to produce recovery conflict is to create/drop a relation and
 # launch a vacuum on pg_class with hot_standby_feedback turned off on the standby.
-reactive_slots_change_hfs_and_wait_for_xmins('vacuum_full_', 'row_removal_', 0, 1);
+reactive_slots_change_hfs_and_wait_for_xmins('vacuum_full_', 'row_removal_',
+	0, 1);
 
 # This should trigger the conflict
-$node_primary->safe_psql('testdb', qq[
+$node_primary->safe_psql(
+	'testdb', qq[
   CREATE TABLE conflict_test(x integer, y text);
   DROP TABLE conflict_test;
   VACUUM pg_class;
@@ -549,10 +587,13 @@ check_for_invalidation('row_removal_', $logstart, 'with vacuum on pg_class');
 
 # Verify slots are reported as conflicting in pg_replication_slots
 check_slots_conflicting_status(1);
 
-$handle = make_slot_active($node_standby, 'row_removal_', 0, \$stdout, \$stderr);
+$handle =
+  make_slot_active($node_standby, 'row_removal_', 0, \$stdout, \$stderr);
 
 # We are not able to read from the slot as it has been invalidated
-check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"row_removal_activeslot\"");
+check_pg_recvlogical_stderr($handle,
+	"can no longer get changes from replication slot \"row_removal_activeslot\""
+);
 
 ##################################################
 # Recovery conflict: Same as Scenario 2 but on a shared catalog table
@@ -564,10 +605,12 @@ $logstart = -s $node_standby->logfile;
 
 # One way to produce recovery conflict is to create/drop a relation and
 # launch a vacuum on pg_class with hot_standby_feedback turned off on the standby.
-reactive_slots_change_hfs_and_wait_for_xmins('row_removal_', 'shared_row_removal_', 0, 1);
+reactive_slots_change_hfs_and_wait_for_xmins('row_removal_',
+	'shared_row_removal_', 0, 1);
 
 # Trigger the conflict
-$node_primary->safe_psql('testdb', qq[
+$node_primary->safe_psql(
+	'testdb', qq[
   CREATE ROLE create_trash;
   DROP ROLE create_trash;
   VACUUM pg_authid;
@@ -577,15 +620,19 @@ $node_primary->safe_psql('testdb', qq[
 $node_primary->wait_for_replay_catchup($node_standby);
 
 # Check invalidation in the logfile and in pg_stat_database_conflicts
-check_for_invalidation('shared_row_removal_', $logstart, 'with vacuum on pg_authid');
+check_for_invalidation('shared_row_removal_', $logstart,
+	'with vacuum on pg_authid');
 
 # Verify slots are reported as conflicting in pg_replication_slots
 check_slots_conflicting_status(1);
 
-$handle = make_slot_active($node_standby, 'shared_row_removal_', 0, \$stdout, \$stderr);
+$handle = make_slot_active($node_standby, 'shared_row_removal_', 0, \$stdout,
+	\$stderr);
 
 # We are not able to read from the slot as it has been invalidated
-check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"shared_row_removal_activeslot\"");
+check_pg_recvlogical_stderr($handle,
+	"can no longer get changes from replication slot \"shared_row_removal_activeslot\""
+);
 
 ##################################################
 # Recovery conflict: Same as Scenario 2 but on a non catalog table
@@ -595,10 +642,12 @@ check_pg_recvlogical_stderr($handle, "can no longer get changes from replication
 # get the position to search from in the standby logfile
 $logstart = -s $node_standby->logfile;
 
-reactive_slots_change_hfs_and_wait_for_xmins('shared_row_removal_', 'no_conflict_', 0, 1);
+reactive_slots_change_hfs_and_wait_for_xmins('shared_row_removal_',
+	'no_conflict_', 0, 1);
 
 # This should not trigger a conflict
-$node_primary->safe_psql('testdb', qq[
+$node_primary->safe_psql(
+	'testdb', qq[
   CREATE TABLE conflict_test(x integer, y text);
  INSERT INTO conflict_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;
   UPDATE conflict_test set x=1, y=1;
@@ -609,20 +658,24 @@
 $node_primary->wait_for_replay_catchup($node_standby);
 
 # message should not be issued
 ok( !find_in_log(
-	$node_standby,
-	"invalidating obsolete slot \"no_conflict_inactiveslot\"", $logstart),
-	'inactiveslot slot invalidation is not logged with vacuum on conflict_test');
+		$node_standby,
+		"invalidating obsolete slot \"no_conflict_inactiveslot\"", $logstart),
+	'inactiveslot slot invalidation is not logged with vacuum on conflict_test'
+);
 
 ok( !find_in_log(
-	$node_standby,
-	"invalidating obsolete slot \"no_conflict_activeslot\"", $logstart),
-	'activeslot slot invalidation is not logged with vacuum on conflict_test');
+		$node_standby,
+		"invalidating obsolete slot \"no_conflict_activeslot\"", $logstart),
+	'activeslot slot invalidation is not logged with vacuum on conflict_test'
+);
 
 # Verify that pg_stat_database_conflicts.confl_active_logicalslot has not been updated
 ok( $node_standby->poll_query_until(
-	'postgres',
-	"select (confl_active_logicalslot = 0) from pg_stat_database_conflicts where datname = 'testdb'", 't'),
-	'confl_active_logicalslot not updated') or die "Timed out waiting confl_active_logicalslot to be updated";
+		'postgres',
+		"select (confl_active_logicalslot = 0) from pg_stat_database_conflicts where datname = 'testdb'",
+		't'),
+	'confl_active_logicalslot not updated'
+) or die "Timed out waiting confl_active_logicalslot to be updated";
 
 # Verify slots are reported as non conflicting in pg_replication_slots
 check_slots_conflicting_status(0);
@@ -643,10 +696,13 @@ $logstart = -s $node_standby->logfile;
 
 # One way to produce recovery conflict is to trigger an on-access pruning
 # on a relation marked as user_catalog_table.
-reactive_slots_change_hfs_and_wait_for_xmins('no_conflict_', 'pruning_', 0, 0);
+reactive_slots_change_hfs_and_wait_for_xmins('no_conflict_', 'pruning_', 0,
+	0);
 
 # This should trigger the conflict
-$node_primary->safe_psql('testdb', qq[CREATE TABLE prun(id integer, s char(2000)) WITH (fillfactor = 75, user_catalog_table = true);]);
+$node_primary->safe_psql('testdb',
+	qq[CREATE TABLE prun(id integer, s char(2000)) WITH (fillfactor = 75, user_catalog_table = true);]
+);
 $node_primary->safe_psql('testdb', qq[INSERT INTO prun VALUES (1, 'A');]);
 $node_primary->safe_psql('testdb', qq[UPDATE prun SET s = 'B';]);
 $node_primary->safe_psql('testdb', qq[UPDATE prun SET s = 'C';]);
@@ -664,7 +720,8 @@ check_slots_conflicting_status(1);
 $handle = make_slot_active($node_standby, 'pruning_', 0, \$stdout, \$stderr);
 
 # We are not able to read from the slot as it has been invalidated
-check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"pruning_activeslot\"");
+check_pg_recvlogical_stderr($handle,
+	"can no longer get changes from replication slot \"pruning_activeslot\"");
 
 # Turn hot_standby_feedback back on
 change_hot_standby_feedback_and_wait_for_xmins(1, 1);
@@ -683,13 +740,15 @@ drop_logical_slots('pruning_');
 
 # create the logical slots
 create_logical_slots($node_standby, 'wal_level_');
 
-$handle = make_slot_active($node_standby, 'wal_level_', 1, \$stdout, \$stderr);
+$handle =
+  make_slot_active($node_standby, 'wal_level_', 1, \$stdout, \$stderr);
 
 # reset stat: easier to check for confl_active_logicalslot in pg_stat_database_conflicts
 $node_standby->psql('testdb', q[select pg_stat_reset();]);
 
 # Make primary wal_level replica. This will trigger slot conflict.
-$node_primary->append_conf('postgresql.conf',q[
+$node_primary->append_conf(
+	'postgresql.conf', q[
 wal_level = 'replica'
 ]);
 $node_primary->restart;
@@ -702,20 +761,27 @@ check_for_invalidation('wal_level_', $logstart, 'due to wal_level');
 
 # Verify slots are reported as conflicting in pg_replication_slots
 check_slots_conflicting_status(1);
 
-$handle = make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
+$handle =
+  make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
 
 # We are not able to read from the slot as it requires wal_level >= logical on the primary server
-check_pg_recvlogical_stderr($handle, "logical decoding on standby requires wal_level >= logical on the primary");
+check_pg_recvlogical_stderr($handle,
+	"logical decoding on standby requires wal_level >= logical on the primary"
+);
 
 # Restore primary wal_level
-$node_primary->append_conf('postgresql.conf',q[
+$node_primary->append_conf(
+	'postgresql.conf', q[
 wal_level = 'logical'
 ]);
 $node_primary->restart;
 $node_primary->wait_for_replay_catchup($node_standby);
 
-$handle = make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
+$handle =
+  make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
 
 # as the slot has been invalidated we should not be able to read
-check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"wal_level_activeslot\"");
+check_pg_recvlogical_stderr($handle,
+	"can no longer get changes from replication slot \"wal_level_activeslot\""
+);
 
 ##################################################
 # DROP DATABASE should drops it's slots, including active slots.
@@ -731,24 +797,28 @@ $handle = make_slot_active($node_standby, 'drop_db_', 1, \$stdout, \$stderr);
 
 # Create a slot on a database that would not be dropped. This slot should not
 # get dropped.
-$node_standby->create_logical_slot_on_standby($node_primary, 'otherslot', 'postgres');
+$node_standby->create_logical_slot_on_standby($node_primary, 'otherslot',
+	'postgres');
 
 # dropdb on the primary to verify slots are dropped on standby
 $node_primary->safe_psql('postgres', q[DROP DATABASE testdb]);
 
 $node_primary->wait_for_replay_catchup($node_standby);
 
-is($node_standby->safe_psql('postgres',
-	q[SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'testdb')]), 'f',
+is( $node_standby->safe_psql(
+		'postgres',
+		q[SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'testdb')]),
+	'f',
 	'database dropped on standby');
 
 check_slots_dropped('drop_db', $handle);
 
-is($node_standby->slot('otherslot')->{'slot_type'}, 'logical',
-	'otherslot on standby not dropped');
+is($node_standby->slot('otherslot')->{'slot_type'},
+	'logical', 'otherslot on standby not dropped');
 
 # Cleanup : manually drop the slot that was not dropped.
-$node_standby->psql('postgres', q[SELECT pg_drop_replication_slot('otherslot')]);
+$node_standby->psql('postgres',
+	q[SELECT pg_drop_replication_slot('otherslot')]);
 
 ##################################################
 # Test standby promotion and logical decoding behavior
@@ -758,7 +828,8 @@ $node_standby->psql('postgres', q[SELECT pg_drop_replication_slot('otherslot')])
 $node_standby->reload;
 
 $node_primary->psql('postgres', q[CREATE DATABASE testdb]);
-$node_primary->safe_psql('testdb', qq[CREATE TABLE decoding_test(x integer, y text);]);
+$node_primary->safe_psql('testdb',
+	qq[CREATE TABLE decoding_test(x integer, y text);]);
 
 # Wait for the standby to catchup before initializing the cascading standby
 $node_primary->wait_for_replay_catchup($node_standby);
@@ -767,7 +838,9 @@ $node_primary->wait_for_replay_catchup($node_standby);
 # Keep this step after the "Verify that invalidated logical slots do not lead
 # to retaining WAL" test (as the physical slot on the standby could prevent the
 # WAL file removal).
-$node_standby->safe_psql('testdb', qq[SELECT * FROM pg_create_physical_replication_slot('$standby_physical_slotname');]);
+$node_standby->safe_psql('testdb',
+	qq[SELECT * FROM pg_create_physical_replication_slot('$standby_physical_slotname');]
+);
 
 # Initialize cascading standby node
 $node_standby->backup($backup_name);
@@ -775,7 +848,8 @@ $node_cascading_standby->init_from_backup(
 	$node_standby, $backup_name,
 	has_streaming => 1,
 	has_restoring => 1);
-$node_cascading_standby->append_conf('postgresql.conf',
+$node_cascading_standby->append_conf(
+	'postgresql.conf',
 	qq[primary_slot_name = '$standby_physical_slotname'
 hot_standby_feedback = on]);
 $node_cascading_standby->start;
@@ -784,14 +858,18 @@ $node_cascading_standby->start;
 create_logical_slots($node_standby, 'promotion_');
 
 # Wait for the cascading standby to catchup before creating the slots
-$node_standby->wait_for_replay_catchup($node_cascading_standby, $node_primary);
+$node_standby->wait_for_replay_catchup($node_cascading_standby,
+	$node_primary);
 
 # create the logical slots on the cascading standby too
 create_logical_slots($node_cascading_standby, 'promotion_');
 
 # Make slots actives
-$handle = make_slot_active($node_standby, 'promotion_', 1, \$stdout, \$stderr);
-my $cascading_handle = make_slot_active($node_cascading_standby, 'promotion_', 1, \$cascading_stdout, \$cascading_stderr);
+$handle =
+  make_slot_active($node_standby, 'promotion_', 1, \$stdout, \$stderr);
+my $cascading_handle =
+  make_slot_active($node_cascading_standby, 'promotion_', 1,
+	\$cascading_stdout, \$cascading_stderr);
 
 # Insert some rows before the promotion
 $node_primary->safe_psql('testdb',
@@ -800,7 +878,8 @@ $node_primary->safe_psql('testdb',
 
 # Wait for both standbys to catchup
 $node_primary->wait_for_replay_catchup($node_standby);
-$node_standby->wait_for_replay_catchup($node_cascading_standby, $node_primary);
+$node_standby->wait_for_replay_catchup($node_cascading_standby,
+	$node_primary);
 
 # promote
 $node_standby->promote;
@@ -830,35 +909,38 @@ $stdout_sql = $node_standby->safe_psql('testdb',
 	qq[SELECT data FROM pg_logical_slot_peek_changes('promotion_inactiveslot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
 );
-is($stdout_sql, $expected, 'got expected output from SQL decoding session on promoted standby');
+is($stdout_sql, $expected,
+	'got expected output from SQL decoding session on promoted standby');
 
 # check that we are decoding pre and post promotion inserted rows
 # with pg_recvlogical that has started before the promotion
 my $pump_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
 
-ok( pump_until(
-		$handle, $pump_timeout, \$stdout, qr/^.*COMMIT.*COMMIT$/s),
-	'got 2 COMMIT from pg_recvlogical output');
+ok(pump_until($handle, $pump_timeout, \$stdout, qr/^.*COMMIT.*COMMIT$/s),
+	'got 2 COMMIT from pg_recvlogical output');
 
 chomp($stdout);
 is($stdout, $expected,
-	   'got same expected output from pg_recvlogical decoding session');
+	'got same expected output from pg_recvlogical decoding session');
 
 # check that we are decoding pre and post promotion inserted rows on the cascading standby
 $stdout_sql = $node_cascading_standby->safe_psql('testdb',
 	qq[SELECT data FROM pg_logical_slot_peek_changes('promotion_inactiveslot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
 );
-is($stdout_sql, $expected, 'got expected output from SQL decoding session on cascading standby');
+is($stdout_sql, $expected,
+	'got expected output from SQL decoding session on cascading standby');
 
 # check that we are decoding pre and post promotion inserted rows
 # with pg_recvlogical that has started before the promotion on the cascading standby
 ok( pump_until(
-		$cascading_handle, $pump_timeout, \$cascading_stdout, qr/^.*COMMIT.*COMMIT$/s),
-	'got 2 COMMIT from pg_recvlogical output');
+		$cascading_handle, $pump_timeout,
+		\$cascading_stdout, qr/^.*COMMIT.*COMMIT$/s),
+	'got 2 COMMIT from pg_recvlogical output');
 
 chomp($cascading_stdout);
 is($cascading_stdout, $expected,
-	'got same expected output from pg_recvlogical decoding session on cascading standby');
+	'got same expected output from pg_recvlogical decoding session on cascading standby'
+);
 
 done_testing();
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 88ab255ce6..abf633dc08 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -85,14 +85,14 @@ typedef enum TAPtype
 	TEST_STATUS,
 	PLAN,
 	NONE
-}			TAPtype;
+} TAPtype;
 
 /* options settable from command line */
 _stringlist *dblist = NULL;
 bool		debug = false;
 char	   *inputdir = ".";
 char	   *outputdir = ".";
-char *expecteddir = ".";
+char	   *expecteddir = ".";
 char	   *bindir = PGBINDIR;
 char	   *launcher = NULL;
 static _stringlist *loadextension = NULL;
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index e7956cb1a0..76442de063 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -19,7 +19,8 @@ if ($ENV{with_ssl} ne 'openssl')
 }
 elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
 {
-	plan skip_all => 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
+	plan skip_all =>
+	  'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
 }
 
 my $ssl_server = SSL::Server->new();
@@ -78,11 +79,11 @@ note "testing password-protected keys";
 switch_server_cert(
 	$node,
-	certfile       => 'server-cn-only',
-	cafile         => 'root+client_ca',
-	keyfile        => 'server-password',
+	certfile => 'server-cn-only',
+	cafile => 'root+client_ca',
+	keyfile => 'server-password',
 	passphrase_cmd => 'echo wrongpassword',
-	restart        => 'no');
+	restart => 'no');
 
 command_fails(
 	[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
@@ -91,11 +92,11 @@
 $node->_update_pid(0);
 switch_server_cert(
 	$node,
-	certfile       => 'server-cn-only',
-	cafile         => 'root+client_ca',
-	keyfile        => 'server-password',
+	certfile => 'server-cn-only',
+	cafile => 'root+client_ca',
+	keyfile => 'server-password',
 	passphrase_cmd => 'echo secret1',
-	restart        => 'no');
+	restart => 'no');
 
 command_ok(
 	[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
@@ -468,7 +469,8 @@ $node->connect_fails(
 	qr/could not get server's host name from server certificate/);
 
 # Test system trusted roots.
-switch_server_cert($node,
+switch_server_cert(
+	$node,
 	certfile => 'server-cn-only+server_ca',
 	keyfile => 'server-cn-only',
 	cafile => 'root_ca');
@@ -481,13 +483,15 @@ $common_connstr =
 $node->connect_fails(
 	"$common_connstr sslmode=verify-full host=common-name.pg-ssltest.test",
 	"sslrootcert=system does not connect with private CA",
-	expected_stderr => qr/SSL error: (certificate verify failed|unregistered scheme)/);
+	expected_stderr =>
+	  qr/SSL error: (certificate verify failed|unregistered scheme)/);
 
 # Modes other than verify-full cannot be mixed with sslrootcert=system.
 $node->connect_fails(
 	"$common_connstr sslmode=verify-ca host=common-name.pg-ssltest.test",
 	"sslrootcert=system only accepts sslmode=verify-full",
-	expected_stderr => qr/weak sslmode "verify-ca" may not be used with sslrootcert=system/);
+	expected_stderr =>
+	  qr/weak sslmode "verify-ca" may not be used with sslrootcert=system/);
 
 SKIP:
 {
@@ -503,7 +507,9 @@ SKIP:
 	$node->connect_fails(
 		"$common_connstr host=common-name.pg-ssltest.test.bad",
 		"sslrootcert=system defaults to sslmode=verify-full",
-		expected_stderr => qr/server certificate for "common-name.pg-ssltest.test" does not match host name "common-name.pg-ssltest.test.bad"/);
+		expected_stderr =>
+		  qr/server certificate for "common-name.pg-ssltest.test" does not match host name "common-name.pg-ssltest.test.bad"/
+	);
 }
 
 # Test that the CRL works
@@ -530,10 +536,10 @@ $node->connect_fails(
 # pg_stat_ssl
 command_like(
 	[
-		'psql',        '-X',
-		'-A',          '-F',
-		',',           '-P',
-		'null=_null_', '-d',
+		'psql', '-X',
+		'-A', '-F',
+		',', '-P',
+		'null=_null_', '-d',
 		"$common_connstr sslrootcert=invalid", '-c',
 		"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
 	],
@@ -766,8 +772,8 @@ $node->connect_fails(
 	qr/certificate authentication failed for user "anotheruser"/,
 	# certificate authentication should be logged even on failure
 	# temporarily(?) skip this check due to timing issue
-# log_like =>
-# [qr/connection authenticated: identity="CN=ssltestuser" method=cert/],
+	# log_like =>
+	# [qr/connection authenticated: identity="CN=ssltestuser" method=cert/],
 );
 
 # revoked client cert
@@ -777,10 +783,10 @@ $node->connect_fails(
 	"certificate authorization fails with revoked client cert",
 	expected_stderr => qr/SSL error: sslv3 alert certificate revoked/,
 	# temporarily(?) skip this check due to timing issue
-# log_like => [
-# qr{Client certificate verification failed at depth 0: certificate revoked},
-# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"},
-# ],
+	# log_like => [
+	# qr{Client certificate verification failed at depth 0: certificate revoked},
+	# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"},
+	# ],
 	# revoked certificates should not authenticate the user
 	log_unlike => [qr/connection authenticated:/],);
 
@@ -818,7 +824,7 @@ $node->connect_ok(
 # intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file
 switch_server_cert($node, certfile => 'server-cn-only', cafile => 'root_ca');
 $common_connstr =
-	"$default_ssl_connstr user=ssltestuser dbname=certdb "
+	"$default_ssl_connstr user=ssltestuser dbname=certdb "
 	. sslkey('client.key') .
" sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost"; @@ -831,26 +837,30 @@ $node->connect_fails( "intermediate client certificate is missing", expected_stderr => qr/SSL error: tlsv1 alert unknown ca/, # temporarily(?) skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, -# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656576, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, + # qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656576, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ] ); $node->connect_fails( - "$common_connstr sslmode=require sslcert=ssl/client-long.crt " . sslkey('client-long.key'), + "$common_connstr sslmode=require sslcert=ssl/client-long.crt " + . sslkey('client-long.key'), "logged client certificate Subjects are truncated if they're too long", expected_stderr => qr/SSL error: tlsv1 alert unknown ca/, # temporarily(?) skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, -# qr{Failed certificate data \(unverified\): subject "\.\.\./CN=ssl-123456789012345678901234567890123456789012345678901234567890", serial number 2315418733629425152, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, + # qr{Failed certificate data \(unverified\): subject "\.\.\./CN=ssl-123456789012345678901234567890123456789012345678901234567890", serial number 2315418733629425152, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ] ); # Use an invalid cafile here so that the next test won't be able to verify the # client CA. -switch_server_cert($node, certfile => 'server-cn-only', cafile => 'server-cn-only'); +switch_server_cert( + $node, + certfile => 'server-cn-only', + cafile => 'server-cn-only'); # intermediate CA is provided but doesn't have a trusted root (checks error # logging for cert chain depths > 0) @@ -859,17 +869,17 @@ $node->connect_fails( "intermediate client certificate is untrusted", expected_stderr => qr/SSL error: tlsv1 alert unknown ca/, # temporarily(?) 
-# log_like => [
-# qr{Client certificate verification failed at depth 1: unable to get local issuer certificate},
-# qr{Failed certificate data \(unverified\): subject "/CN=Test CA for PostgreSQL SSL regression test client certs", serial number 2315134995201656577, issuer "/CN=Test root CA for PostgreSQL SSL regression test suite"},
-# ]
+	# log_like => [
+	# qr{Client certificate verification failed at depth 1: unable to get local issuer certificate},
+	# qr{Failed certificate data \(unverified\): subject "/CN=Test CA for PostgreSQL SSL regression test client certs", serial number 2315134995201656577, issuer "/CN=Test root CA for PostgreSQL SSL regression test suite"},
+	# ]
 );
 
 # test server-side CRL directory
 switch_server_cert(
 	$node,
 	certfile => 'server-cn-only',
-	crldir   => 'root+client-crldir');
+	crldir => 'root+client-crldir');
 
 # revoked client cert
 $node->connect_fails(
@@ -878,10 +888,10 @@ $node->connect_fails(
 	"certificate authorization fails with revoked client cert with server-side CRL directory",
 	expected_stderr => qr/SSL error: sslv3 alert certificate revoked/,
 	# temporarily(?) skip this check due to timing issue
-# log_like => [
-# qr{Client certificate verification failed at depth 0: certificate revoked},
-# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"},
-# ]
+	# log_like => [
+	# qr{Client certificate verification failed at depth 0: certificate revoked},
+	# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"},
+	# ]
 );
 
 # revoked client cert, non-ASCII subject
@@ -891,10 +901,10 @@ $node->connect_fails(
 	"certificate authorization fails with revoked UTF-8 client cert with server-side CRL directory",
 	expected_stderr => qr/SSL error: sslv3 alert certificate revoked/,
 	# temporarily(?) skip this check due to timing issue
-# log_like => [
-# qr{Client certificate verification failed at depth 0: certificate revoked},
-# qr{Failed certificate data \(unverified\): subject "/CN=\\xce\\x9f\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xad\\xce\\xb1\\xcf\\x82", serial number 2315420958437414144, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"},
-# ]
+	# log_like => [
+	# qr{Client certificate verification failed at depth 0: certificate revoked},
+	# qr{Failed certificate data \(unverified\): subject "/CN=\\xce\\x9f\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xad\\xce\\xb1\\xcf\\x82", serial number 2315420958437414144, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"},
+	# ]
 );
 
 done_testing();
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index 8038135697..28c54bdb09 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -22,7 +22,8 @@ if ($ENV{with_ssl} ne 'openssl')
 }
 elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
 {
-	plan skip_all => 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
+	plan skip_all =>
+	  'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
 }
 
 my $ssl_server = SSL::Server->new();
@@ -70,7 +71,7 @@ $node->start;
 $ssl_server->configure_test_server_for_ssl(
 	$node, $SERVERHOSTADDR, $SERVERHOSTCIDR, "scram-sha-256",
-	'password'     => "pass",
+	'password' => "pass",
 	'password_enc' => "scram-sha-256");
 switch_server_cert($node, certfile => 'server-cn-only');
 $ENV{PGPASSWORD} = "pass";
@@ -117,7 +118,7 @@ $node->connect_fails(
 # because channel binding is not performed.  Note that ssl/client.key may
 # be used in a different test, so the name of this temporary client key
 # is chosen here to be unique.
-my $cert_tempdir   = PostgreSQL::Test::Utils::tempdir();
+my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
 my $client_tmp_key = "$cert_tempdir/client_scram.key";
 copy("ssl/client.key", "$cert_tempdir/client_scram.key")
   or die
diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl
index c073625213..5306aad802 100644
--- a/src/test/ssl/t/003_sslinfo.pl
+++ b/src/test/ssl/t/003_sslinfo.pl
@@ -20,7 +20,8 @@ if ($ENV{with_ssl} ne 'openssl')
 }
 elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
 {
-	plan skip_all => 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
+	plan skip_all =>
+	  'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
 }
 
 #### Some configuration
@@ -172,9 +173,9 @@ is($result, 'CA:FALSE|t', 'extract extension from cert');
 # Sanity tests for sslcertmode, using ssl_client_cert_present()
 my @cases = (
-	{ opts => "sslcertmode=allow",                 present => 't' },
+	{ opts => "sslcertmode=allow", present => 't' },
 	{ opts => "sslcertmode=allow sslcert=invalid", present => 'f' },
-	{ opts => "sslcertmode=disable",               present => 'f' },);
+	{ opts => "sslcertmode=disable", present => 'f' },);
 
 if ($supports_sslcertmode_require)
 {
 	push(@cases, { opts => "sslcertmode=require", present => 't' });
diff --git a/src/test/ssl/t/SSL/Backend/OpenSSL.pm b/src/test/ssl/t/SSL/Backend/OpenSSL.pm
index b52035100a..a762f43634 100644
--- a/src/test/ssl/t/SSL/Backend/OpenSSL.pm
+++ b/src/test/ssl/t/SSL/Backend/OpenSSL.pm
@@ -71,8 +71,8 @@ sub init
 	chmod(0600, glob "$pgdata/server-*.key")
 	  or die "failed to change permissions on server keys: $!";
 	_copy_files("ssl/root+client_ca.crt", $pgdata);
-	_copy_files("ssl/root_ca.crt",        $pgdata);
-	_copy_files("ssl/root+client.crl",    $pgdata);
+	_copy_files("ssl/root_ca.crt", $pgdata);
+	_copy_files("ssl/root+client.crl", $pgdata);
mkdir("$pgdata/root+client-crldir") or die "unable to create server CRL dir $pgdata/root+client-crldir: $!"; _copy_files("ssl/root+client-crldir/*", "$pgdata/root+client-crldir/"); @@ -84,11 +84,11 @@ sub init # the tests. To get the full path for inclusion in connection strings, the # %key hash can be interrogated. my $cert_tempdir = PostgreSQL::Test::Utils::tempdir(); - my @keys = ( - "client.key", "client-revoked.key", - "client-der.key", "client-encrypted-pem.key", + my @keys = ( + "client.key", "client-revoked.key", + "client-der.key", "client-encrypted-pem.key", "client-encrypted-der.key", "client-dn.key", - "client_ext.key", "client-long.key", + "client_ext.key", "client-long.key", "client-revoked-utf8.key"); foreach my $keyfile (@keys) { @@ -174,13 +174,13 @@ sub set_server_cert { my ($self, $params) = @_; - $params->{cafile} = 'root+client_ca' unless defined $params->{cafile}; + $params->{cafile} = 'root+client_ca' unless defined $params->{cafile}; $params->{crlfile} = 'root+client.crl' unless defined $params->{crlfile}; $params->{keyfile} = $params->{certfile} unless defined $params->{keyfile}; my $sslconf = - "ssl_ca_file='$params->{cafile}.crt'\n" + "ssl_ca_file='$params->{cafile}.crt'\n" . "ssl_cert_file='$params->{certfile}.crt'\n" . "ssl_key_file='$params->{keyfile}.key'\n" . "ssl_crl_file='$params->{crlfile}'\n"; diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm index b6344b936a..2c5c055222 100644 --- a/src/test/ssl/t/SSL/Server.pm +++ b/src/test/ssl/t/SSL/Server.pm @@ -94,7 +94,7 @@ sub new bless $self, $class; if ($flavor =~ /\Aopenssl\z/i) { - $self->{flavor} = 'openssl'; + $self->{flavor} = 'openssl'; $self->{backend} = SSL::Backend::OpenSSL->new(); } else @@ -115,7 +115,7 @@ string. sub sslkey { - my $self = shift; + my $self = shift; my $keyfile = shift; my $backend = $self->{backend}; @@ -143,10 +143,10 @@ sub configure_test_server_for_ssl my $self = shift; my ($node, $serverhost, $servercidr, $authmethod, %params) = @_; my $backend = $self->{backend}; - my $pgdata = $node->data_dir; + my $pgdata = $node->data_dir; my @databases = ( - 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', + 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', 'certdb_cn', 'verifydb'); # Create test users and databases @@ -229,7 +229,7 @@ Get the name of the currently used SSL backend. sub ssl_library { - my $self = shift; + my $self = shift; my $backend = $self->{backend}; return $backend->get_library(); @@ -284,11 +284,11 @@ returning. 
 sub switch_server_cert
 {
-	my $self    = shift;
-	my $node    = shift;
+	my $self = shift;
+	my $node = shift;
 	my $backend = $self->{backend};
-	my %params  = @_;
-	my $pgdata  = $node->data_dir;
+	my %params = @_;
+	my $pgdata = $node->data_dir;
 
 	open my $sslconf, '>', "$pgdata/sslconfig.conf";
 	print $sslconf "ssl=on\n";
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index 91aa068c95..0a399cdb82 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -233,7 +233,8 @@ $node_subscriber->safe_psql('postgres',
 );
 
 # Wait for initial table sync to finish
-$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_temp1');
+$node_subscriber->wait_for_subscription_sync($node_publisher,
+	'tap_sub_temp1');
 
 # Subscriber table will have no rows initially
 $result =
diff --git a/src/test/subscription/t/005_encoding.pl b/src/test/subscription/t/005_encoding.pl
index 297adfb3bb..2f0bf7730b 100644
--- a/src/test/subscription/t/005_encoding.pl
+++ b/src/test/subscription/t/005_encoding.pl
@@ -11,13 +11,13 @@ use Test::More;
 my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
 $node_publisher->init(
 	allows_streaming => 'logical',
-	extra            => [ '--locale=C', '--encoding=UTF8' ]);
+	extra => [ '--locale=C', '--encoding=UTF8' ]);
 $node_publisher->start;
 
 my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 $node_subscriber->init(
 	allows_streaming => 'logical',
-	extra            => [ '--locale=C', '--encoding=LATIN1' ]);
+	extra => [ '--locale=C', '--encoding=LATIN1' ]);
 $node_subscriber->start;
 
 my $ddl = "CREATE TABLE test1 (a int, b text);";
@@ -42,7 +42,7 @@ $node_publisher->wait_for_catchup('mysub');
 is( $node_subscriber->safe_psql(
 		'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}
-	),    # LATIN1
+	),	  # LATIN1
 	qq(1),
 	'data replicated to subscriber');
 
diff --git a/src/test/subscription/t/012_collation.pl b/src/test/subscription/t/012_collation.pl
index 4d947f1375..823550a31b 100644
--- a/src/test/subscription/t/012_collation.pl
+++ b/src/test/subscription/t/012_collation.pl
@@ -17,13 +17,13 @@ if ($ENV{with_icu} ne 'yes')
 my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
 $node_publisher->init(
 	allows_streaming => 'logical',
-	extra            => [ '--locale=C', '--encoding=UTF8' ]);
+	extra => [ '--locale=C', '--encoding=UTF8' ]);
 $node_publisher->start;
 
 my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 $node_subscriber->init(
 	allows_streaming => 'logical',
-	extra            => [ '--locale=C', '--encoding=UTF8' ]);
+	extra => [ '--locale=C', '--encoding=UTF8' ]);
 $node_subscriber->start;
 
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
diff --git a/src/test/subscription/t/014_binary.pl b/src/test/subscription/t/014_binary.pl
index feefbe734e..e5ce849c19 100644
--- a/src/test/subscription/t/014_binary.pl
+++ b/src/test/subscription/t/014_binary.pl
@@ -57,7 +57,7 @@ $node_publisher->safe_psql(
 my $publisher_connstring = $node_publisher->connstr . ' dbname=postgres';
 
 $node_subscriber->safe_psql('postgres',
-	"CREATE SUBSCRIPTION tsub CONNECTION '$publisher_connstring' "
+	"CREATE SUBSCRIPTION tsub CONNECTION '$publisher_connstring' "
 	  . "PUBLICATION tpub WITH (slot_name = tpub_slot, binary = true)");
 
 # Ensure the COPY command is executed in binary format on the publisher
diff --git a/src/test/subscription/t/015_stream.pl b/src/test/subscription/t/015_stream.pl
index 88344bdbaa..5c00711ef2 100644
--- a/src/test/subscription/t/015_stream.pl
+++ b/src/test/subscription/t/015_stream.pl
@@ -30,13 +30,13 @@ sub test_streaming
 	# Interleave a pair of transactions, each exceeding the 64kB limit.
 	my $offset = 0;
 
-	my $h = $node_publisher->background_psql('postgres',
-		on_error_stop => 0);
+	my $h = $node_publisher->background_psql('postgres', on_error_stop => 0);
 
 	# Check the subscriber log from now on.
 	$offset = -s $node_subscriber->logfile;
 
-	$h->query_safe(q{
+	$h->query_safe(
+		q{
 	BEGIN;
 	INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
 	UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -52,7 +52,7 @@ sub test_streaming
 	});
 	$h->query_safe('COMMIT');
-    # errors make the next test fail, so ignore them here
+	# errors make the next test fail, so ignore them here
 	$h->quit;
 
 	$node_publisher->wait_for_catchup($appname);
@@ -211,15 +211,15 @@ $node_subscriber->reload;
 $node_subscriber->safe_psql('postgres', q{SELECT 1});
 
 # Interleave a pair of transactions, each exceeding the 64kB limit.
-my $h = $node_publisher->background_psql('postgres',
-	on_error_stop => 0);
+my $h = $node_publisher->background_psql('postgres', on_error_stop => 0);
 
 # Confirm if a deadlock between the leader apply worker and the parallel apply
 # worker can be detected.
 
 my $offset = -s $node_subscriber->logfile;
 
-$h->query_safe(q{
+$h->query_safe(
+	q{
 BEGIN;
 INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i);
 });
@@ -260,7 +260,8 @@ $node_subscriber->safe_psql('postgres',
 # Check the subscriber log from now on.
 $offset = -s $node_subscriber->logfile;
 
-$h->query_safe(q{
+$h->query_safe(
+	q{
 BEGIN;
 INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i);
 });
@@ -296,7 +297,8 @@ is($result, qq(10000), 'data replicated to subscriber after dropping index');
 $node_subscriber->append_conf('postgresql.conf',
 	'logical_replication_mode = immediate');
 # Reset the log_min_messages to default.
-$node_subscriber->append_conf('postgresql.conf', "log_min_messages = warning");
+$node_subscriber->append_conf('postgresql.conf',
+	"log_min_messages = warning");
 $node_subscriber->reload;
 
 # Run a query to make sure that the reload has taken effect.
@@ -317,7 +319,8 @@ $node_publisher->wait_for_catchup($appname);
 # Check that transaction is committed on subscriber
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT count(*) FROM test_tab_2");
-is($result, qq(15000), 'parallel apply worker replayed all changes from file');
+is($result, qq(15000),
+	'parallel apply worker replayed all changes from file');
 
 $node_subscriber->stop;
 $node_publisher->stop;
diff --git a/src/test/subscription/t/018_stream_subxact_abort.pl b/src/test/subscription/t/018_stream_subxact_abort.pl
index 2b67ae1e0a..91d19ae672 100644
--- a/src/test/subscription/t/018_stream_subxact_abort.pl
+++ b/src/test/subscription/t/018_stream_subxact_abort.pl
@@ -205,7 +205,8 @@ test_streaming($node_publisher, $node_subscriber, $appname, 1);
 $node_subscriber->append_conf('postgresql.conf',
 	'logical_replication_mode = immediate');
 # Reset the log_min_messages to default.
-$node_subscriber->append_conf('postgresql.conf', "log_min_messages = warning");
+$node_subscriber->append_conf('postgresql.conf',
+	"log_min_messages = warning");
 $node_subscriber->reload;
 
 # Run a query to make sure that the reload has taken effect.
diff --git a/src/test/subscription/t/023_twophase_stream.pl b/src/test/subscription/t/023_twophase_stream.pl
index f4af44414b..fdcc4b359d 100644
--- a/src/test/subscription/t/023_twophase_stream.pl
+++ b/src/test/subscription/t/023_twophase_stream.pl
@@ -391,7 +391,8 @@ test_streaming($node_publisher, $node_subscriber, $appname, 1);
 $node_subscriber->append_conf('postgresql.conf',
 	'logical_replication_mode = immediate');
 # Reset the log_min_messages to default.
-$node_subscriber->append_conf('postgresql.conf', "log_min_messages = warning");
+$node_subscriber->append_conf('postgresql.conf',
+	"log_min_messages = warning");
 $node_subscriber->reload;
 
 # Run a query to make sure that the reload has taken effect.
diff --git a/src/test/subscription/t/025_rep_changes_for_schema.pl b/src/test/subscription/t/025_rep_changes_for_schema.pl
index a22ae0a2ac..8543f52710 100644
--- a/src/test/subscription/t/025_rep_changes_for_schema.pl
+++ b/src/test/subscription/t/025_rep_changes_for_schema.pl
@@ -63,7 +63,8 @@ $node_subscriber->safe_psql('postgres',
 );
 
 # Wait for initial table sync to finish
-$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_schema');
+$node_subscriber->wait_for_subscription_sync($node_publisher,
+	'tap_sub_schema');
 
 # Check the schema table data is synced up
 my $result = $node_subscriber->safe_psql('postgres',
diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl
index 96a6d686eb..45e51c5a52 100644
--- a/src/test/subscription/t/026_stats.pl
+++ b/src/test/subscription/t/026_stats.pl
@@ -43,7 +43,7 @@ sub create_sub_pub_w_errors
 	]);
 
 	# Set up publication.
-	my $pub_name          = $table_name . '_pub';
+	my $pub_name = $table_name . '_pub';
 	my $publisher_connstr = $node_publisher->connstr . qq( dbname=$db);
 	$node_publisher->safe_psql($db,
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
index 8a7e79caca..d7a7e3ef5b 100644
--- a/src/test/subscription/t/027_nosuperuser.pl
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -81,7 +81,7 @@ sub grant_superuser
 # "regress_admin". For partitioned tables, layout the partitions differently
 # on the publisher than on the subscriber.
 #
-$node_publisher  = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
 $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 $node_publisher->init(allows_streaming => 'logical');
 $node_subscriber->init;
@@ -89,10 +89,10 @@ $node_publisher->start;
 $node_subscriber->start;
 $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
 my %remainder_a = (
-	publisher  => 0,
+	publisher => 0,
 	subscriber => 1);
 my %remainder_b = (
-	publisher  => 1,
+	publisher => 1,
 	subscriber => 0);
 
 for my $node ($node_publisher, $node_subscriber)
@@ -197,8 +197,7 @@ publish_insert("alice.hashpart", 103);
 publish_update("alice.hashpart", 102 => 120);
 publish_delete("alice.hashpart", 101);
 expect_replication("alice.hashpart", 2, 103, 120,
-	"nosuperuser admin with privileges on role can replicate into hashpart"
-);
+	"nosuperuser admin with privileges on role can replicate into hashpart");
 
 # Force RLS on the target table and check that replication fails.
 $node_subscriber->safe_psql(
@@ -223,8 +222,7 @@ $node_subscriber->safe_psql(
 	'postgres', qq(
 ALTER TABLE alice.unpartitioned NO FORCE ROW LEVEL SECURITY;
 ));
 expect_replication("alice.unpartitioned", 3, 11, 15,
-	"non-superuser admin can replicate insert if rls is not forced"
-);
+	"non-superuser admin can replicate insert if rls is not forced");
 
 $node_subscriber->safe_psql(
 	'postgres', qq(
@@ -237,8 +235,7 @@ expect_failure(
 	11,
 	15,
 	qr/ERROR: ( [A-Z0-9]+:)? user "regress_alice" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
-	"replication of update into table with forced rls fails"
-);
+	"replication of update into table with forced rls fails");
 $node_subscriber->safe_psql(
 	'postgres', qq(
 ALTER TABLE alice.unpartitioned NO FORCE ROW LEVEL SECURITY;
@@ -258,8 +255,7 @@ expect_failure(
 	13,
 	17,
 	qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
-	"replication of insert fails if table owner lacks insert permission"
-);
+	"replication of insert fails if table owner lacks insert permission");
 
 # alice needs INSERT but not SELECT to replicate an INSERT.
 $node_subscriber->safe_psql(
diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl
index b0d4b2d5b1..aec483f785 100644
--- a/src/test/subscription/t/028_row_filter.pl
+++ b/src/test/subscription/t/028_row_filter.pl
@@ -18,7 +18,7 @@ $node_subscriber->init(allows_streaming => 'logical');
 $node_subscriber->start;
 
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-my $appname           = 'tap_sub';
+my $appname = 'tap_sub';
 
 # ====================================================================
 # Testcase start: FOR ALL TABLES
@@ -544,13 +544,11 @@ is( $result, qq(20
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT a FROM tab_rowfilter_parent_sync ORDER BY 1");
-is( $result, qq(16),
-	'check initial data copy from tab_rowfilter_parent_sync');
+is($result, qq(16), 'check initial data copy from tab_rowfilter_parent_sync');
 
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT a FROM tab_rowfilter_child_sync ORDER BY 1");
-is( $result, qq(),
-	'check initial data copy from tab_rowfilter_child_sync');
+is($result, qq(), 'check initial data copy from tab_rowfilter_child_sync');
 
 # The following commands are executed after CREATE SUBSCRIPTION, so these SQL
 # commands are for testing normal logical replication behavior.
diff --git a/src/test/subscription/t/030_origin.pl b/src/test/subscription/t/030_origin.pl
index b9b1351ddb..9ca1fa25d8 100644
--- a/src/test/subscription/t/030_origin.pl
+++ b/src/test/subscription/t/030_origin.pl
@@ -9,10 +9,10 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $subname_AB  = 'tap_sub_A_B';
+my $subname_AB = 'tap_sub_A_B';
 my $subname_AB2 = 'tap_sub_A_B_2';
-my $subname_BA  = 'tap_sub_B_A';
-my $subname_BC  = 'tap_sub_B_C';
+my $subname_BA = 'tap_sub_B_A';
+my $subname_BC = 'tap_sub_B_C';
 
 my $result;
 my $stdout;
diff --git a/src/test/subscription/t/031_column_list.pl b/src/test/subscription/t/031_column_list.pl
index b67292ba9c..dbff806040 100644
--- a/src/test/subscription/t/031_column_list.pl
+++ b/src/test/subscription/t/031_column_list.pl
@@ -20,7 +20,7 @@ $node_subscriber->append_conf('postgresql.conf',
 $node_subscriber->start;
 
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-my $offset            = 0;
+my $offset = 0;
 
 # setup tables on both nodes
diff --git a/src/test/subscription/t/032_subscribe_use_index.pl b/src/test/subscription/t/032_subscribe_use_index.pl
index 76d7c85fff..576eec6a57 100644
--- a/src/test/subscription/t/032_subscribe_use_index.pl
+++ b/src/test/subscription/t/032_subscribe_use_index.pl
@@ -18,8 +18,8 @@ $node_subscriber->init(allows_streaming => 'logical');
 $node_subscriber->start;
 
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-my $appname           = 'tap_sub';
-my $result            = '';
+my $appname = 'tap_sub';
+my $result = '';
 
 # =============================================================================
 # Testcase start: Subscription can use index with multiple rows and columns
@@ -60,19 +60,24 @@ $node_publisher->safe_psql('postgres',
 # wait until the index is used on the subscriber
 $node_publisher->wait_for_catchup($appname);
-$node_subscriber->poll_query_until(
-	'postgres', q{select (idx_scan = 4) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';}
-) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates 4 rows via index";
+$node_subscriber->poll_query_until('postgres',
+	q{select (idx_scan = 4) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';}
+  )
+  or die
+  "Timed out while waiting for check subscriber tap_sub_rep_full updates 4 rows via index";
 
 # make sure that the subscriber has the correct data after the UPDATE
 $result = $node_subscriber->safe_psql('postgres',
-	"select count(*) from test_replica_id_full WHERE (x = 100 and y = '200')");
-is($result, qq(2), 'ensure subscriber has the correct data at the end of the test');
+	"select count(*) from test_replica_id_full WHERE (x = 100 and y = '200')"
+);
+is($result, qq(2),
+	'ensure subscriber has the correct data at the end of the test');
 
 # make sure that the subscriber has the correct data after the first DELETE
 $result = $node_subscriber->safe_psql('postgres',
 	"select count(*) from test_replica_id_full where x in (5, 6)");
-is($result, qq(0), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(0),
+	'ensure subscriber has the correct data at the end of the test');
 
 # cleanup pub
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
@@ -145,17 +150,21 @@ $node_publisher->safe_psql('postgres',
 # wait until the index is used on the subscriber
 $node_publisher->wait_for_catchup($appname);
-$node_subscriber->poll_query_until(
-	'postgres', q{select sum(idx_scan)=3 from pg_stat_all_indexes where indexrelname ilike 'users_table_part_%';}
-) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates partitioned table";
+$node_subscriber->poll_query_until('postgres',
	q{select sum(idx_scan)=3 from pg_stat_all_indexes where indexrelname ilike 'users_table_part_%';}
+  )
+  or die
+  "Timed out while waiting for check subscriber tap_sub_rep_full updates partitioned table";
 
 # make sure that the subscriber has the correct data
 $result = $node_subscriber->safe_psql('postgres',
 	"select sum(user_id+value_1+value_2) from users_table_part");
-is($result, qq(10907), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(10907),
+	'ensure subscriber has the correct data at the end of the test');
 
 $result = $node_subscriber->safe_psql('postgres',
 	"select count(DISTINCT(user_id,value_1, value_2)) from users_table_part");
-is($result, qq(99), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(99),
+	'ensure subscriber has the correct data at the end of the test');
 
 # cleanup pub
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
@@ -182,15 +191,18 @@ $node_subscriber->safe_psql('postgres',
 
 # index with only an expression
 $node_subscriber->safe_psql('postgres',
-	"CREATE INDEX people_names_expr_only ON people ((firstname || ' ' || lastname))");
+	"CREATE INDEX people_names_expr_only ON people ((firstname || ' ' || lastname))"
+);
 
 # partial index
 $node_subscriber->safe_psql('postgres',
-	"CREATE INDEX people_names_partial ON people(firstname) WHERE (firstname = 'first_name_1')");
+	"CREATE INDEX people_names_partial ON people(firstname) WHERE (firstname = 'first_name_1')"
+);
 
 # insert some initial data
 $node_publisher->safe_psql('postgres',
-	"INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0,200) i");
+	"INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0,200) i"
+);
 
 # create pub/sub
 $node_publisher->safe_psql('postgres',
@@ -204,31 +216,41 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
 
 # update 2 rows
 $node_publisher->safe_psql('postgres',
-	"UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'");
+	"UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'"
+);
 $node_publisher->safe_psql('postgres',
-	"UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_2' AND lastname = 'last_name_2'");
+	"UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_2' AND lastname = 'last_name_2'"
+);
 
 # make sure none of the indexes is used on the subscriber
 $node_publisher->wait_for_catchup($appname);
 $result = $node_subscriber->safe_psql('postgres',
-	"select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')");
-is($result, qq(0), 'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions');
+	"select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')"
+);
+is($result, qq(0),
+	'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions'
+);
 
 $node_publisher->safe_psql('postgres',
 	"DELETE FROM people WHERE firstname = 'first_name_3'");
 $node_publisher->safe_psql('postgres',
-	"DELETE FROM people WHERE firstname = 'first_name_4' AND lastname = 'last_name_4'");
+	"DELETE FROM people WHERE firstname = 'first_name_4' AND lastname = 'last_name_4'"
+);
 
 # make sure the index is not used on the subscriber
 $node_publisher->wait_for_catchup($appname);
 $result = $node_subscriber->safe_psql('postgres',
-	"select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')");
-is($result, qq(0), 'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions');
+	"select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')"
+);
+is($result, qq(0),
+	'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions'
+);
 
 # make sure that the subscriber has the correct data
-$result = $node_subscriber->safe_psql('postgres',
-	"SELECT count(*) FROM people");
-is($result, qq(199), 'ensure subscriber has the correct data at the end of the test');
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people");
+is($result, qq(199),
+	'ensure subscriber has the correct data at the end of the test');
 
 # cleanup pub
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
@@ -252,11 +274,13 @@ $node_publisher->safe_psql('postgres',
 $node_subscriber->safe_psql('postgres',
 	"CREATE TABLE people (firstname text, lastname text)");
 $node_subscriber->safe_psql('postgres',
-	"CREATE INDEX people_names ON people (firstname, lastname, (firstname || ' ' || lastname))");
+	"CREATE INDEX people_names ON people (firstname, lastname, (firstname || ' ' || lastname))"
+);
 
 # insert some initial data
 $node_publisher->safe_psql('postgres',
-	"INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0, 20) i");
+	"INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0, 20) i"
+);
 
 # create pub/sub
 $node_publisher->safe_psql('postgres',
@@ -270,7 +294,8 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
 
 # update 1 row
 $node_publisher->safe_psql('postgres',
-	"UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'");
+	"UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'"
+);
 
 # delete the updated row
 $node_publisher->safe_psql('postgres',
@@ -278,22 +303,25 @@ $node_publisher->safe_psql('postgres',
 
 # wait until the index is used on the subscriber
 $node_publisher->wait_for_catchup($appname);
-$node_subscriber->poll_query_until(
-	'postgres', q{select idx_scan=2 from pg_stat_all_indexes where indexrelname = 'people_names';}
-) or die "Timed out while waiting for check subscriber tap_sub_rep_full deletes two rows via index scan with index on expressions and columns";
+$node_subscriber->poll_query_until('postgres',
	q{select idx_scan=2 from pg_stat_all_indexes where indexrelname = 'people_names';}
+  )
+  or die
+  "Timed out while waiting for check subscriber tap_sub_rep_full deletes two rows via index scan with index on expressions and columns";
 
 # make sure that the subscriber has the correct data
-$result = $node_subscriber->safe_psql('postgres',
-	"SELECT count(*) FROM people");
-is($result, qq(20), 'ensure subscriber has the correct data at the end of the test');
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people");
+is($result, qq(20),
+	'ensure subscriber has the correct data at the end of the test');
 
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT count(*) FROM people WHERE firstname = 'no-name'");
-is($result, qq(0), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(0),
+	'ensure subscriber has the correct data at the end of the test');
 
 # now, drop the index with the expression, we'll use sequential scan
-$node_subscriber->safe_psql('postgres',
-	"DROP INDEX people_names");
+$node_subscriber->safe_psql('postgres', "DROP INDEX people_names");
 
 # delete 1 row
 $node_publisher->safe_psql('postgres',
@@ -303,7 +331,8 @@ $node_publisher->safe_psql('postgres',
 $node_publisher->wait_for_catchup($appname);
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT count(*) FROM people WHERE lastname = 'last_name_18'");
-is($result, qq(0), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(0),
+	'ensure subscriber has the correct data at the end of the test');
 
 # cleanup pub
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
@@ -319,19 +348,16 @@ $node_subscriber->safe_psql('postgres', "DROP TABLE people");
 
 # Testcase start: Null values and missing column
 $node_publisher->safe_psql('postgres',
-	"CREATE TABLE test_replica_id_full (x int)"
-);
+	"CREATE TABLE test_replica_id_full (x int)");
 
 $node_publisher->safe_psql('postgres',
 	"ALTER TABLE test_replica_id_full REPLICA IDENTITY FULL");
 
 $node_subscriber->safe_psql('postgres',
-	"CREATE TABLE test_replica_id_full (x int, y int)"
-);
+	"CREATE TABLE test_replica_id_full (x int, y int)");
 
 $node_subscriber->safe_psql('postgres',
-	"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full(x,y)"
-);
+	"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full(x,y)");
 
 # create pub/sub
 $node_publisher->safe_psql('postgres',
@@ -352,19 +378,23 @@ $node_publisher->safe_psql('postgres',
 
 # check if the index is used even when the index has NULL values
 $node_publisher->wait_for_catchup($appname);
-$node_subscriber->poll_query_until(
-	'postgres', q{select idx_scan=1 from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';}
-) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates test_replica_id_full table";
+$node_subscriber->poll_query_until('postgres',
	q{select idx_scan=1 from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';}
+  )
+  or die
+  "Timed out while waiting for check subscriber tap_sub_rep_full updates test_replica_id_full table";
 
 # make sure that the subscriber has the correct data
 $result = $node_subscriber->safe_psql('postgres',
 	"select sum(x) from test_replica_id_full WHERE y IS NULL");
-is($result, qq(7), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(7),
+	'ensure subscriber has the correct data at the end of the test');
 
 # make sure that the subscriber has the correct data
 $result = $node_subscriber->safe_psql('postgres',
 	"select count(*) from test_replica_id_full WHERE y IS NULL");
-is($result, qq(3), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(3),
+	'ensure subscriber has the correct data at the end of the test');
 
 # cleanup pub
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
@@ -394,11 +424,13 @@ $node_publisher->safe_psql('postgres',
 $node_subscriber->safe_psql('postgres',
 	"CREATE TABLE test_replica_id_full (x int, y int)");
 $node_subscriber->safe_psql('postgres',
-	"CREATE UNIQUE INDEX test_replica_id_full_idxy ON test_replica_id_full(x,y)");
+	"CREATE UNIQUE INDEX test_replica_id_full_idxy ON test_replica_id_full(x,y)"
+);
 
 # insert some initial data
 $node_publisher->safe_psql('postgres',
-	"INSERT INTO test_replica_id_full SELECT i, i FROM generate_series(0,21) i");
+	"INSERT INTO test_replica_id_full SELECT i, i FROM generate_series(0,21) i"
+);
 
 # create pub/sub
 $node_publisher->safe_psql('postgres',
@@ -412,7 +444,8 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, $appname);
 
 # duplicate the data in subscriber for y column
 $node_subscriber->safe_psql('postgres',
-	"INSERT INTO test_replica_id_full SELECT i+100, i FROM generate_series(0,21) i");
+	"INSERT INTO test_replica_id_full SELECT i+100, i FROM generate_series(0,21) i"
+);
 
 # now, we update only 1 row on the publisher and expect the subscriber to only
 # update 1 row although there are two tuples with y = 15 on the subscriber
@@ -421,15 +454,18 @@ $node_publisher->safe_psql('postgres',
 
 # wait until the index is used on the subscriber
 $node_publisher->wait_for_catchup($appname);
-$node_subscriber->poll_query_until(
-	'postgres', q{select (idx_scan = 1) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idxy';}
-) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates one row via index";
+$node_subscriber->poll_query_until('postgres',
	q{select (idx_scan = 1) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idxy';}
+  )
+  or die
+  "Timed out while waiting for check subscriber tap_sub_rep_full updates one row via index";
 
 # make sure that the subscriber has the correct data
 # we only updated 1 row
 $result = $node_subscriber->safe_psql('postgres',
 	"SELECT count(*) FROM test_replica_id_full WHERE x = 2000");
-is($result, qq(1), 'ensure subscriber has the correct data at the end of the test');
+is($result, qq(1),
+	'ensure subscriber has the correct data at the end of the test');
 
 # cleanup pub
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full");
diff --git a/src/test/subscription/t/033_run_as_table_owner.pl b/src/test/subscription/t/033_run_as_table_owner.pl
index cabc8a7c59..0aa8a093ef 100644
--- a/src/test/subscription/t/033_run_as_table_owner.pl
+++ b/src/test/subscription/t/033_run_as_table_owner.pl
@@ -73,7 +73,7 @@ sub revoke_superuser
 # "regress_admin". For partitioned tables, layout the partitions differently
 # on the publisher than on the subscriber.
 #
-$node_publisher  = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
 $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 $node_publisher->init(allows_streaming => 'logical');
 $node_subscriber->init;
@@ -120,18 +120,14 @@ publish_insert("alice.unpartitioned", 3);
 publish_insert("alice.unpartitioned", 5);
 publish_update("alice.unpartitioned", 1 => 7);
 publish_delete("alice.unpartitioned", 3);
-expect_replication("alice.unpartitioned", 2, 5, 7,
-	"superuser can replicate");
+expect_replication("alice.unpartitioned", 2, 5, 7, "superuser can replicate");
 
 # Revoke superuser privilege for "regress_admin", and verify that we now
 # fail to replicate an insert.
 revoke_superuser("regress_admin");
 publish_insert("alice.unpartitioned", 9);
 expect_failure(
-	"alice.unpartitioned",
-	2,
-	5,
-	7,
+	"alice.unpartitioned", 2, 5, 7,
 	qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
 	"with no privileges cannot replicate");
 
@@ -144,8 +140,7 @@ GRANT INSERT,UPDATE,DELETE ON alice.unpartitioned TO regress_admin;
 REVOKE SELECT ON alice.unpartitioned FROM regress_admin;
 ));
 expect_replication("alice.unpartitioned", 3, 5, 9,
-	"with INSERT privilege can replicate INSERT"
-);
+	"with INSERT privilege can replicate INSERT");
 
 # We can't yet replicate an UPDATE because we don't have SELECT.
 publish_update("alice.unpartitioned", 5 => 11);
@@ -156,8 +151,7 @@ expect_failure(
 	5,
 	9,
 	qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
-	"without SELECT privilege cannot replicate UPDATE or DELETE"
-);
+	"without SELECT privilege cannot replicate UPDATE or DELETE");
 
 # After granting SELECT, replication resumes.
 $node_subscriber->safe_psql(
@@ -166,8 +160,7 @@ SET SESSION AUTHORIZATION regress_alice;
 GRANT SELECT ON alice.unpartitioned TO regress_admin;
 ));
 expect_replication("alice.unpartitioned", 2, 7, 11,
-	"with all privileges can replicate"
-);
+	"with all privileges can replicate");
 
 # Remove all privileges again. Instead, give the ability to SET ROLE to
 # regress_alice.
@@ -189,8 +182,7 @@ expect_failure(
 	7,
 	11,
 	qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
-	"with SET ROLE but not INHERIT cannot replicate"
-);
+	"with SET ROLE but not INHERIT cannot replicate");
 
 # Now remove SET ROLE and add INHERIT and check that things start working.
 $node_subscriber->safe_psql(
@@ -198,7 +190,6 @@ $node_subscriber->safe_psql(
 GRANT regress_alice TO regress_admin WITH INHERIT TRUE, SET FALSE;
 ));
 expect_replication("alice.unpartitioned", 3, 7, 13,
-	"with INHERIT but not SET ROLE can replicate"
-);
+	"with INHERIT but not SET ROLE can replicate");
 
 done_testing();
diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl
index b832ddcf63..4fabc44168 100644
--- a/src/test/subscription/t/100_bugs.pl
+++ b/src/test/subscription/t/100_bugs.pl
@@ -127,8 +127,8 @@ $node_twoways->start;
 for my $db (qw(d1 d2))
 {
 	$node_twoways->safe_psql('postgres', "CREATE DATABASE $db");
-	$node_twoways->safe_psql($db,        "CREATE TABLE t (f int)");
-	$node_twoways->safe_psql($db,        "CREATE TABLE t2 (f int)");
+	$node_twoways->safe_psql($db, "CREATE TABLE t (f int)");
+	$node_twoways->safe_psql($db, "CREATE TABLE t2 (f int)");
 }
 
 my $rows = 3000;
@@ -141,7 +141,7 @@ $node_twoways->safe_psql(
 	});
 
 $node_twoways->safe_psql('d2',
-	"CREATE SUBSCRIPTION testsub CONNECTION \$\$"
+	"CREATE SUBSCRIPTION testsub CONNECTION \$\$"
 	  . $node_twoways->connstr('d1')
 	  . "\$\$ PUBLICATION testpub WITH (create_slot=false, "
 	  . "slot_name='testslot')");
diff --git a/src/timezone/zic.c b/src/timezone/zic.c
index d6c5141923..d605c721ec 100644
--- a/src/timezone/zic.c
+++ b/src/timezone/zic.c
@@ -906,16 +906,16 @@ namecheck(const char *name)
 	/* Benign characters in a portable file name. */
 	static char const benign[] =
-	"-/_"
-	"abcdefghijklmnopqrstuvwxyz"
-	"ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+		"-/_"
+		"abcdefghijklmnopqrstuvwxyz"
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ";
 
 	/*
 	 * Non-control chars in the POSIX portable character set, excluding the
 	 * benign characters.
 	 */
 	static char const printable_and_not_benign[] =
-	" !\"#$%&'()*+,.0123456789:;<=>?@[\\]^`{|}~";
+		" !\"#$%&'()*+,.0123456789:;<=>?@[\\]^`{|}~";
 
 	char const *component = name;
 
@@ -3203,7 +3203,7 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount)
 			else if (jtime == ktime)
 			{
 				char const *dup_rules_msg =
-				_("two rules for same instant");
+					_("two rules for same instant");
 
 				eats(zp->z_filename, zp->z_linenum,
 					 rp->r_filename, rp->r_linenum);
diff --git a/src/tools/PerfectHash.pm b/src/tools/PerfectHash.pm
index d1446385be..e54905a3ef 100644
--- a/src/tools/PerfectHash.pm
+++ b/src/tools/PerfectHash.pm
@@ -96,7 +96,7 @@ sub generate_hash_function
 	{
 		$hash_mult2 = $_;    # "foreach $hash_mult2" doesn't work
 		@subresult = _construct_hash_table(
-			$keys_ref,   $hash_mult1, $hash_mult2,
+			$keys_ref, $hash_mult1, $hash_mult2,
 			$hash_seed1, $hash_seed2);
 		last FIND_PARAMS if @subresult;
 	}
@@ -108,8 +108,8 @@ sub generate_hash_function
 
 	# Extract info from _construct_hash_table's result array.
 	my $elemtype = $subresult[0];
-	my @hashtab  = @{ $subresult[1] };
-	my $nhash    = scalar(@hashtab);
+	my @hashtab = @{ $subresult[1] };
+	my $nhash = scalar(@hashtab);
 
 	# OK, construct the hash function definition including the hash table.
 	my $f = '';
@@ -138,11 +138,11 @@ sub generate_hash_function
 	$f .= sprintf "\tconst unsigned char *k = (const unsigned char *) key;\n";
 	$f .= sprintf "\tsize_t\t\tkeylen = %d;\n", $options{fixed_key_length}
 	  if (defined $options{fixed_key_length});
-	$f .= sprintf "\tuint32\t\ta = %d;\n",   $hash_seed1;
+	$f .= sprintf "\tuint32\t\ta = %d;\n", $hash_seed1;
 	$f .= sprintf "\tuint32\t\tb = %d;\n\n", $hash_seed2;
 	$f .= sprintf "\twhile (keylen--)\n\t{\n";
 	$f .= sprintf "\t\tunsigned char c = *k++";
-	$f .= sprintf " | 0x20" if $case_fold;    # see comment below
+	$f .= sprintf " | 0x20" if $case_fold;	  # see comment below
 	$f .= sprintf ";\n\n";
 	$f .= sprintf "\t\ta = a * %d + c;\n", $hash_mult1;
 	$f .= sprintf "\t\tb = b * %d + c;\n", $hash_mult2;
@@ -344,7 +344,7 @@ sub _construct_hash_table
 		&& $hmin + 0x7F >= $nedges)
 	{
 		# int8 will work
-		$elemtype    = 'int8';
+		$elemtype = 'int8';
 		$unused_flag = 0x7F;
 	}
 	elsif ($hmin >= -0x7FFF
@@ -352,7 +352,7 @@ sub _construct_hash_table
 		&& $hmin + 0x7FFF >= $nedges)
 	{
 		# int16 will work
-		$elemtype    = 'int16';
+		$elemtype = 'int16';
 		$unused_flag = 0x7FFF;
 	}
 	elsif ($hmin >= -0x7FFFFFFF
@@ -360,7 +360,7 @@ sub _construct_hash_table
 		&& $hmin + 0x3FFFFFFF >= $nedges)
 	{
 		# int32 will work
-		$elemtype    = 'int32';
+		$elemtype = 'int32';
 		$unused_flag = 0x3FFFFFFF;
 	}
 	else
diff --git a/src/tools/check_bison_recursion.pl b/src/tools/check_bison_recursion.pl
index c856f6ac30..18f14ad127 100755
--- a/src/tools/check_bison_recursion.pl
+++ b/src/tools/check_bison_recursion.pl
@@ -52,16 +52,16 @@ while (<>)
 	{
 
 		# first rule for nonterminal
-		$rule_number     = $1;
+		$rule_number = $1;
 		$cur_nonterminal = $2;
-		$rhs             = $3;
+		$rhs = $3;
 	}
 	elsif (m/^\s*(\d+)\s+\|\s+(.*)$/)
 	{
 
 		# additional rule for nonterminal
 		$rule_number = $1;
-		$rhs         = $2;
+		$rhs = $2;
 	}
 }
diff --git a/src/tools/ci/windows_build_config.pl b/src/tools/ci/windows_build_config.pl
index 59268a0bb6..b0d4360c74 100644
--- a/src/tools/ci/windows_build_config.pl
+++ b/src/tools/ci/windows_build_config.pl
@@ -4,10 +4,10 @@ use warnings;
 our $config;
 
 $config->{"tap_tests"} = 1;
-$config->{"asserts"}   = 1;
+$config->{"asserts"} = 1;
 
 $config->{"openssl"} = "c:/openssl/1.1/";
-$config->{"perl"}    = "c:/strawberry/$ENV{DEFAULT_PERL_VERSION}/perl/";
-$config->{"python"}  = "c:/python/";
+$config->{"perl"} = "c:/strawberry/$ENV{DEFAULT_PERL_VERSION}/perl/";
+$config->{"python"} = "c:/python/";
 
 1;
diff --git a/src/tools/copyright.pl b/src/tools/copyright.pl
index e870a01233..30c38c757b 100755
--- a/src/tools/copyright.pl
+++ b/src/tools/copyright.pl
@@ -16,8 +16,8 @@ use File::Find;
 use File::Basename;
 use Tie::File;
 
-my $pgdg      = 'PostgreSQL Global Development Group';
-my $cc        = 'Copyright \(c\)';
+my $pgdg = 'PostgreSQL Global Development Group';
+my $cc = 'Copyright \(c\)';
 my $ccliteral = 'Copyright (c)';
 
 # year-1900 is what localtime(time) puts in element 5
diff --git a/src/tools/gen_export.pl b/src/tools/gen_export.pl
index 68b3ab8661..ed60abe956 100644
--- a/src/tools/gen_export.pl
+++ b/src/tools/gen_export.pl
@@ -8,12 +8,15 @@ my $input;
 my $output;
 
 GetOptions(
-	'format:s'  => \$format,
-	'libname:s' => \$libname,
+	'format:s' => \$format,
+	'libname:s' => \$libname,
 	'input:s' => \$input,
-	'output:s'  => \$output) or die "wrong arguments";
+	'output:s' => \$output) or die "wrong arguments";
 
-if (not ($format eq 'aix' or $format eq 'darwin' or $format eq 'gnu' or $format eq 'win'))
+if (not(   $format eq 'aix'
+		or $format eq 'darwin'
+		or $format eq 'gnu'
+		or $format eq 'win'))
 {
 	die "$0: $format is not yet handled (only aix, darwin, gnu, win are)\n";
 }
diff --git a/src/tools/gen_keywordlist.pl b/src/tools/gen_keywordlist.pl
index 345dff6677..97a9ff1b30 100644
--- a/src/tools/gen_keywordlist.pl
+++ b/src/tools/gen_keywordlist.pl
@@ -38,15 +38,15 @@ use lib $FindBin::RealBin;
 use PerfectHash;
 
 my $output_path = '';
-my $extern      = 0;
-my $case_fold   = 1;
-my $varname     = 'ScanKeywords';
+my $extern = 0;
+my $case_fold = 1;
+my $varname = 'ScanKeywords';
 
 GetOptions(
-	'output:s'   => \$output_path,
-	'extern'     => \$extern,
+	'output:s' => \$output_path,
+	'extern' => \$extern,
 	'case-fold!' => \$case_fold,
-	'varname:s'  => \$varname) || usage();
+	'varname:s' => \$varname) || usage();
 
 my $kw_input_file = shift @ARGV || die "No input file.\n";
 
@@ -59,10 +59,10 @@ if ($output_path ne '' && substr($output_path, -1) ne '/')
 $kw_input_file =~ /(\w+)\.h$/
   || die "Input file must be named something.h.\n";
 my $base_filename = $1 . '_d';
-my $kw_def_file   = $output_path . $base_filename . '.h';
+my $kw_def_file = $output_path . $base_filename . '.h';
 
-open(my $kif,   '<', $kw_input_file) || die "$kw_input_file: $!\n";
-open(my $kwdef, '>', $kw_def_file)   || die "$kw_def_file: $!\n";
+open(my $kif, '<', $kw_input_file) || die "$kw_input_file: $!\n";
+open(my $kwdef, '>', $kw_def_file) || die "$kw_def_file: $!\n";
 
 # Opening boilerplate for keyword definition header.
 printf $kwdef <
-	'brief'           => \$brief,
-	'details-after'   => \$details_after,
-	'master-only'     => \$master_only,
+	'brief' => \$brief,
+	'details-after' => \$details_after,
+	'master-only' => \$master_only,
 	'non-master-only' => \$non_master_only,
-	'post-date'       => \$post_date,
-	'oldest-first'    => \$oldest_first,
-	'since=s'         => \$since) || usage();
+	'post-date' => \$post_date,
+	'oldest-first' => \$oldest_first,
+	'since=s' => \$since) || usage();
 
 usage() if @ARGV;
 
 my @git = qw(git log --format=fuller --date=iso);
@@ -104,7 +104,7 @@ my %rel_tags;
 	if ($line =~ m|^([a-f0-9]+)\s+commit\s+refs/tags/(\S+)|)
 	{
 		my $commit = $1;
-		my $tag    = $2;
+		my $tag = $2;
 		if (   $tag =~ /^REL_\d+_\d+$/
 			|| $tag =~ /^REL\d+_\d+$/
 			|| $tag =~ /^REL\d+_\d+_\d+$/)
@@ -152,10 +152,10 @@ for my $branch (@BRANCHES)
 		push_commit(\%commit) if %commit;
 		$last_tag = $rel_tags{$1} if defined $rel_tags{$1};
 		%commit = (
-			'branch'   => $branch,
-			'commit'   => $1,
+			'branch' => $branch,
+			'commit' => $1,
 			'last_tag' => $last_tag,
-			'message'  => '',);
+			'message' => '',);
 		if ($line =~ /^commit\s+\S+\s+(\S+)/)
 		{
 			$last_parent = $1;
@@ -195,7 +195,7 @@ for my $branch (@BRANCHES)
 for my $cc (@{ $all_commits_by_branch{'master'} })
 {
 	my $commit = $cc->{'commit'};
-	my $c      = $cc->{'commits'}->[0];
+	my $c = $cc->{'commits'}->[0];
 	$last_tag = $rel_tags{$commit} if defined $rel_tags{$commit};
 	if (defined $sprout_tags{$commit})
 	{
@@ -243,7 +243,7 @@ while (1)
 		if (!defined $best_branch
 			|| $leader->{'timestamp'} > $best_timestamp)
 		{
-			$best_branch    = $branch;
+			$best_branch = $branch;
 			$best_timestamp = $leader->{'timestamp'};
 		}
 	}
@@ -291,8 +291,8 @@ print @output_buffer if ($oldest_first);
 sub push_commit
 {
 	my ($c) = @_;
-	my $ht  = hash_commit($c);
-	my $ts  = parse_datetime($c->{'date'});
+	my $ht = hash_commit($c);
+	my $ts = parse_datetime($c->{'date'});
 	my $cc;
 
 	# Note that this code will never merge two commits on the same branch,
@@ -316,10 +316,10 @@ sub push_commit
 	if (!defined $cc)
 	{
 		$cc = {
-			'author'    => $c->{'author'},
-			'message'   => $c->{'message'},
-			'commit'    => $c->{'commit'},
-			'commits'   => [],
+			'author' => $c->{'author'},
+			'message' => $c->{'message'},
+			'commit' => $c->{'commit'},
+			'commits' => [],
 			'timestamp' => $ts
 		};
 		push @{ $all_commits{$ht} }, $cc;
@@ -327,9 +327,9 @@ sub push_commit
 
 	# stash only the fields we'll need later
 	my $smallc = {
-		'branch'   => $c->{'branch'},
-		'commit'   => $c->{'commit'},
-		'date'     => $c->{'date'},
+		'branch' => $c->{'branch'},
+		'commit' => $c->{'commit'},
+		'date' => $c->{'date'},
 		'last_tag' => $c->{'last_tag'}
 	};
 	push @{ $cc->{'commits'} }, $smallc;
@@ -385,9 +385,9 @@ sub output_details
 			output_str(
 				"%s [%s] %s\n",
-				substr($c->{'date'},   0, 10),
+				substr($c->{'date'}, 0, 10),
 				substr($c->{'commit'}, 0, 9),
-				substr($1,             0, 56));
+				substr($1, 0, 56));
 		}
 		else
 		{
diff --git a/src/tools/mark_pgdllimport.pl b/src/tools/mark_pgdllimport.pl
index 0cf71dbc25..45b4e73bff 100755
--- a/src/tools/mark_pgdllimport.pl
+++ b/src/tools/mark_pgdllimport.pl
@@ -28,7 +28,7 @@ use warnings;
 for my $include_file (@ARGV)
 {
 	open(my $rfh, '<', $include_file) || die "$include_file: $!";
-	my $buffer                = '';
+	my $buffer = '';
 	my $num_pgdllimport_added = 0;
 
 	while (my $raw_line = <$rfh>)
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index bf28cd8470..05548d7c0a 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -17,22 +17,22 @@ use File::Find ();
 
 use Exporter;
 our (@ISA, @EXPORT_OK);
-@ISA       = qw(Exporter);
+@ISA = qw(Exporter);
 @EXPORT_OK = qw(Install);
 
 my $insttype;
 my @client_contribs = ('oid2name', 'pgbench', 'vacuumlo');
 my @client_program_files = (
-	'clusterdb',     'createdb',       'createuser',    'dropdb',
-	'dropuser',      'ecpg',           'libecpg',       'libecpg_compat',
-	'libpgtypes',    'libpq',          'pg_amcheck',    'pg_basebackup',
-	'pg_config',     'pg_dump',        'pg_dumpall',    'pg_isready',
+	'clusterdb', 'createdb', 'createuser', 'dropdb',
+	'dropuser', 'ecpg', 'libecpg', 'libecpg_compat',
+	'libpgtypes', 'libpq', 'pg_amcheck', 'pg_basebackup',
+	'pg_config', 'pg_dump', 'pg_dumpall', 'pg_isready',
 	'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql',
-	'reindexdb',     'vacuumdb',       @client_contribs);
+	'reindexdb', 'vacuumdb', @client_contribs);
 
 sub lcopy
 {
-	my $src    = shift;
+	my $src = shift;
 	my $target = shift;
 
 	if (-f $target)
@@ -104,7 +104,7 @@ sub Install
 	CopySolutionOutput($conf, $target);
 
 	my $sample_files = [];
-	my @top_dir      = ("src");
+	my @top_dir = ("src");
 	@top_dir = ("src\\bin", "src\\interfaces") if ($insttype eq "client");
 	File::Find::find(
 		{
@@ -146,9 +146,9 @@ sub Install
 		$target . '/share/');
 	CopyFiles(
 		'Information schema data', $target . '/share/',
-		'src/backend/catalog/',    'sql_features.txt');
+		'src/backend/catalog/', 'sql_features.txt');
 	CopyFiles(
-		'Error code data',    $target . '/share/',
+		'Error code data', $target . '/share/',
 		'src/backend/utils/', 'errcodes.txt');
 	GenerateTimezoneFiles($target, $conf);
 	GenerateTsearchFiles($target);
@@ -162,10 +162,10 @@ sub Install
 		$target . '/share/tsearch_data/');
 
 	my $pl_extension_files = [];
-	my @pldirs             = ('src/pl/plpgsql/src');
-	push @pldirs, "src/pl/plperl"   if $config->{perl};
+	my @pldirs = ('src/pl/plpgsql/src');
+	push @pldirs, "src/pl/plperl" if $config->{perl};
 	push @pldirs, "src/pl/plpython" if $config->{python};
-	push @pldirs, "src/pl/tcl"      if $config->{tcl};
+	push @pldirs, "src/pl/tcl" if $config->{tcl};
 	File::Find::find(
 		{
 			wanted => sub {
@@ -200,8 +200,8 @@ sub EnsureDirectories
 
 sub CopyFiles
 {
-	my $what    = shift;
-	my $target  = shift;
+	my $what = shift;
+	my $target = shift;
 	my $basedir = shift;
 
 	print "Copying $what";
@@ -218,8 +218,8 @@ sub CopyFiles
 
 sub CopySetOfFiles
 {
-	my $what   = shift;
-	my $flist  = shift;
+	my $what = shift;
+	my $flist = shift;
 	my $target = shift;
 	print "Copying $what" if $what;
 	foreach (@$flist)
@@ -234,7 +234,7 @@ sub CopySetOfFiles
 
 sub CopySolutionOutput
 {
-	my $conf   = shift;
+	my $conf = shift;
 	my $target = shift;
 
 	my $rem =
 	  qr{Project\("\{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942\}"\) = "([^"]+)"};
@@ -361,8 +361,8 @@ sub CopySolutionOutput
 sub GenerateTimezoneFiles
 {
 	my $target = shift;
-	my $conf   = shift;
-	my $mf     = read_file("src/timezone/Makefile");
+	my $conf = shift;
+	my $mf = read_file("src/timezone/Makefile");
 	$mf =~ s{\\\r?\n}{}g;
 
 	$mf =~ /^TZDATAFILES\s*:?=\s*(.*)$/m
@@ -389,7 +389,8 @@ sub GenerateTsearchFiles
 	my $target = shift;
 
 	print "Generating tsearch script...";
-	system('perl', 'src/backend/snowball/snowball_create.pl',
+	system(
+		'perl', 'src/backend/snowball/snowball_create.pl',
 		'--input', 'src/backend/snowball/',
 		'--outdir', "$target/share/");
 	print "\n";
@@ -409,11 +410,11 @@ sub CopyContribFiles
 	while (my $d = readdir($D))
 	{
 		# These configuration-based exclusions must match vcregress.pl
-		next if ($d eq "uuid-ossp"  && !defined($config->{uuid}));
-		next if ($d eq "sslinfo"    && !defined($config->{openssl}));
-		next if ($d eq "pgcrypto"   && !defined($config->{openssl}));
-		next if ($d eq "xml2"       && !defined($config->{xml}));
-		next if ($d =~ /_plperl$/   && !defined($config->{perl}));
+		next if ($d eq "uuid-ossp" && !defined($config->{uuid}));
+		next if ($d eq "sslinfo" && !defined($config->{openssl}));
+		next if ($d eq "pgcrypto" && !defined($config->{openssl}));
+		next if ($d eq "xml2" && !defined($config->{xml}));
+		next if ($d =~ /_plperl$/ && !defined($config->{perl}));
 		next if ($d =~ /_plpython$/ && !defined($config->{python}));
 		next if ($d eq "sepgsql");
 
@@ -494,7 +495,7 @@ sub CopySubdirFiles
 	{
 		$flist = '';
 		if ($mf =~ /^HEADERS\s*=\s*(.*)$/m) { $flist .= $1 }
-		my @modlist  = ();
+		my @modlist = ();
 		my %fmodlist = ();
 		while ($mf =~ /^HEADERS_([^\s=]+)\s*=\s*(.*)$/mg)
 		{
@@ -559,7 +560,7 @@ sub CopySubdirFiles
 sub ParseAndCleanRule
 {
 	my $flist = shift;
-	my $mf    = shift;
+	my $mf = shift;
 
 	# Strip out $(addsuffix) rules
 	if (index($flist, '$(addsuffix ') >= 0)
@@ -573,10 +574,10 @@ sub ParseAndCleanRule
 		{
 			$pcount++ if (substr($flist, $i, 1) eq '(');
 			$pcount-- if (substr($flist, $i, 1) eq ')');
-			last      if ($pcount < 0);
+			last if ($pcount < 0);
 		}
 		$flist =
-		  substr($flist, 0, index($flist, '$(addsuffix '))
+			substr($flist, 0, index($flist, '$(addsuffix '))
 		  . substr($flist, $i + 1);
 	}
 	return $flist;
@@ -591,8 +592,8 @@ sub CopyIncludeFiles
 	CopyFiles(
 		'Public headers', $target . '/include/',
-		'src/include/',   'postgres_ext.h',
-		'pg_config.h',    'pg_config_ext.h',
+		'src/include/', 'postgres_ext.h',
+		'pg_config.h', 'pg_config_ext.h',
 		'pg_config_os.h', 'pg_config_manual.h');
 	lcopy('src/include/libpq/libpq-fs.h', $target . '/include/libpq/')
 	  || croak 'Could not copy libpq-fs.h';
@@ -669,8 +670,8 @@ sub CopyIncludeFiles
 
 sub GenerateNLSFiles
 {
-	my $target   = shift;
-	my $nlspath  = shift;
+	my $target = shift;
+	my $nlspath = shift;
 	my $majorver = shift;
 
 	print "Installing NLS files...";
diff --git a/src/tools/msvc/MSBuildProject.pm b/src/tools/msvc/MSBuildProject.pm
index 4b8917e34f..be17790e32 100644
--- a/src/tools/msvc/MSBuildProject.pm
+++ b/src/tools/msvc/MSBuildProject.pm
@@ -19,11 +19,11 @@ no warnings qw(redefine);    ## no critic
 sub _new
 {
 	my $classname = shift;
-	my $self      = $classname->SUPER::_new(@_);
+	my $self = $classname->SUPER::_new(@_);
 	bless($self, $classname);
 
 	$self->{filenameExtension} = '.vcxproj';
-	$self->{ToolsVersion}      = '4.0';
+	$self->{ToolsVersion} = '4.0';
 
 	return $self;
 }
@@ -51,7 +51,7 @@ EOF
 	{
 		# remove trailing backslash if necessary.
 		$sdkVersion =~ s/\\$//;
-		print $f <$sdkVersion
+		print $f <$sdkVersion
 EOF
 	}
@@ -84,8 +84,8 @@ EOF
 	$self->WriteItemDefinitionGroup(
 		$f, 'Debug',
 		{
-			defs    => "_DEBUG;DEBUG=1",
-			opt     => 'Disabled',
+			defs => "_DEBUG;DEBUG=1",
+			opt => 'Disabled',
 			strpool => 'false',
 			runtime => 'MultiThreadedDebugDLL'
 		});
 	$self->WriteItemDefinitionGroup(
 		$f,
 		'Release',
 		{
-			defs    => "",
-			opt     => 'Full',
+			defs => "",
+			opt => 'Full',
 			strpool => 'true',
 			runtime => 'MultiThreadedDLL'
 		});
@@ -141,14 +141,14 @@ sub WriteFiles
 	print $f <
 EOF
-	my @grammarFiles  = ();
+	my @grammarFiles = ();
 	my @resourceFiles = ();
 	my %uniquefiles;
 	foreach my $fileNameWithPath (sort keys %{ $self->{files} })
 	{
 		confess "Bad format filename '$fileNameWithPath'\n"
 		  unless ($fileNameWithPath =~ m!^(.*)/([^/]+)\.(c|cpp|y|l|rc)$!);
-		my $dir      = $1;
+		my $dir = $1;
 		my $fileName = $2;
 		if ($fileNameWithPath =~ /\.y$/
 			or $fileNameWithPath =~ /\.l$/)
 		{
@@ -312,8 +312,7 @@ sub WriteItemDefinitionGroup
 	my $targetmachine =
 	  $self->{platform} eq 'Win32' ? 'MachineX86' : 'MachineX64';
-	my $arch =
-	  $self->{platform} eq 'Win32' ? 'x86' : 'x86_64';
+	my $arch = $self->{platform} eq 'Win32' ? 'x86' : 'x86_64';
 
 	my $includes = join ';', @{ $self->{includes} }, "";
@@ -421,12 +420,12 @@ no warnings qw(redefine);    ## no critic
 sub new
 {
 	my $classname = shift;
-	my $self      = $classname->SUPER::_new(@_);
+	my $self = $classname->SUPER::_new(@_);
 	bless($self, $classname);
 
-	$self->{vcver}           = '14.00';
+	$self->{vcver} = '14.00';
 	$self->{PlatformToolset} = 'v140';
-	$self->{ToolsVersion}    = '14.0';
+	$self->{ToolsVersion} = '14.0';
 
 	return $self;
 }
@@ -446,12 +445,12 @@ no warnings qw(redefine);    ## no critic
 sub new
 {
 	my $classname = shift;
-	my $self      = $classname->SUPER::_new(@_);
+	my $self = $classname->SUPER::_new(@_);
 	bless($self, $classname);
 
-	$self->{vcver}           = '15.00';
+	$self->{vcver} = '15.00';
 	$self->{PlatformToolset} = 'v141';
-	$self->{ToolsVersion}    = '15.0';
+	$self->{ToolsVersion} = '15.0';
 
 	return $self;
 }
@@ -471,12 +470,12 @@ no warnings qw(redefine);    ## no critic
 sub new
 {
 	my $classname = shift;
-	my $self      = $classname->SUPER::_new(@_);
+	my $self = $classname->SUPER::_new(@_);
 	bless($self, $classname);
 
-	$self->{vcver}           = '16.00';
+	$self->{vcver} = '16.00';
 	$self->{PlatformToolset} = 'v142';
-	$self->{ToolsVersion}    = '16.0';
+	$self->{ToolsVersion} = '16.0';
 
 	return $self;
 }
@@ -496,12 +495,12 @@ no warnings qw(redefine);    ## no critic
 sub new
 {
 	my $classname = shift;
-	my $self      = $classname->SUPER::_new(@_);
+	my $self = $classname->SUPER::_new(@_);
 	bless($self, $classname);
 
-	$self->{vcver}           = '17.00';
+	$self->{vcver} = '17.00';
 	$self->{PlatformToolset} = 'v143';
-	$self->{ToolsVersion}    = '17.0';
+	$self->{ToolsVersion} = '17.0';
 
 	return $self;
 }
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index 958206f315..9e05eb91b1 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -23,7 +23,7 @@ use List::Util qw(first);
 
 use Exporter;
 our (@ISA, @EXPORT_OK);
-@ISA       = qw(Exporter);
+@ISA = qw(Exporter);
 @EXPORT_OK = qw(Mkvcbuild);
 
 my $solution;
@@ -35,21 +35,21 @@ my $libpq;
 my @unlink_on_exit;
 
 # Set of variables for modules in contrib/ and src/test/modules/
-my $contrib_defines      = {};
-my @contrib_uselibpq     = ();
-my @contrib_uselibpgport = ();
+my $contrib_defines = {};
+my @contrib_uselibpq = ();
+my @contrib_uselibpgport = ();
 my @contrib_uselibpgcommon = ();
-my $contrib_extralibs     = { 'libpq_pipeline' => ['ws2_32.lib'] };
-my $contrib_extraincludes = {};
-my $contrib_extrasource   = {};
-my @contrib_excludes      = (
-	'bool_plperl',     'commit_ts',
-	'hstore_plperl',   'hstore_plpython',
-	'intagg',          'jsonb_plperl',
-	'jsonb_plpython',  'ltree_plpython',
-	'sepgsql',         'brin',
+my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] };
+my $contrib_extraincludes = {};
+my $contrib_extrasource = {};
+my @contrib_excludes = (
+	'bool_plperl', 'commit_ts',
+	'hstore_plperl', 'hstore_plpython',
+	'intagg', 'jsonb_plperl',
+	'jsonb_plpython', 'ltree_plpython',
+	'sepgsql', 'brin',
 	'test_extensions', 'test_misc',
-	'test_pg_dump',    'snapshot_too_old',
+	'test_pg_dump', 'snapshot_too_old',
 	'unsafe_tests');
 
 # Set of variables for frontend modules
@@ -57,25 +57,25 @@ my $frontend_defines = { 'pgbench' => 'FD_SETSIZE=1024' };
 my @frontend_uselibpq =
   ('pg_amcheck', 'pg_ctl', 'pg_upgrade', 'pgbench', 'psql', 'initdb');
 my @frontend_uselibpgport = (
-	'pg_amcheck',    'pg_archivecleanup',
+	'pg_amcheck', 'pg_archivecleanup',
 	'pg_test_fsync', 'pg_test_timing',
-	'pg_upgrade',    'pg_waldump',
+	'pg_upgrade', 'pg_waldump',
 	'pgbench');
 my @frontend_uselibpgcommon = (
-	'pg_amcheck',    'pg_archivecleanup',
+	'pg_amcheck', 'pg_archivecleanup',
 	'pg_test_fsync', 'pg_test_timing',
-	'pg_upgrade',    'pg_waldump',
+	'pg_upgrade', 'pg_waldump',
 	'pgbench');
 my $frontend_extralibs = {
-	'initdb'     => ['ws2_32.lib'],
+	'initdb' => ['ws2_32.lib'],
 	'pg_amcheck' => ['ws2_32.lib'],
 	'pg_restore' => ['ws2_32.lib'],
-	'pgbench'    => ['ws2_32.lib'],
-	'psql'       => ['ws2_32.lib']
+	'pgbench' => ['ws2_32.lib'],
+	'psql' => ['ws2_32.lib']
 };
 my $frontend_extraincludes = {
 	'initdb' => ['src/timezone'],
-	'psql'   => ['src/backend']
+	'psql' => ['src/backend']
 };
 my $frontend_extrasource = {
 	'psql' => ['src/bin/psql/psqlscanslash.l'],
@@ -83,7 +83,7 @@ my $frontend_extrasource = {
 	  [ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ]
 };
 my @frontend_excludes = (
-	'pgevent',    'pg_basebackup', 'pg_rewind', 'pg_dump',
+	'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
 	'pg_waldump', 'scripts');
 
 sub mkvcbuild
@@ -191,7 +191,7 @@ sub mkvcbuild
 		'src/backend/port/win32_sema.c');
 	$postgres->ReplaceFile('src/backend/port/pg_shmem.c',
 		'src/backend/port/win32_shmem.c');
-	$postgres->AddFiles('src/port',   @pgportfiles);
+	$postgres->AddFiles('src/port', @pgportfiles);
 	$postgres->AddFiles('src/common', @pgcommonbkndfiles);
 	$postgres->AddDir('src/timezone');
 
@@ -204,7 +204,7 @@ sub mkvcbuild
 	$postgres->AddFiles('src/backend/utils/misc', 'guc-file.l');
 	$postgres->AddFiles(
 		'src/backend/replication', 'repl_scanner.l',
-		'repl_gram.y',             'syncrep_scanner.l',
+		'repl_gram.y', 'syncrep_scanner.l',
 		'syncrep_gram.y');
 	$postgres->AddFiles('src/backend/utils/adt', 'jsonpath_scan.l',
 		'jsonpath_gram.y');
@@ -334,7 +334,7 @@ sub mkvcbuild
 
 	my $libecpgcompat = $solution->AddProject(
 		'libecpg_compat', 'dll',
-		'interfaces',     'src/interfaces/ecpg/compatlib');
+		'interfaces', 'src/interfaces/ecpg/compatlib');
 	$libecpgcompat->AddIncludeDir('src/interfaces/ecpg/include');
 	$libecpgcompat->AddIncludeDir('src/interfaces/libpq');
 	$libecpgcompat->UseDef('src/interfaces/ecpg/compatlib/compatlib.def');
@@ -536,19 +536,19 @@ sub mkvcbuild
 	# Add transform modules dependent on plpython
 	my $hstore_plpython = AddTransformModule(
 		'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython',
-		'plpython' . $pymajorver,        'src/pl/plpython',
-		'hstore',                        'contrib');
+		'plpython' . $pymajorver, 'src/pl/plpython',
+		'hstore', 'contrib');
 	$hstore_plpython->AddDefine(
 		'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
 	my $jsonb_plpython = AddTransformModule(
 		'jsonb_plpython' . $pymajorver, 'contrib/jsonb_plpython',
-		'plpython' . $pymajorver,       'src/pl/plpython');
+		'plpython' . $pymajorver, 'src/pl/plpython');
 	$jsonb_plpython->AddDefine(
 		'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
 	my $ltree_plpython = AddTransformModule(
 		'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython',
-		'plpython' . $pymajorver,       'src/pl/plpython',
-		'ltree',                        'contrib');
+		'plpython' . $pymajorver, 'src/pl/plpython',
+		'ltree', 'contrib');
 	$ltree_plpython->AddDefine(
 		'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
 }
@@ -612,9 +612,9 @@ sub mkvcbuild
 	if ($solution->{platform} eq 'Win32')
 	{
 		my $source_file = 'conftest.c';
-		my $obj         = 'conftest.obj';
-		my $exe         = 'conftest.exe';
-		my @conftest    = ($source_file, $obj, $exe);
+		my $obj = 'conftest.obj';
+		my $exe = 'conftest.exe';
+		my @conftest = ($source_file, $obj, $exe);
 		push @unlink_on_exit, @conftest;
 		unlink $source_file;
 		open my $o, '>', $source_file
@@ -689,8 +689,8 @@ sub mkvcbuild
 	};
 
 	my $define_32bit_time = '_USE_32BIT_TIME_T';
-	my $ok_now            = $try_define->(undef);
-	my $ok_32bit          = $try_define->($define_32bit_time);
+	my $ok_now = $try_define->(undef);
+	my $ok_32bit = $try_define->($define_32bit_time);
 	unlink @conftest;
 	if (!$ok_now && !$ok_32bit)
 	{
@@ -790,14 +790,14 @@ sub mkvcbuild
 	# Add transform modules dependent on plperl
 	my $bool_plperl = AddTransformModule(
 		'bool_plperl', 'contrib/bool_plperl',
-		'plperl',      'src/pl/plperl');
+		'plperl', 'src/pl/plperl');
 	my $hstore_plperl = AddTransformModule(
 		'hstore_plperl', 'contrib/hstore_plperl',
-		'plperl',        'src/pl/plperl',
-		'hstore',        'contrib');
+		'plperl', 'src/pl/plperl',
+		'hstore', 'contrib');
 	my $jsonb_plperl = AddTransformModule(
 		'jsonb_plperl', 'contrib/jsonb_plperl',
-		'plperl',       'src/pl/plperl');
+		'plperl', 'src/pl/plperl');
 
 	foreach my $f (@perl_embed_ccflags)
 	{
@@ -880,7 +880,7 @@ sub mkvcbuild
 # Add a simple frontend project (exe)
 sub AddSimpleFrontend
 {
-	my $n        = shift;
+	my $n = shift;
 	my $uselibpq = shift;
 
 	my $p = $solution->AddProject($n, 'exe', 'bin');
@@ -901,12 +901,12 @@ sub AddSimpleFrontend
 # Add a simple transform module
 sub AddTransformModule
 {
-	my $n            = shift;
-	my $n_src        = shift;
+	my $n = shift;
+	my $n_src = shift;
 	my $pl_proj_name = shift;
-	my $pl_src       = shift;
-	my $type_name    = shift;
-	my $type_src     = shift;
+	my $pl_src = shift;
+	my $type_name = shift;
+	my $type_src = shift;
 
 	my $type_proj = undef;
 	if ($type_name)
@@ -969,9 +969,9 @@ sub AddTransformModule
 # Add a simple contrib project
 sub AddContrib
 {
-	my $subdir   = shift;
-	my $n        = shift;
-	my $mf       = Project::read_file("$subdir/$n/Makefile");
+	my $subdir = shift;
+	my $n = shift;
+	my $mf = Project::read_file("$subdir/$n/Makefile");
 	my @projects = ();
 
 	if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
@@ -1084,7 +1084,7 @@ sub AddContrib
 
 sub GenerateContribSqlFiles
 {
-	my $n  = shift;
+	my $n = shift;
 	my $mf = shift;
 	$mf =~ s{\\\r?\n}{}g;
 	if ($mf =~ /^DATA_built\s*=\s*(.*)$/mg)
@@ -1100,7 +1100,7 @@ sub GenerateContribSqlFiles
 			{
 				$pcount++ if (substr($l, $i, 1) eq '(');
 				$pcount-- if (substr($l, $i, 1) eq ')');
-				last      if ($pcount < 0);
+				last if ($pcount < 0);
 			}
 			$l = substr($l, 0, index($l, '$(addsuffix '))
 			  . substr($l, $i + 1);
@@ -1108,14 +1108,14 @@ sub GenerateContribSqlFiles
 
 		foreach my $d (split /\s+/, $l)
 		{
-			my $in  = "$d.in";
+			my $in = "$d.in";
 			my $out = "$d";
 
 			if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in"))
 			{
 				print "Building $out from $in (contrib/$n)...\n";
 				my $cont = Project::read_file("contrib/$n/$in");
-				my $dn   = $out;
+				my $dn = $out;
 				$dn =~ s/\.sql$//;
 				$cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
 				my $o;
@@ -1133,10 +1133,10 @@ sub AdjustContribProj
 {
 	my $proj = shift;
 	AdjustModule(
-		$proj,               $contrib_defines,
-		\@contrib_uselibpq,  \@contrib_uselibpgport,
+		$proj, $contrib_defines,
+		\@contrib_uselibpq, \@contrib_uselibpgport,
 		\@contrib_uselibpgcommon, $contrib_extralibs,
-		$contrib_extrasource,     $contrib_extraincludes);
+		$contrib_extrasource, $contrib_extraincludes);
 	return;
 }
 
@@ -1144,24 +1144,24 @@ sub AdjustFrontendProj
 {
 	my $proj = shift;
 	AdjustModule(
-		$proj,               $frontend_defines,
-		\@frontend_uselibpq, \@frontend_uselibpgport,
+		$proj, $frontend_defines,
+		\@frontend_uselibpq, \@frontend_uselibpgport,
 		\@frontend_uselibpgcommon, $frontend_extralibs,
-		$frontend_extrasource,     $frontend_extraincludes);
+		$frontend_extrasource, $frontend_extraincludes);
 	return;
 }
 
 sub AdjustModule
 {
-	my $proj                  = shift;
-	my $module_defines        = shift;
-	my $module_uselibpq       = shift;
-	my $module_uselibpgport   = shift;
+	my $proj = shift;
+	my $module_defines = shift;
+	my $module_uselibpq = shift;
+	my $module_uselibpgport = shift;
 	my $module_uselibpgcommon = shift;
-	my $module_extralibs      = shift;
-	my $module_extrasource    = shift;
-	my $module_extraincludes  = shift;
-	my $n                     = $proj->{name};
+	my $module_extralibs = shift;
+	my $module_extrasource = shift;
+	my $module_extraincludes = shift;
+	my $n = $proj->{name};
 
 	if ($module_defines->{$n})
 	{
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 1453979a76..0507ad08c5 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -23,20 +23,20 @@ sub _new
 	};
 	confess("Bad project type: $type\n") unless exists $good_types->{$type};
 	my $self = {
-		name                  => $name,
-		type                  => $type,
-		guid                  => $^O eq "MSWin32" ? Win32::GuidGen() : 'FAKE',
-		files                 => {},
-		references            => [],
-		libraries             => [],
-		suffixlib             => [],
-		includes              => [],
-		prefixincludes        => '',
-		defines               => ';',
-		solution              => $solution,
-		disablewarnings       => '4018;4244;4273;4101;4102;4090;4267',
+		name => $name,
+		type => $type,
+		guid => $^O eq "MSWin32" ? Win32::GuidGen() : 'FAKE',
+		files => {},
+		references => [],
+		libraries => [],
+		suffixlib => [],
+		includes => [],
+		prefixincludes => '',
+		defines => ';',
+		solution => $solution,
+		disablewarnings => '4018;4244;4273;4101;4102;4090;4267',
 		disablelinkerwarnings => '',
-		platform              => $solution->{platform},
+		platform => $solution->{platform},
 	};
 
 	bless($self, $classname);
@@ -63,7 +63,7 @@ sub AddDependantFiles
 sub AddFiles
 {
 	my $self = shift;
-	my $dir  = shift;
+	my $dir = shift;
 
 	while (my $f = shift)
 	{
@@ -76,11 +76,11 @@ sub AddFiles
 # name but a different file extension and add those files too.
 sub FindAndAddAdditionalFiles
 {
-	my $self  = shift;
+	my $self = shift;
 	my $fname = shift;
 	$fname =~ /(.*)(\.[^.]+)$/;
 	my $filenoext = $1;
-	my $fileext   = $2;
+	my $fileext = $2;
 
 	# For .c files, check if either a .l or .y file of the same name
 	# exists and add that too.
@@ -229,8 +229,8 @@ sub FullExportDLL my ($self, $libname) = @_; $self->{builddef} = 1; - $self->{def} = "./__CFGNAME__/$self->{name}/$self->{name}.def"; - $self->{implib} = "__CFGNAME__/$self->{name}/$libname"; + $self->{def} = "./__CFGNAME__/$self->{name}/$self->{name}.def"; + $self->{implib} = "__CFGNAME__/$self->{name}/$libname"; return; } @@ -263,13 +263,13 @@ sub AddDir } while ($mf =~ m{^(?:EXTRA_)?OBJS[^=]*=\s*(.*)$}m) { - my $s = $1; + my $s = $1; my $filter_re = qr{\$\(filter ([^,]+),\s+\$\(([^\)]+)\)\)}; while ($s =~ /$filter_re/) { # Process $(filter a b c, $(VAR)) expressions - my $list = $1; + my $list = $1; my $filter = $2; $list =~ s/\.o/\.c/g; my @pieces = split /\s+/, $list; @@ -321,8 +321,8 @@ sub AddDir qr{^([^:\n\$]+\.c)\s*:\s*(?:%\s*: )?\$(\([^\)]+\))\/(.*)\/[^\/]+\n}m; while ($mf =~ m{$replace_re}m) { - my $match = $1; - my $top = $2; + my $match = $1; + my $top = $2; my $target = $3; my @pieces = split /\s+/, $match; foreach my $fn (@pieces) diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index ef10cda576..b6d31c3583 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -18,14 +18,14 @@ no warnings qw(redefine); ## no critic sub _new { my $classname = shift; - my $options = shift; - my $self = { - projects => {}, - options => $options, - VisualStudioVersion => undef, + my $options = shift; + my $self = { + projects => {}, + options => $options, + VisualStudioVersion => undef, MinimumVisualStudioVersion => undef, - vcver => undef, - platform => undef, + vcver => undef, + platform => undef, }; bless($self, $classname); @@ -105,7 +105,7 @@ sub IsNewer sub copyFile { my ($src, $dest) = @_; - open(my $i, '<', $src) || croak "Could not open $src"; + open(my $i, '<', $src) || croak "Could not open $src"; open(my $o, '>', $dest) || croak "Could not open $dest"; while (<$i>) { @@ -147,8 +147,8 @@ sub GetOpenSSLVersion sub GenerateFiles { - my $self = shift; - my $bits = $self->{platform} eq 'Win32' ? 32 : 64; + my $self = shift; + my $bits = $self->{platform} eq 'Win32' ? 32 : 64; my $ac_init_found = 0; my $package_name; my $package_version; @@ -168,8 +168,8 @@ sub GenerateFiles { $ac_init_found = 1; - $package_name = $1; - $package_version = $2; + $package_name = $1; + $package_version = $2; $package_bugreport = $3; #$package_tarname = $4; $package_url = $5; @@ -184,7 +184,7 @@ sub GenerateFiles elsif (/\bAC_DEFINE\(OPENSSL_API_COMPAT, \[([0-9xL]+)\]/) { $ac_define_openssl_api_compat_found = 1; - $openssl_api_compat = $1; + $openssl_api_compat = $1; } } close($c); @@ -205,285 +205,285 @@ sub GenerateFiles # Every symbol in pg_config.h.in must be accounted for here. Set # to undef if the symbol should not be defined. my %define = ( - ALIGNOF_DOUBLE => 8, - ALIGNOF_INT => 4, - ALIGNOF_LONG => 4, - ALIGNOF_LONG_LONG_INT => 8, - ALIGNOF_PG_INT128_TYPE => undef, - ALIGNOF_SHORT => 2, - AC_APPLE_UNIVERSAL_BUILD => undef, - BLCKSZ => 1024 * $self->{options}->{blocksize}, - CONFIGURE_ARGS => '"' . $self->GetFakeConfigure() . '"', - DEF_PGPORT => $port, - DEF_PGPORT_STR => qq{"$port"}, - DLSUFFIX => '".dll"', - ENABLE_GSS => $self->{options}->{gss} ? 1 : undef, - ENABLE_NLS => $self->{options}->{nls} ? 1 : undef, - ENABLE_THREAD_SAFETY => 1, - HAVE_APPEND_HISTORY => undef, + ALIGNOF_DOUBLE => 8, + ALIGNOF_INT => 4, + ALIGNOF_LONG => 4, + ALIGNOF_LONG_LONG_INT => 8, + ALIGNOF_PG_INT128_TYPE => undef, + ALIGNOF_SHORT => 2, + AC_APPLE_UNIVERSAL_BUILD => undef, + BLCKSZ => 1024 * $self->{options}->{blocksize}, + CONFIGURE_ARGS => '"' . 
$self->GetFakeConfigure() . '"', + DEF_PGPORT => $port, + DEF_PGPORT_STR => qq{"$port"}, + DLSUFFIX => '".dll"', + ENABLE_GSS => $self->{options}->{gss} ? 1 : undef, + ENABLE_NLS => $self->{options}->{nls} ? 1 : undef, + ENABLE_THREAD_SAFETY => 1, + HAVE_APPEND_HISTORY => undef, HAVE_ASN1_STRING_GET0_DATA => undef, - HAVE_ATOMICS => 1, - HAVE_ATOMIC_H => undef, - HAVE_BACKTRACE_SYMBOLS => undef, - HAVE_BIO_GET_DATA => undef, - HAVE_BIO_METH_NEW => undef, - HAVE_COMPUTED_GOTO => undef, - HAVE_COPYFILE => undef, - HAVE_COPYFILE_H => undef, - HAVE_CRTDEFS_H => undef, - HAVE_CRYPTO_LOCK => undef, - HAVE_DECL_FDATASYNC => 0, - HAVE_DECL_F_FULLFSYNC => 0, + HAVE_ATOMICS => 1, + HAVE_ATOMIC_H => undef, + HAVE_BACKTRACE_SYMBOLS => undef, + HAVE_BIO_GET_DATA => undef, + HAVE_BIO_METH_NEW => undef, + HAVE_COMPUTED_GOTO => undef, + HAVE_COPYFILE => undef, + HAVE_COPYFILE_H => undef, + HAVE_CRTDEFS_H => undef, + HAVE_CRYPTO_LOCK => undef, + HAVE_DECL_FDATASYNC => 0, + HAVE_DECL_F_FULLFSYNC => 0, HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER => 0, - HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER => 0, - HAVE_DECL_LLVMGETHOSTCPUNAME => 0, - HAVE_DECL_LLVMGETHOSTCPUFEATURES => 0, - HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN => 0, - HAVE_DECL_POSIX_FADVISE => 0, - HAVE_DECL_PREADV => 0, - HAVE_DECL_PWRITEV => 0, - HAVE_DECL_STRLCAT => 0, - HAVE_DECL_STRLCPY => 0, - HAVE_DECL_STRNLEN => 1, - HAVE_EDITLINE_HISTORY_H => undef, - HAVE_EDITLINE_READLINE_H => undef, - HAVE_EXECINFO_H => undef, - HAVE_EXPLICIT_BZERO => undef, - HAVE_FSEEKO => 1, - HAVE_GCC__ATOMIC_INT32_CAS => undef, - HAVE_GCC__ATOMIC_INT64_CAS => undef, - HAVE_GCC__SYNC_CHAR_TAS => undef, - HAVE_GCC__SYNC_INT32_CAS => undef, - HAVE_GCC__SYNC_INT32_TAS => undef, - HAVE_GCC__SYNC_INT64_CAS => undef, - HAVE_GETIFADDRS => undef, - HAVE_GETOPT => undef, - HAVE_GETOPT_H => undef, - HAVE_GETOPT_LONG => undef, - HAVE_GETPEEREID => undef, - HAVE_GETPEERUCRED => undef, - HAVE_GSSAPI_EXT_H => undef, - HAVE_GSSAPI_GSSAPI_EXT_H => undef, - HAVE_GSSAPI_GSSAPI_H => undef, - HAVE_GSSAPI_H => undef, - HAVE_HMAC_CTX_FREE => undef, - HAVE_HMAC_CTX_NEW => undef, - HAVE_HISTORY_H => undef, - HAVE_HISTORY_TRUNCATE_FILE => undef, - HAVE_IFADDRS_H => undef, - HAVE_INET_ATON => undef, - HAVE_INET_PTON => 1, - HAVE_INT_TIMEZONE => 1, - HAVE_INT64 => undef, - HAVE_INT8 => undef, - HAVE_INTTYPES_H => undef, - HAVE_INT_OPTERR => undef, - HAVE_INT_OPTRESET => undef, - HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P => undef, - HAVE_KQUEUE => undef, - HAVE_LANGINFO_H => undef, - HAVE_LDAP_INITIALIZE => undef, - HAVE_LIBCRYPTO => undef, - HAVE_LIBLDAP => undef, - HAVE_LIBLZ4 => undef, - HAVE_LIBM => undef, - HAVE_LIBPAM => undef, - HAVE_LIBREADLINE => undef, - HAVE_LIBSELINUX => undef, - HAVE_LIBSSL => undef, - HAVE_LIBWLDAP32 => undef, - HAVE_LIBXML2 => undef, - HAVE_LIBXSLT => undef, - HAVE_LIBZ => $self->{options}->{zlib} ? 
1 : undef, - HAVE_LIBZSTD => undef, - HAVE_LOCALE_T => 1, - HAVE_LONG_INT_64 => undef, - HAVE_LONG_LONG_INT_64 => 1, - HAVE_MBARRIER_H => undef, - HAVE_MBSTOWCS_L => 1, - HAVE_MEMORY_H => 1, - HAVE_MEMSET_S => undef, - HAVE_MKDTEMP => undef, - HAVE_OPENSSL_INIT_SSL => undef, - HAVE_OSSP_UUID_H => undef, - HAVE_PAM_PAM_APPL_H => undef, - HAVE_POSIX_FADVISE => undef, - HAVE_POSIX_FALLOCATE => undef, - HAVE_PPOLL => undef, - HAVE_PTHREAD => undef, - HAVE_PTHREAD_BARRIER_WAIT => undef, + HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER => 0, + HAVE_DECL_LLVMGETHOSTCPUNAME => 0, + HAVE_DECL_LLVMGETHOSTCPUFEATURES => 0, + HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN => 0, + HAVE_DECL_POSIX_FADVISE => 0, + HAVE_DECL_PREADV => 0, + HAVE_DECL_PWRITEV => 0, + HAVE_DECL_STRLCAT => 0, + HAVE_DECL_STRLCPY => 0, + HAVE_DECL_STRNLEN => 1, + HAVE_EDITLINE_HISTORY_H => undef, + HAVE_EDITLINE_READLINE_H => undef, + HAVE_EXECINFO_H => undef, + HAVE_EXPLICIT_BZERO => undef, + HAVE_FSEEKO => 1, + HAVE_GCC__ATOMIC_INT32_CAS => undef, + HAVE_GCC__ATOMIC_INT64_CAS => undef, + HAVE_GCC__SYNC_CHAR_TAS => undef, + HAVE_GCC__SYNC_INT32_CAS => undef, + HAVE_GCC__SYNC_INT32_TAS => undef, + HAVE_GCC__SYNC_INT64_CAS => undef, + HAVE_GETIFADDRS => undef, + HAVE_GETOPT => undef, + HAVE_GETOPT_H => undef, + HAVE_GETOPT_LONG => undef, + HAVE_GETPEEREID => undef, + HAVE_GETPEERUCRED => undef, + HAVE_GSSAPI_EXT_H => undef, + HAVE_GSSAPI_GSSAPI_EXT_H => undef, + HAVE_GSSAPI_GSSAPI_H => undef, + HAVE_GSSAPI_H => undef, + HAVE_HMAC_CTX_FREE => undef, + HAVE_HMAC_CTX_NEW => undef, + HAVE_HISTORY_H => undef, + HAVE_HISTORY_TRUNCATE_FILE => undef, + HAVE_IFADDRS_H => undef, + HAVE_INET_ATON => undef, + HAVE_INET_PTON => 1, + HAVE_INT_TIMEZONE => 1, + HAVE_INT64 => undef, + HAVE_INT8 => undef, + HAVE_INTTYPES_H => undef, + HAVE_INT_OPTERR => undef, + HAVE_INT_OPTRESET => undef, + HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P => undef, + HAVE_KQUEUE => undef, + HAVE_LANGINFO_H => undef, + HAVE_LDAP_INITIALIZE => undef, + HAVE_LIBCRYPTO => undef, + HAVE_LIBLDAP => undef, + HAVE_LIBLZ4 => undef, + HAVE_LIBM => undef, + HAVE_LIBPAM => undef, + HAVE_LIBREADLINE => undef, + HAVE_LIBSELINUX => undef, + HAVE_LIBSSL => undef, + HAVE_LIBWLDAP32 => undef, + HAVE_LIBXML2 => undef, + HAVE_LIBXSLT => undef, + HAVE_LIBZ => $self->{options}->{zlib} ? 
1 : undef, + HAVE_LIBZSTD => undef, + HAVE_LOCALE_T => 1, + HAVE_LONG_INT_64 => undef, + HAVE_LONG_LONG_INT_64 => 1, + HAVE_MBARRIER_H => undef, + HAVE_MBSTOWCS_L => 1, + HAVE_MEMORY_H => 1, + HAVE_MEMSET_S => undef, + HAVE_MKDTEMP => undef, + HAVE_OPENSSL_INIT_SSL => undef, + HAVE_OSSP_UUID_H => undef, + HAVE_PAM_PAM_APPL_H => undef, + HAVE_POSIX_FADVISE => undef, + HAVE_POSIX_FALLOCATE => undef, + HAVE_PPOLL => undef, + HAVE_PTHREAD => undef, + HAVE_PTHREAD_BARRIER_WAIT => undef, HAVE_PTHREAD_IS_THREADED_NP => undef, - HAVE_PTHREAD_PRIO_INHERIT => undef, - HAVE_READLINE_H => undef, - HAVE_READLINE_HISTORY_H => undef, - HAVE_READLINE_READLINE_H => undef, - HAVE_RL_COMPLETION_MATCHES => undef, - HAVE_RL_COMPLETION_SUPPRESS_QUOTE => undef, - HAVE_RL_FILENAME_COMPLETION_FUNCTION => undef, - HAVE_RL_FILENAME_QUOTE_CHARACTERS => undef, - HAVE_RL_FILENAME_QUOTING_FUNCTION => undef, - HAVE_RL_RESET_SCREEN_SIZE => undef, - HAVE_RL_VARIABLE_BIND => undef, - HAVE_SECURITY_PAM_APPL_H => undef, - HAVE_SETPROCTITLE => undef, - HAVE_SETPROCTITLE_FAST => undef, - HAVE_SOCKLEN_T => 1, - HAVE_SPINLOCKS => 1, - HAVE_SSL_CTX_SET_CERT_CB => undef, - HAVE_STDBOOL_H => 1, - HAVE_STDINT_H => 1, - HAVE_STDLIB_H => 1, - HAVE_STRCHRNUL => undef, - HAVE_STRERROR_R => undef, - HAVE_STRINGS_H => undef, - HAVE_STRING_H => 1, - HAVE_STRLCAT => undef, - HAVE_STRLCPY => undef, - HAVE_STRNLEN => 1, - HAVE_STRSIGNAL => undef, - HAVE_STRUCT_OPTION => undef, - HAVE_STRUCT_SOCKADDR_SA_LEN => undef, - HAVE_STRUCT_TM_TM_ZONE => undef, - HAVE_SYNC_FILE_RANGE => undef, - HAVE_SYNCFS => undef, - HAVE_SYSLOG => undef, - HAVE_SYS_EPOLL_H => undef, - HAVE_SYS_EVENT_H => undef, - HAVE_SYS_PERSONALITY_H => undef, - HAVE_SYS_PRCTL_H => undef, - HAVE_SYS_PROCCTL_H => undef, - HAVE_SYS_SIGNALFD_H => undef, - HAVE_SYS_STAT_H => 1, - HAVE_SYS_TYPES_H => 1, - HAVE_SYS_UCRED_H => undef, - HAVE_TERMIOS_H => undef, - HAVE_TYPEOF => undef, - HAVE_UCRED_H => undef, - HAVE_UINT64 => undef, - HAVE_UINT8 => undef, - HAVE_UNION_SEMUN => undef, - HAVE_UNISTD_H => 1, - HAVE_USELOCALE => undef, - HAVE_UUID_BSD => undef, - HAVE_UUID_E2FS => undef, - HAVE_UUID_OSSP => undef, - HAVE_UUID_H => undef, - HAVE_UUID_UUID_H => undef, - HAVE_WCSTOMBS_L => 1, - HAVE_VISIBILITY_ATTRIBUTE => undef, - HAVE_X509_GET_SIGNATURE_NID => 1, - HAVE_X509_GET_SIGNATURE_INFO => undef, - HAVE_X86_64_POPCNTQ => undef, - HAVE__BOOL => undef, - HAVE__BUILTIN_BSWAP16 => undef, - HAVE__BUILTIN_BSWAP32 => undef, - HAVE__BUILTIN_BSWAP64 => undef, - HAVE__BUILTIN_CLZ => undef, - HAVE__BUILTIN_CONSTANT_P => undef, - HAVE__BUILTIN_CTZ => undef, - HAVE__BUILTIN_FRAME_ADDRESS => undef, - HAVE__BUILTIN_OP_OVERFLOW => undef, - HAVE__BUILTIN_POPCOUNT => undef, - HAVE__BUILTIN_TYPES_COMPATIBLE_P => undef, - HAVE__BUILTIN_UNREACHABLE => undef, - HAVE__CONFIGTHREADLOCALE => 1, - HAVE__CPUID => 1, - HAVE__GET_CPUID => undef, - HAVE__STATIC_ASSERT => undef, - INT64_MODIFIER => qq{"ll"}, - LOCALE_T_IN_XLOCALE => undef, - MAXIMUM_ALIGNOF => 8, - MEMSET_LOOP_LIMIT => 1024, - OPENSSL_API_COMPAT => $openssl_api_compat, - PACKAGE_BUGREPORT => qq{"$package_bugreport"}, - PACKAGE_NAME => qq{"$package_name"}, - PACKAGE_STRING => qq{"$package_name $package_version"}, - PACKAGE_TARNAME => lc qq{"$package_name"}, - PACKAGE_URL => qq{"$package_url"}, - PACKAGE_VERSION => qq{"$package_version"}, - PG_INT128_TYPE => undef, - PG_INT64_TYPE => 'long long int', - PG_KRB_SRVNAM => qq{"postgres"}, - PG_MAJORVERSION => qq{"$majorver"}, + HAVE_PTHREAD_PRIO_INHERIT => undef, + HAVE_READLINE_H => undef, + 
HAVE_READLINE_HISTORY_H => undef, + HAVE_READLINE_READLINE_H => undef, + HAVE_RL_COMPLETION_MATCHES => undef, + HAVE_RL_COMPLETION_SUPPRESS_QUOTE => undef, + HAVE_RL_FILENAME_COMPLETION_FUNCTION => undef, + HAVE_RL_FILENAME_QUOTE_CHARACTERS => undef, + HAVE_RL_FILENAME_QUOTING_FUNCTION => undef, + HAVE_RL_RESET_SCREEN_SIZE => undef, + HAVE_RL_VARIABLE_BIND => undef, + HAVE_SECURITY_PAM_APPL_H => undef, + HAVE_SETPROCTITLE => undef, + HAVE_SETPROCTITLE_FAST => undef, + HAVE_SOCKLEN_T => 1, + HAVE_SPINLOCKS => 1, + HAVE_SSL_CTX_SET_CERT_CB => undef, + HAVE_STDBOOL_H => 1, + HAVE_STDINT_H => 1, + HAVE_STDLIB_H => 1, + HAVE_STRCHRNUL => undef, + HAVE_STRERROR_R => undef, + HAVE_STRINGS_H => undef, + HAVE_STRING_H => 1, + HAVE_STRLCAT => undef, + HAVE_STRLCPY => undef, + HAVE_STRNLEN => 1, + HAVE_STRSIGNAL => undef, + HAVE_STRUCT_OPTION => undef, + HAVE_STRUCT_SOCKADDR_SA_LEN => undef, + HAVE_STRUCT_TM_TM_ZONE => undef, + HAVE_SYNC_FILE_RANGE => undef, + HAVE_SYNCFS => undef, + HAVE_SYSLOG => undef, + HAVE_SYS_EPOLL_H => undef, + HAVE_SYS_EVENT_H => undef, + HAVE_SYS_PERSONALITY_H => undef, + HAVE_SYS_PRCTL_H => undef, + HAVE_SYS_PROCCTL_H => undef, + HAVE_SYS_SIGNALFD_H => undef, + HAVE_SYS_STAT_H => 1, + HAVE_SYS_TYPES_H => 1, + HAVE_SYS_UCRED_H => undef, + HAVE_TERMIOS_H => undef, + HAVE_TYPEOF => undef, + HAVE_UCRED_H => undef, + HAVE_UINT64 => undef, + HAVE_UINT8 => undef, + HAVE_UNION_SEMUN => undef, + HAVE_UNISTD_H => 1, + HAVE_USELOCALE => undef, + HAVE_UUID_BSD => undef, + HAVE_UUID_E2FS => undef, + HAVE_UUID_OSSP => undef, + HAVE_UUID_H => undef, + HAVE_UUID_UUID_H => undef, + HAVE_WCSTOMBS_L => 1, + HAVE_VISIBILITY_ATTRIBUTE => undef, + HAVE_X509_GET_SIGNATURE_NID => 1, + HAVE_X509_GET_SIGNATURE_INFO => undef, + HAVE_X86_64_POPCNTQ => undef, + HAVE__BOOL => undef, + HAVE__BUILTIN_BSWAP16 => undef, + HAVE__BUILTIN_BSWAP32 => undef, + HAVE__BUILTIN_BSWAP64 => undef, + HAVE__BUILTIN_CLZ => undef, + HAVE__BUILTIN_CONSTANT_P => undef, + HAVE__BUILTIN_CTZ => undef, + HAVE__BUILTIN_FRAME_ADDRESS => undef, + HAVE__BUILTIN_OP_OVERFLOW => undef, + HAVE__BUILTIN_POPCOUNT => undef, + HAVE__BUILTIN_TYPES_COMPATIBLE_P => undef, + HAVE__BUILTIN_UNREACHABLE => undef, + HAVE__CONFIGTHREADLOCALE => 1, + HAVE__CPUID => 1, + HAVE__GET_CPUID => undef, + HAVE__STATIC_ASSERT => undef, + INT64_MODIFIER => qq{"ll"}, + LOCALE_T_IN_XLOCALE => undef, + MAXIMUM_ALIGNOF => 8, + MEMSET_LOOP_LIMIT => 1024, + OPENSSL_API_COMPAT => $openssl_api_compat, + PACKAGE_BUGREPORT => qq{"$package_bugreport"}, + PACKAGE_NAME => qq{"$package_name"}, + PACKAGE_STRING => qq{"$package_name $package_version"}, + PACKAGE_TARNAME => lc qq{"$package_name"}, + PACKAGE_URL => qq{"$package_url"}, + PACKAGE_VERSION => qq{"$package_version"}, + PG_INT128_TYPE => undef, + PG_INT64_TYPE => 'long long int', + PG_KRB_SRVNAM => qq{"postgres"}, + PG_MAJORVERSION => qq{"$majorver"}, PG_MAJORVERSION_NUM => $majorver, PG_MINORVERSION_NUM => $minorver, PG_PRINTF_ATTRIBUTE => undef, - PG_USE_STDBOOL => 1, - PG_VERSION => qq{"$package_version$extraver"}, - PG_VERSION_NUM => sprintf("%d%04d", $majorver, $minorver), + PG_USE_STDBOOL => 1, + PG_VERSION => qq{"$package_version$extraver"}, + PG_VERSION_NUM => sprintf("%d%04d", $majorver, $minorver), PG_VERSION_STR => qq{"PostgreSQL $package_version$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"}, - PROFILE_PID_DIR => undef, + PROFILE_PID_DIR => undef, PTHREAD_CREATE_JOINABLE => undef, - RELSEG_SIZE => (1024 / $self->{options}->{blocksize}) * + RELSEG_SIZE => (1024 / 
$self->{options}->{blocksize}) * $self->{options}->{segsize} * 1024, - SIZEOF_BOOL => 1, - SIZEOF_LONG => 4, - SIZEOF_OFF_T => undef, - SIZEOF_SIZE_T => $bits / 8, - SIZEOF_VOID_P => $bits / 8, - STDC_HEADERS => 1, - STRERROR_R_INT => undef, - USE_ARMV8_CRC32C => undef, + SIZEOF_BOOL => 1, + SIZEOF_LONG => 4, + SIZEOF_OFF_T => undef, + SIZEOF_SIZE_T => $bits / 8, + SIZEOF_VOID_P => $bits / 8, + STDC_HEADERS => 1, + STRERROR_R_INT => undef, + USE_ARMV8_CRC32C => undef, USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK => undef, USE_ASSERT_CHECKING => $self->{options}->{asserts} ? 1 : undef, - USE_BONJOUR => undef, - USE_BSD_AUTH => undef, + USE_BONJOUR => undef, + USE_BSD_AUTH => undef, USE_ICU => $self->{options}->{icu} ? 1 : undef, - USE_LIBXML => undef, - USE_LIBXSLT => undef, - USE_LZ4 => undef, - USE_LDAP => $self->{options}->{ldap} ? 1 : undef, - USE_LLVM => undef, + USE_LIBXML => undef, + USE_LIBXSLT => undef, + USE_LZ4 => undef, + USE_LDAP => $self->{options}->{ldap} ? 1 : undef, + USE_LLVM => undef, USE_NAMED_POSIX_SEMAPHORES => undef, - USE_OPENSSL => undef, - USE_PAM => undef, - USE_SLICING_BY_8_CRC32C => undef, - USE_SSE42_CRC32C => undef, + USE_OPENSSL => undef, + USE_PAM => undef, + USE_SLICING_BY_8_CRC32C => undef, + USE_SSE42_CRC32C => undef, USE_SSE42_CRC32C_WITH_RUNTIME_CHECK => 1, - USE_SYSTEMD => undef, - USE_SYSV_SEMAPHORES => undef, - USE_SYSV_SHARED_MEMORY => undef, - USE_UNNAMED_POSIX_SEMAPHORES => undef, - USE_WIN32_SEMAPHORES => 1, - USE_WIN32_SHARED_MEMORY => 1, - USE_ZSTD => undef, - WCSTOMBS_L_IN_XLOCALE => undef, - WORDS_BIGENDIAN => undef, - XLOG_BLCKSZ => 1024 * $self->{options}->{wal_blocksize}, + USE_SYSTEMD => undef, + USE_SYSV_SEMAPHORES => undef, + USE_SYSV_SHARED_MEMORY => undef, + USE_UNNAMED_POSIX_SEMAPHORES => undef, + USE_WIN32_SEMAPHORES => 1, + USE_WIN32_SHARED_MEMORY => 1, + USE_ZSTD => undef, + WCSTOMBS_L_IN_XLOCALE => undef, + WORDS_BIGENDIAN => undef, + XLOG_BLCKSZ => 1024 * $self->{options}->{wal_blocksize}, _FILE_OFFSET_BITS => undef, _LARGEFILE_SOURCE => undef, - _LARGE_FILES => undef, - inline => '__inline', - pg_restrict => '__restrict', + _LARGE_FILES => undef, + inline => '__inline', + pg_restrict => '__restrict', # not defined, because it'd conflict with __declspec(restrict) restrict => undef, - typeof => undef,); + typeof => undef,); if ($self->{options}->{uuid}) { $define{HAVE_UUID_OSSP} = 1; - $define{HAVE_UUID_H} = 1; + $define{HAVE_UUID_H} = 1; } if ($self->{options}->{xml}) { $define{HAVE_LIBXML2} = 1; - $define{USE_LIBXML} = 1; + $define{USE_LIBXML} = 1; } if ($self->{options}->{xslt}) { $define{HAVE_LIBXSLT} = 1; - $define{USE_LIBXSLT} = 1; + $define{USE_LIBXSLT} = 1; } if ($self->{options}->{lz4}) { $define{HAVE_LIBLZ4} = 1; - $define{USE_LZ4} = 1; + $define{USE_LZ4} = 1; } if ($self->{options}->{zstd}) { $define{HAVE_LIBZSTD} = 1; - $define{USE_ZSTD} = 1; + $define{USE_ZSTD} = 1; } if ($self->{options}->{openssl}) { @@ -503,11 +503,11 @@ sub GenerateFiles || ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '0')) { $define{HAVE_ASN1_STRING_GET0_DATA} = 1; - $define{HAVE_BIO_GET_DATA} = 1; - $define{HAVE_BIO_METH_NEW} = 1; - $define{HAVE_HMAC_CTX_FREE} = 1; - $define{HAVE_HMAC_CTX_NEW} = 1; - $define{HAVE_OPENSSL_INIT_SSL} = 1; + $define{HAVE_BIO_GET_DATA} = 1; + $define{HAVE_BIO_METH_NEW} = 1; + $define{HAVE_HMAC_CTX_FREE} = 1; + $define{HAVE_HMAC_CTX_NEW} = 1; + $define{HAVE_OPENSSL_INIT_SSL} = 1; } # Symbols needed with OpenSSL 1.0.2 and above. 
@@ -519,7 +519,7 @@ sub GenerateFiles } } - $self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1); + $self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1); $self->GenerateConfigHeader('src/include/pg_config_ext.h', \%define, 0); $self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h', \%define, 0); @@ -580,7 +580,9 @@ sub GenerateFiles { print "Generating lwlocknames.c and lwlocknames.h...\n"; my $lmgr = 'src/backend/storage/lmgr'; - system("perl $lmgr/generate-lwlocknames.pl --outdir $lmgr $lmgr/lwlocknames.txt"); + system( + "perl $lmgr/generate-lwlocknames.pl --outdir $lmgr $lmgr/lwlocknames.txt" + ); } if (IsNewer( 'src/include/storage/lwlocknames.h', @@ -642,21 +644,22 @@ sub GenerateFiles ); } - if (IsNewer('contrib/fuzzystrmatch/daitch_mokotoff.h', - 'contrib/fuzzystrmatch/daitch_mokotoff_header.pl')) + if (IsNewer( + 'contrib/fuzzystrmatch/daitch_mokotoff.h', + 'contrib/fuzzystrmatch/daitch_mokotoff_header.pl')) { print "Generating daitch_mokotoff.h...\n"; - system( - 'perl contrib/fuzzystrmatch/daitch_mokotoff_header.pl ' . - 'contrib/fuzzystrmatch/daitch_mokotoff.h' - ); + system( 'perl contrib/fuzzystrmatch/daitch_mokotoff_header.pl ' + . 'contrib/fuzzystrmatch/daitch_mokotoff.h'); } if (IsNewer('src/bin/psql/sql_help.h', 'src/bin/psql/create_help.pl')) { print "Generating sql_help.h...\n"; my $psql = 'src/bin/psql'; - system("perl $psql/create_help.pl --docdir doc/src/sgml/ref --outdir $psql --basename sql_help"); + system( + "perl $psql/create_help.pl --docdir doc/src/sgml/ref --outdir $psql --basename sql_help" + ); } if (IsNewer('src/common/kwlist_d.h', 'src/include/parser/kwlist.h')) @@ -710,7 +713,9 @@ sub GenerateFiles { print "Generating preproc.y...\n"; my $ecpg = 'src/interfaces/ecpg'; - system("perl $ecpg/preproc/parse.pl --srcdir $ecpg/preproc --parser src/backend/parser/gram.y --output $ecpg/preproc/preproc.y"); + system( + "perl $ecpg/preproc/parse.pl --srcdir $ecpg/preproc --parser src/backend/parser/gram.y --output $ecpg/preproc/preproc.y" + ); } unless (-f "src/port/pg_config_paths.h") @@ -825,7 +830,9 @@ EOF if ($need_node_support) { - system("perl src/backend/nodes/gen_node_support.pl --outdir src/backend/nodes @node_files"); + system( + "perl src/backend/nodes/gen_node_support.pl --outdir src/backend/nodes @node_files" + ); open(my $f, '>', 'src/backend/nodes/node-support-stamp') || confess "Could not touch node-support-stamp"; close($f); @@ -880,7 +887,7 @@ sub GenerateConfigHeader { if (m/^#(\s*)undef\s+(\w+)/) { - my $ws = $1; + my $ws = $1; my $macro = $2; if (exists $defines->{$macro}) { @@ -974,23 +981,23 @@ sub AddProject if (-e "$self->{options}->{openssl}/lib/VC/sslcrypto32MD.lib") { # Win32 here, with a debugging library set. - $dbgsuffix = 1; - $libsslpath = '\lib\VC\libssl32.lib'; + $dbgsuffix = 1; + $libsslpath = '\lib\VC\libssl32.lib'; $libcryptopath = '\lib\VC\libcrypto32.lib'; } elsif (-e "$self->{options}->{openssl}/lib/VC/sslcrypto64MD.lib") { # Win64 here, with a debugging library set. - $dbgsuffix = 1; - $libsslpath = '\lib\VC\libssl64.lib'; + $dbgsuffix = 1; + $libsslpath = '\lib\VC\libssl64.lib'; $libcryptopath = '\lib\VC\libcrypto64.lib'; } else { # On both Win32 and Win64 the same library # names are used without a debugging context. 
- $dbgsuffix = 0; - $libsslpath = '\lib\libssl.lib'; + $dbgsuffix = 0; + $libsslpath = '\lib\libssl.lib'; $libcryptopath = '\lib\libcrypto.lib'; } @@ -1193,23 +1200,23 @@ sub GetFakeConfigure my $self = shift; my $cfg = '--enable-thread-safety'; - $cfg .= ' --enable-cassert' if ($self->{options}->{asserts}); - $cfg .= ' --enable-nls' if ($self->{options}->{nls}); + $cfg .= ' --enable-cassert' if ($self->{options}->{asserts}); + $cfg .= ' --enable-nls' if ($self->{options}->{nls}); $cfg .= ' --enable-tap-tests' if ($self->{options}->{tap_tests}); - $cfg .= ' --with-ldap' if ($self->{options}->{ldap}); + $cfg .= ' --with-ldap' if ($self->{options}->{ldap}); $cfg .= ' --without-zlib' unless ($self->{options}->{zlib}); $cfg .= ' --with-extra-version' if ($self->{options}->{extraver}); - $cfg .= ' --with-ssl=openssl' if ($self->{options}->{openssl}); - $cfg .= ' --with-uuid' if ($self->{options}->{uuid}); - $cfg .= ' --with-libxml' if ($self->{options}->{xml}); - $cfg .= ' --with-libxslt' if ($self->{options}->{xslt}); - $cfg .= ' --with-lz4' if ($self->{options}->{lz4}); - $cfg .= ' --with-zstd' if ($self->{options}->{zstd}); - $cfg .= ' --with-gssapi' if ($self->{options}->{gss}); - $cfg .= ' --with-icu' if ($self->{options}->{icu}); - $cfg .= ' --with-tcl' if ($self->{options}->{tcl}); - $cfg .= ' --with-perl' if ($self->{options}->{perl}); - $cfg .= ' --with-python' if ($self->{options}->{python}); + $cfg .= ' --with-ssl=openssl' if ($self->{options}->{openssl}); + $cfg .= ' --with-uuid' if ($self->{options}->{uuid}); + $cfg .= ' --with-libxml' if ($self->{options}->{xml}); + $cfg .= ' --with-libxslt' if ($self->{options}->{xslt}); + $cfg .= ' --with-lz4' if ($self->{options}->{lz4}); + $cfg .= ' --with-zstd' if ($self->{options}->{zstd}); + $cfg .= ' --with-gssapi' if ($self->{options}->{gss}); + $cfg .= ' --with-icu' if ($self->{options}->{icu}); + $cfg .= ' --with-tcl' if ($self->{options}->{tcl}); + $cfg .= ' --with-perl' if ($self->{options}->{perl}); + $cfg .= ' --with-python' if ($self->{options}->{python}); my $port = $self->{options}->{'--with-pgport'}; $cfg .= " --with-pgport=$port" if defined($port); @@ -1232,13 +1239,13 @@ no warnings qw(redefine); ## no critic sub new { my $classname = shift; - my $self = $classname->SUPER::_new(@_); + my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '14.00'; - $self->{visualStudioName} = 'Visual Studio 2015'; - $self->{VisualStudioVersion} = '14.0.24730.2'; + $self->{solutionFileVersion} = '12.00'; + $self->{vcver} = '14.00'; + $self->{visualStudioName} = 'Visual Studio 2015'; + $self->{VisualStudioVersion} = '14.0.24730.2'; $self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; @@ -1260,13 +1267,13 @@ no warnings qw(redefine); ## no critic sub new { my $classname = shift; - my $self = $classname->SUPER::_new(@_); + my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '15.00'; - $self->{visualStudioName} = 'Visual Studio 2017'; - $self->{VisualStudioVersion} = '15.0.26730.3'; + $self->{solutionFileVersion} = '12.00'; + $self->{vcver} = '15.00'; + $self->{visualStudioName} = 'Visual Studio 2017'; + $self->{VisualStudioVersion} = '15.0.26730.3'; $self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; @@ -1288,13 +1295,13 @@ no warnings qw(redefine); ## no critic sub new { my $classname = shift; - my $self = $classname->SUPER::_new(@_); + my $self = 
$classname->SUPER::_new(@_); bless($self, $classname); - $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '16.00'; - $self->{visualStudioName} = 'Visual Studio 2019'; - $self->{VisualStudioVersion} = '16.0.28729.10'; + $self->{solutionFileVersion} = '12.00'; + $self->{vcver} = '16.00'; + $self->{visualStudioName} = 'Visual Studio 2019'; + $self->{VisualStudioVersion} = '16.0.28729.10'; $self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; @@ -1316,13 +1323,13 @@ no warnings qw(redefine); ## no critic sub new { my $classname = shift; - my $self = $classname->SUPER::_new(@_); + my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '17.00'; - $self->{visualStudioName} = 'Visual Studio 2022'; - $self->{VisualStudioVersion} = '17.0.31903.59'; + $self->{solutionFileVersion} = '12.00'; + $self->{vcver} = '17.00'; + $self->{visualStudioName} = 'Visual Studio 2022'; + $self->{VisualStudioVersion} = '17.0.31903.59'; $self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm index ecbd066bd9..9df2ab4282 100644 --- a/src/tools/msvc/VSObjectFactory.pm +++ b/src/tools/msvc/VSObjectFactory.pm @@ -19,7 +19,7 @@ use Solution; use MSBuildProject; our (@ISA, @EXPORT); -@ISA = qw(Exporter); +@ISA = qw(Exporter); @EXPORT = qw(CreateSolution CreateProject DetermineVisualStudioVersion); no warnings qw(redefine); ## no critic diff --git a/src/tools/msvc/build.pl b/src/tools/msvc/build.pl index 28a593145b..9853e5c3d8 100644 --- a/src/tools/msvc/build.pl +++ b/src/tools/msvc/build.pl @@ -54,9 +54,9 @@ do "./src/tools/msvc/config.pl" if (-f "src/tools/msvc/config.pl"); my $vcver = Mkvcbuild::mkvcbuild($config); # check what sort of build we are doing -my $bconf = $ENV{CONFIG} || "Release"; -my $msbflags = $ENV{MSBFLAGS} || ""; -my $buildwhat = $ARGV[1] || ""; +my $bconf = $ENV{CONFIG} || "Release"; +my $msbflags = $ENV{MSBFLAGS} || ""; +my $buildwhat = $ARGV[1] || ""; if (defined($ARGV[0])) { diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl index 70b44d1531..8945e772c2 100644 --- a/src/tools/msvc/config_default.pl +++ b/src/tools/msvc/config_default.pl @@ -10,23 +10,23 @@ our $config = { # blocksize => 8, # --with-blocksize, 8kB by default # wal_blocksize => 8, # --with-wal-blocksize, 8kB by default - ldap => 1, # --with-ldap - extraver => undef, # --with-extra-version= - gss => undef, # --with-gssapi= - icu => undef, # --with-icu= - lz4 => undef, # --with-lz4= - zstd => undef, # --with-zstd= - nls => undef, # --enable-nls= + ldap => 1, # --with-ldap + extraver => undef, # --with-extra-version= + gss => undef, # --with-gssapi= + icu => undef, # --with-icu= + lz4 => undef, # --with-lz4= + zstd => undef, # --with-zstd= + nls => undef, # --enable-nls= tap_tests => undef, # --enable-tap-tests - tcl => undef, # --with-tcl= - perl => undef, # --with-perl= - python => undef, # --with-python= - openssl => undef, # --with-ssl=openssl with - uuid => undef, # --with-uuid= - xml => undef, # --with-libxml= - xslt => undef, # --with-libxslt= - iconv => undef, # (not in configure, path to iconv) - zlib => undef # --with-zlib= + tcl => undef, # --with-tcl= + perl => undef, # --with-perl= + python => undef, # --with-python= + openssl => undef, # --with-ssl=openssl with + uuid => undef, # --with-uuid= + xml => undef, # --with-libxml= + xslt => undef, # --with-libxslt= + iconv => undef, # (not in configure, path to 
iconv) + zlib => undef # --with-zlib= }; 1; diff --git a/src/tools/msvc/dummylib/Win32/Registry.pm b/src/tools/msvc/dummylib/Win32/Registry.pm index 90f37add27..e14636eb31 100644 --- a/src/tools/msvc/dummylib/Win32/Registry.pm +++ b/src/tools/msvc/dummylib/Win32/Registry.pm @@ -10,7 +10,7 @@ use vars qw($HKEY_LOCAL_MACHINE); use Exporter (); our (@EXPORT, @ISA); -@ISA = qw(Exporter); +@ISA = qw(Exporter); @EXPORT = qw($HKEY_LOCAL_MACHINE); 1; diff --git a/src/tools/msvc/dummylib/Win32API/File.pm b/src/tools/msvc/dummylib/Win32API/File.pm index 0ea7cbe826..7baf34c4e5 100644 --- a/src/tools/msvc/dummylib/Win32API/File.pm +++ b/src/tools/msvc/dummylib/Win32API/File.pm @@ -10,8 +10,8 @@ use constant { SEM_FAILCRITICALERRORS => 1, SEM_NOGPFAULTERRORBOX => 2 }; sub SetErrormode { } use Exporter; our (@ISA, @EXPORT_OK, %EXPORT_TAGS); -@ISA = qw(Exporter); -@EXPORT_OK = qw(SetErrorMode SEM_FAILCRITICALERRORS SEM_NOGPFAULTERRORBOX); +@ISA = qw(Exporter); +@EXPORT_OK = qw(SetErrorMode SEM_FAILCRITICALERRORS SEM_NOGPFAULTERRORBOX); %EXPORT_TAGS = (SEM_ => [qw(SEM_FAILCRITICALERRORS SEM_NOGPFAULTERRORBOX)]); 1; diff --git a/src/tools/msvc/gendef.pl b/src/tools/msvc/gendef.pl index e7cbefcbc3..cf83d7d056 100644 --- a/src/tools/msvc/gendef.pl +++ b/src/tools/msvc/gendef.pl @@ -155,7 +155,7 @@ my $deffile; my $tempdir = '.'; GetOptions( - 'arch:s' => \$arch, + 'arch:s' => \$arch, 'deffile:s' => \$deffile, 'tempdir:s' => \$tempdir,) or usage(); diff --git a/src/tools/msvc/pgbison.pl b/src/tools/msvc/pgbison.pl index 014b14ec3d..25df6699b5 100644 --- a/src/tools/msvc/pgbison.pl +++ b/src/tools/msvc/pgbison.pl @@ -13,7 +13,7 @@ use File::Basename; do './src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl'; -my ($bisonver) = `bison -V`; # grab first line +my ($bisonver) = `bison -V`; # grab first line $bisonver = (split(/\s+/, $bisonver))[3]; # grab version number unless ($bisonver ge '2.3') diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl index 372f6d1b65..78170d105d 100644 --- a/src/tools/msvc/vcregress.pl +++ b/src/tools/msvc/vcregress.pl @@ -25,7 +25,7 @@ my $startdir = getcwd(); chdir "../../.." if (-d "../../../src/tools/msvc"); -my $topdir = getcwd(); +my $topdir = getcwd(); my $tmp_installdir = "$topdir/tmp_install"; do './src/tools/msvc/config_default.pl'; @@ -64,16 +64,16 @@ else # use a capital C here because config.pl has $config my $Config = -e "release/postgres/postgres.exe" ? "Release" : "Debug"; -copy("$Config/refint/refint.dll", "src/test/regress"); -copy("$Config/autoinc/autoinc.dll", "src/test/regress"); -copy("$Config/regress/regress.dll", "src/test/regress"); +copy("$Config/refint/refint.dll", "src/test/regress"); +copy("$Config/autoinc/autoinc.dll", "src/test/regress"); +copy("$Config/regress/regress.dll", "src/test/regress"); copy("$Config/dummy_seclabel/dummy_seclabel.dll", "src/test/regress"); # Configuration settings used by TAP tests -$ENV{with_ssl} = $config->{openssl} ? 'openssl' : 'no'; -$ENV{with_ldap} = $config->{ldap} ? 'yes' : 'no'; -$ENV{with_icu} = $config->{icu} ? 'yes' : 'no'; -$ENV{with_gssapi} = $config->{gss} ? 'yes' : 'no'; +$ENV{with_ssl} = $config->{openssl} ? 'openssl' : 'no'; +$ENV{with_ldap} = $config->{ldap} ? 'yes' : 'no'; +$ENV{with_icu} = $config->{icu} ? 'yes' : 'no'; +$ENV{with_gssapi} = $config->{gss} ? 
'yes' : 'no'; $ENV{with_krb_srvnam} = $config->{krb_srvnam} || 'postgres'; $ENV{with_readline} = 'no'; @@ -99,17 +99,17 @@ $temp_config = "--temp-config=\"$ENV{TEMP_CONFIG}\"" chdir "src/test/regress"; my %command = ( - CHECK => \&check, - PLCHECK => \&plcheck, - INSTALLCHECK => \&installcheck, - ECPGCHECK => \&ecpgcheck, - CONTRIBCHECK => \&contribcheck, - MODULESCHECK => \&modulescheck, + CHECK => \&check, + PLCHECK => \&plcheck, + INSTALLCHECK => \&installcheck, + ECPGCHECK => \&ecpgcheck, + CONTRIBCHECK => \&contribcheck, + MODULESCHECK => \&modulescheck, ISOLATIONCHECK => \&isolationcheck, - BINCHECK => \&bincheck, - RECOVERYCHECK => \&recoverycheck, - UPGRADECHECK => \&upgradecheck, # no-op - TAPTEST => \&taptest,); + BINCHECK => \&bincheck, + RECOVERYCHECK => \&recoverycheck, + UPGRADECHECK => \&upgradecheck, # no-op + TAPTEST => \&taptest,); my $proc = $command{$what}; @@ -124,7 +124,7 @@ exit 0; # Helper function for set_command_env, to set one environment command. sub set_single_env { - my $envname = shift; + my $envname = shift; my $envdefault = shift; # If a command is defined by the environment, just use it. @@ -145,9 +145,9 @@ sub set_single_env sub set_command_env { set_single_env('GZIP_PROGRAM', 'gzip'); - set_single_env('LZ4', 'lz4'); - set_single_env('OPENSSL', 'openssl'); - set_single_env('ZSTD', 'zstd'); + set_single_env('LZ4', 'lz4'); + set_single_env('OPENSSL', 'openssl'); + set_single_env('ZSTD', 'zstd'); } sub installcheck_internal @@ -156,8 +156,8 @@ sub installcheck_internal # for backwards compatibility, "serial" runs the tests in # parallel_schedule one by one. my $maxconn = $maxconn; - $maxconn = "--max-connections=1" if $schedule eq 'serial'; - $schedule = 'parallel' if $schedule eq 'serial'; + $maxconn = "--max-connections=1" if $schedule eq 'serial'; + $schedule = 'parallel' if $schedule eq 'serial'; my @args = ( "../../../$Config/pg_regress/pg_regress", @@ -187,8 +187,8 @@ sub check # for backwards compatibility, "serial" runs the tests in # parallel_schedule one by one. my $maxconn = $maxconn; - $maxconn = "--max-connections=1" if $schedule eq 'serial'; - $schedule = 'parallel' if $schedule eq 'serial'; + $maxconn = "--max-connections=1" if $schedule eq 'serial'; + $schedule = 'parallel' if $schedule eq 'serial'; InstallTemp(); chdir "${topdir}/src/test/regress"; @@ -201,7 +201,7 @@ sub check "--encoding=${encoding}", "--no-locale", "--temp-instance=./tmp_check"); - push(@args, $maxconn) if $maxconn; + push(@args, $maxconn) if $maxconn; push(@args, $temp_config) if $temp_config; system(@args); my $status = $? 
>> 8; @@ -219,7 +219,7 @@ sub ecpgcheck InstallTemp(); chdir "$topdir/src/interfaces/ecpg/test"; my $schedule = "ecpg"; - my @args = ( + my @args = ( "../../../../$Config/pg_regress_ecpg/pg_regress_ecpg", "--bindir=", "--dbname=ecpg1_regression,ecpg2_regression", @@ -287,8 +287,8 @@ sub tap_check # adjust the environment for just this test local %ENV = %ENV; - $ENV{PERL5LIB} = "$topdir/src/test/perl;$ENV{PERL5LIB}"; - $ENV{PG_REGRESS} = "$topdir/$Config/pg_regress/pg_regress"; + $ENV{PERL5LIB} = "$topdir/src/test/perl;$ENV{PERL5LIB}"; + $ENV{PG_REGRESS} = "$topdir/$Config/pg_regress/pg_regress"; $ENV{REGRESS_SHLIB} = "$topdir/src/test/regress/regress.dll"; $ENV{TESTDATADIR} = "$dir/tmp_check"; @@ -467,11 +467,11 @@ sub contribcheck foreach my $module (glob("*")) { # these configuration-based exclusions must match Install.pm - next if ($module eq "uuid-ossp" && !defined($config->{uuid})); - next if ($module eq "sslinfo" && !defined($config->{openssl})); - next if ($module eq "pgcrypto" && !defined($config->{openssl})); - next if ($module eq "xml2" && !defined($config->{xml})); - next if ($module =~ /_plperl$/ && !defined($config->{perl})); + next if ($module eq "uuid-ossp" && !defined($config->{uuid})); + next if ($module eq "sslinfo" && !defined($config->{openssl})); + next if ($module eq "pgcrypto" && !defined($config->{openssl})); + next if ($module eq "xml2" && !defined($config->{xml})); + next if ($module =~ /_plperl$/ && !defined($config->{perl})); next if ($module =~ /_plpython$/ && !defined($config->{python})); next if ($module eq "sepgsql"); @@ -501,7 +501,7 @@ sub recoverycheck { InstallTemp(); - my $dir = "$topdir/src/test/recovery"; + my $dir = "$topdir/src/test/recovery"; my $status = tap_check($dir); exit $status if $status; return; @@ -608,7 +608,7 @@ sub fetchTests my $pgptests = $config->{zlib} - ? GetTests("ZLIB_TST", $m) + ? GetTests("ZLIB_TST", $m) : GetTests("ZLIB_OFF_TST", $m); $t =~ s/\$\(CF_PGP_TESTS\)/$pgptests/; } @@ -620,7 +620,7 @@ sub fetchTests sub GetTests { my $testname = shift; - my $m = shift; + my $m = shift; if ($m =~ /^$testname\s*=\s*(.*)$/gm) { return $1; diff --git a/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl b/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl index b40b3fdbbf..0032fdd753 100644 --- a/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl +++ b/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl @@ -41,7 +41,7 @@ while (my $test_src = glob("$src_dir/tests/*.0")) command_ok( [ 'pg_bsd_indent', $test_src, - "$test.out", "-P$src_dir/tests/$test.pro" + "$test.out", "-P$src_dir/tests/$test.pro" ], "pg_bsd_indent succeeds on $test"); # check result matches, adding any diff to $diff_file diff --git a/src/tools/pginclude/pgcheckdefines b/src/tools/pginclude/pgcheckdefines index b1ebec677e..a9fe79ebe5 100755 --- a/src/tools/pginclude/pgcheckdefines +++ b/src/tools/pginclude/pgcheckdefines @@ -116,7 +116,7 @@ foreach my $file (@hfiles, @cfiles) my $subdir = $fpath; chop $subdir; my $top_builddir = ".."; - my $tmp = $fpath; + my $tmp = $fpath; while (($tmp = dirname($tmp)) ne '.') { $top_builddir = $top_builddir . "/.."; @@ -168,7 +168,7 @@ foreach my $file (@hfiles, @cfiles) # number of dots varies according to nesting depth. 
# my @includes = (); - my $COMPILE = "$CC $CPPFLAGS $CFLAGS -H -E $fname"; + my $COMPILE = "$CC $CPPFLAGS $CFLAGS -H -E $fname"; open $pipe, '-|', "$COMPILE 2>&1 >/dev/null" or die "can't fork: $!"; while (<$pipe>) diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent index 58692d073b..bce63d95da 100755 --- a/src/tools/pgindent/pgindent +++ b/src/tools/pgindent/pgindent @@ -21,22 +21,21 @@ my $indent_opts = my $devnull = File::Spec->devnull; -my ($typedefs_file, $typedef_str, - @excludes, $indent, $build, - $show_diff, $silent_diff, $help, - @commits,); +my ($typedefs_file, $typedef_str, @excludes, + $indent, $build, $show_diff, + $silent_diff, $help, @commits,); $help = 0; my %options = ( - "help" => \$help, - "commit=s" => \@commits, - "typedefs=s" => \$typedefs_file, + "help" => \$help, + "commit=s" => \@commits, + "typedefs=s" => \$typedefs_file, "list-of-typedefs=s" => \$typedef_str, - "excludes=s" => \@excludes, - "indent=s" => \$indent, - "show-diff" => \$show_diff, - "silent-diff" => \$silent_diff,); + "excludes=s" => \@excludes, + "indent=s" => \$indent, + "show-diff" => \$show_diff, + "silent-diff" => \$silent_diff,); GetOptions(%options) || usage("bad command line argument"); usage() if $help; @@ -61,7 +60,7 @@ my $sourcedir = locate_sourcedir(); if ($sourcedir) { my $exclude_candidate = "$sourcedir/exclude_file_patterns"; - push (@excludes, $exclude_candidate) if -f $exclude_candidate; + push(@excludes, $exclude_candidate) if -f $exclude_candidate; } # The typedef list that's mechanically extracted by the buildfarm may omit @@ -117,12 +116,12 @@ sub locate_sourcedir return $sub if -d $sub; # try to find it from an ancestor directory $sub = "../src/tools/pgindent"; - foreach (1..4) + foreach (1 .. 4) { return $sub if -d $sub; $sub = "../$sub"; } - return; # undef if nothing found + return; # undef if nothing found } sub load_typedefs @@ -204,7 +203,7 @@ sub read_source sub write_source { - my $source = shift; + my $source = shift; my $source_filename = shift; open(my $src_fh, '>', $source_filename) @@ -231,7 +230,7 @@ sub pre_indent # Prevent indenting of code in 'extern "C"' blocks. # we replace the braces with comments which we'll reverse later my $extern_c_start = '/* Open extern "C" */'; - my $extern_c_stop = '/* Close extern "C" */'; + my $extern_c_stop = '/* Close extern "C" */'; $source =~ s!(^#ifdef[ \t]+__cplusplus.*\nextern[ \t]+"C"[ \t]*\n)\{[ \t]*$!$1$extern_c_start!gm; $source =~ s!(^#ifdef[ \t]+__cplusplus.*\n)\}[ \t]*$!$1$extern_c_stop!gm; @@ -271,7 +270,7 @@ sub post_indent sub run_indent { - my $source = shift; + my $source = shift; my $error_message = shift; my $cmd = "$indent $indent_opts -U" . $filtered_typedefs_fh->filename; @@ -297,7 +296,7 @@ sub run_indent sub show_diff { - my $indented = shift; + my $indented = shift; my $source_filename = shift; my $post_fh = new File::Temp(TEMPLATE => "pgdiffXXXXX"); @@ -313,7 +312,7 @@ sub show_diff sub usage { - my $message = shift; + my $message = shift; my $helptext = <<'EOF'; Usage: pgindent [OPTION]... [FILE|DIR]... 
@@ -346,8 +345,7 @@ $filtered_typedefs_fh = load_typedefs(); check_indent(); -my $wanted = sub -{ +my $wanted = sub { my ($dev, $ino, $mode, $nlink, $uid, $gid); (($dev, $ino, $mode, $nlink, $uid, $gid) = lstat($_)) && -f _ @@ -356,7 +354,7 @@ my $wanted = sub }; # any non-option arguments are files or directories to be processed -File::Find::find({wanted => $wanted}, @ARGV) if @ARGV; +File::Find::find({ wanted => $wanted }, @ARGV) if @ARGV; # commit file locations are relative to the source root chdir "$sourcedir/../../.." if @commits && $sourcedir; @@ -364,11 +362,11 @@ chdir "$sourcedir/../../.." if @commits && $sourcedir; # process named commits by comparing each with their immediate ancestor foreach my $commit (@commits) { - my $prev="$commit~"; - my @affected=`git diff --diff-filter=ACMR --name-only $prev $commit`; + my $prev = "$commit~"; + my @affected = `git diff --diff-filter=ACMR --name-only $prev $commit`; die "git error" if $?; chomp(@affected); - push(@files,@affected); + push(@files, @affected); } warn "No files to process" unless @files; @@ -402,8 +400,8 @@ foreach my $source_filename (@files) $otherfile =~ s/\.y$/.l/; next if $otherfile ne $source_filename && -f $otherfile; - my $source = read_source($source_filename); - my $orig_source = $source; + my $source = read_source($source_filename); + my $orig_source = $source; my $error_message = ''; $source = pre_indent($source); diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 06da7cd428..260854747b 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -132,6 +132,7 @@ ArchiveModuleInit ArchiveModuleState ArchiveOpts ArchiveShutdownCB +ArchiveStartupCB ArchiveStreamState ArchiverOutput ArchiverStage @@ -240,6 +241,7 @@ Barrier BaseBackupCmd BaseBackupTargetHandle BaseBackupTargetType +BasicArchiveData BeginDirectModify_function BeginForeignInsert_function BeginForeignModify_function @@ -264,7 +266,6 @@ BitmapOr BitmapOrPath BitmapOrState Bitmapset -BlobInfo Block BlockId BlockIdData @@ -381,6 +382,7 @@ ClientData ClonePtrType ClosePortalStmt ClosePtrType +ClosestMatchState Clump ClusterInfo ClusterParams @@ -396,6 +398,7 @@ CoercionForm CoercionPathType CollAliasData CollInfo +CollParam CollateClause CollateExpr CollateStrength @@ -540,16 +543,17 @@ DR_intorel DR_printtup DR_sqlfunction DR_transientrel -DSA DWORD DataDumperPtr DataPageDeleteStack DatabaseInfo DateADT +DateTimeErrorExtra Datum DatumTupleFields DbInfo DbInfoArr +DbLocaleInfo DeClonePtrType DeadLockState DeallocateStmt @@ -565,6 +569,7 @@ DeleteStmt DependencyGenerator DependencyGeneratorData DependencyType +DeserialIOData DestReceiver DictISpell DictInt @@ -626,13 +631,13 @@ Edge EditableObjectType ElementsState EnableTimeoutParams -EndBlobPtrType -EndBlobsPtrType EndDataPtrType EndDirectModify_function EndForeignInsert_function EndForeignModify_function EndForeignScan_function +EndLOPtrType +EndLOsPtrType EndOfWalRecoveryInfo EndSampleScan_function EnumItem @@ -660,7 +665,6 @@ ExceptionLabelMap ExceptionMap ExecAuxRowMark ExecEvalBoolSubroutine -ExecEvalJsonExprContext ExecEvalSubroutine ExecForeignBatchInsert_function ExecForeignDelete_function @@ -706,9 +710,9 @@ ExprEvalOp ExprEvalOpLookup ExprEvalRowtypeCache ExprEvalStep +ExprSetupInfo ExprState ExprStateEvalFunc -ExtendBufferedFlags ExtendBufferedWhat ExtensibleNode ExtensibleNodeEntry @@ -720,7 +724,6 @@ FDWCollateState FD_SET FILE FILETIME -FPI FSMAddress FSMPage FSMPageData @@ -743,6 +746,7 @@ FinalPathExtraData FindColsContext 
FindSplitData FindSplitStrat +First FixedParallelExecutorState FixedParallelState FixedParamState @@ -898,6 +902,7 @@ FreePageBtreeLeafKey FreePageBtreeSearchResult FreePageManager FreePageSpanLeader +From FromCharDateMode FromExpr FullTransactionId @@ -943,6 +948,7 @@ GISTTYPE GIST_SPLITVEC GMReaderTupleBuffer GROUP +GUCHashEntry GV Gather GatherMerge @@ -1008,6 +1014,7 @@ GistVacState GlobalTransaction GlobalVisHorizonKind GlobalVisState +GrantRoleOptions GrantRoleStmt GrantStmt GrantTargetType @@ -1075,6 +1082,7 @@ HashInstrumentation HashJoin HashJoinState HashJoinTable +HashJoinTableData HashJoinTuple HashMemoryChunk HashMetaPage @@ -1114,10 +1122,12 @@ ID INFIX INT128 INTERFACE_INFO +IO IOContext IOFuncSelector IOObject IOOp +IO_STATUS_BLOCK IPCompareMethod ITEM IV @@ -1216,7 +1226,6 @@ IterateForeignScan_function IterateJsonStringValuesState JEntry JHashState -JOBOBJECTINFOCLASS JOBOBJECT_BASIC_LIMIT_INFORMATION JOBOBJECT_BASIC_UI_RESTRICTIONS JOBOBJECT_SECURITY_LIMIT_INFORMATION @@ -1229,38 +1238,30 @@ JitProviderReleaseContextCB JitProviderResetAfterErrorCB Join JoinCostWorkspace +JoinDomain JoinExpr JoinHashEntry JoinPath JoinPathExtraData JoinState +JoinTreeItem JoinType JsObject JsValue JsonAggConstructor JsonAggState -JsonArgument JsonArrayAgg JsonArrayConstructor JsonArrayQueryConstructor JsonBaseObjectInfo -JsonBehavior -JsonBehaviorType -JsonCoercion -JsonCommon JsonConstructorExpr JsonConstructorExprState JsonConstructorType JsonEncoding -JsonExpr -JsonExprOp JsonFormat JsonFormatType -JsonFunc -JsonFuncExpr JsonHashEntry JsonIsPredicate -JsonItemCoercions JsonIterateStringValuesAction JsonKeyValue JsonLexContext @@ -1275,10 +1276,8 @@ JsonObjectConstructor JsonOutput JsonParseContext JsonParseErrorType -JsonParseExpr JsonPath JsonPathBool -JsonPathDatatypeStatus JsonPathExecContext JsonPathExecResult JsonPathGinAddPathItemFunc @@ -1291,16 +1290,11 @@ JsonPathGinPathItem JsonPathItem JsonPathItemType JsonPathKeyword -JsonPathMutableContext JsonPathParseItem JsonPathParseResult JsonPathPredicateCallback JsonPathString -JsonPathVarCallback -JsonPathVariableEvalContext -JsonQuotes JsonReturning -JsonScalarExpr JsonSemAction JsonTokenType JsonTransformStringValuesAction @@ -1314,7 +1308,6 @@ JsonValueExpr JsonValueList JsonValueListIterator JsonValueType -JsonWrapper Jsonb JsonbAggState JsonbContainer @@ -1329,6 +1322,7 @@ JsonbTypeCategory JsonbValue JumbleState JunkFilter +KAXCompressReason KeyAction KeyActions KeyArray @@ -1343,17 +1337,34 @@ LINE LLVMAttributeRef LLVMBasicBlockRef LLVMBuilderRef +LLVMErrorRef LLVMIntPredicate +LLVMJITEventListenerRef LLVMJitContext LLVMJitHandle LLVMMemoryBufferRef LLVMModuleRef -LLVMOrcJITStackRef -LLVMOrcModuleHandle -LLVMOrcTargetAddress +LLVMOrcCLookupSet +LLVMOrcCSymbolMapPair +LLVMOrcCSymbolMapPairs +LLVMOrcDefinitionGeneratorRef +LLVMOrcExecutionSessionRef +LLVMOrcJITDylibLookupFlags +LLVMOrcJITDylibRef +LLVMOrcJITTargetAddress +LLVMOrcJITTargetMachineBuilderRef +LLVMOrcLLJITBuilderRef +LLVMOrcLLJITRef +LLVMOrcLookupKind +LLVMOrcLookupStateRef +LLVMOrcMaterializationUnitRef +LLVMOrcObjectLayerRef +LLVMOrcResourceTrackerRef +LLVMOrcSymbolStringPoolRef +LLVMOrcThreadSafeContextRef +LLVMOrcThreadSafeModuleRef LLVMPassManagerBuilderRef LLVMPassManagerRef -LLVMSharedModuleRef LLVMTargetMachineRef LLVMTargetRef LLVMTypeRef @@ -1370,12 +1381,9 @@ LOCKTAG LONG LONG_PTR LOOP +LPARAM LPBYTE -LPCTSTR LPCWSTR -LPDWORD -LPFILETIME -LPSECURITY_ATTRIBUTES LPSERVICE_STATUS LPSTR LPTHREAD_START_ROUTINE @@ -1391,18 +1399,17 @@ LWLock LWLockHandle 
LWLockMode LWLockPadded -LZ4CompressorState LZ4F_compressionContext_t LZ4F_decompressOptions_t LZ4F_decompressionContext_t LZ4F_errorCode_t LZ4F_preferences_t -LZ4File +LZ4State LabelProvider LagTracker LargeObjectDesc -LastAttnumInfo Latch +LauncherLastStartTimesEntry LerpFunc LexDescr LexemeEntry @@ -1423,6 +1430,7 @@ ListParsedLex ListenAction ListenActionKind ListenStmt +LoInfo LoadStmt LocalBufferLookupEnt LocalPgBackendStatus @@ -1479,7 +1487,6 @@ LogicalRepBeginData LogicalRepCommitData LogicalRepCommitPreparedTxnData LogicalRepCtxStruct -LogicalRepMode LogicalRepMsgType LogicalRepPartMapEntry LogicalRepPreparedTxnData @@ -1575,6 +1582,7 @@ MultirangeIOData MultirangeParseState MultirangeType NDBOX +NLSVERSIONINFOEX NODE NTSTATUS NUMCacheEntry @@ -1608,10 +1616,12 @@ NotificationList NotifyStmt Nsrt NtDllRoutine +NtFlushBuffersFileEx_t NullIfExpr NullTest NullTestType NullableDatum +NullingRelsMatch Numeric NumericAggState NumericDigit @@ -1670,7 +1680,7 @@ OprCacheKey OprInfo OprProofCacheEntry OprProofCacheKey -OutputContext +OuterJoinClauseInfo OutputPluginCallbacks OutputPluginOptions OutputPluginOutputType @@ -1680,7 +1690,6 @@ OverridingKind PACE_HEADER PACL PATH -PBOOL PCtxtHandle PERL_CONTEXT PERL_SI @@ -1743,10 +1752,9 @@ PGresAttValue PGresParamDesc PGresult PGresult_data -PHANDLE +PIO_STATUS_BLOCK PLAINTREE PLAssignStmt -PLUID_AND_ATTRIBUTES PLcword PLpgSQL_case_when PLpgSQL_condition @@ -1863,7 +1871,6 @@ PROCLOCK PROCLOCKTAG PROC_HDR PSID -PSID_AND_ATTRIBUTES PSQL_COMP_CASE PSQL_ECHO PSQL_ECHO_HIDDEN @@ -1872,7 +1879,6 @@ PTEntryArray PTIterationArray PTOKEN_PRIVILEGES PTOKEN_USER -PULONG PUTENVPROC PVIndStats PVIndVacStatus @@ -1972,6 +1978,7 @@ PartitionRangeDatum PartitionRangeDatumKind PartitionScheme PartitionSpec +PartitionStrategy PartitionTupleRouting PartitionedRelPruneInfo PartitionedRelPruningData @@ -1982,11 +1989,8 @@ PathClauseUsage PathCostComparison PathHashStack PathKey -PathKeyInfo PathKeysComparison PathTarget -PathkeyMutatorState -PathkeySortCost PatternInfo PatternInfoArray Pattern_Prefix_Status @@ -2022,6 +2026,7 @@ PgFdwModifyState PgFdwOption PgFdwPathExtraData PgFdwRelationInfo +PgFdwSamplingMethod PgFdwScanState PgIfAddrCallback PgStatShared_Archiver @@ -2105,13 +2110,11 @@ PortalStrategy PostParseColumnRefHook PostgresPollingStatusType PostingItem -PostponedQual PreParseColumnRefHook PredClass PredIterInfo PredIterInfoData PredXactList -PredXactListElement PredicateLockData PredicateLockTargetType PrefetchBufferResult @@ -2183,7 +2186,6 @@ QPRS_STATE QTN2QTState QTNode QUERYTYPE -QUERY_SECURITY_CONTEXT_TOKEN_FN QualCost QualItem Query @@ -2216,6 +2218,7 @@ RI_QueryKey RTEKind RTEPermissionInfo RWConflict +RWConflictData RWConflictPoolHeader Range RangeBound @@ -2251,7 +2254,8 @@ RecheckForeignScan_function RecordCacheEntry RecordCompareData RecordIOData -RecoveryLockListsEntry +RecoveryLockEntry +RecoveryLockXidEntry RecoveryPauseState RecoveryState RecoveryTargetTimeLineGoal @@ -2275,6 +2279,7 @@ ReindexStmt ReindexType RelFileLocator RelFileLocatorBackend +RelFileNumber RelIdCacheEnt RelInfo RelInfoArr @@ -2363,6 +2368,7 @@ ResultState ReturnSetInfo ReturnStmt RevmapContents +RevokeRoleGrantAction RewriteMappingDataEntry RewriteMappingFile RewriteRule @@ -2370,6 +2376,7 @@ RewriteState RmgrData RmgrDescData RmgrId +RoleNameEntry RoleNameItem RoleSpec RoleSpecType @@ -2384,6 +2391,7 @@ RowMarkType RowSecurityDesc RowSecurityPolicy RtlGetLastNtStatus_t +RtlNtStatusToDosError_t RuleInfo RuleLock RuleStmt @@ -2470,6 +2478,7 @@ SeqTable 
SeqTableData SerCommitSeqNo SerialControl +SerialIOData SerializableXactHandle SerializedActiveRelMaps SerializedClientConnectionInfo @@ -2610,9 +2619,9 @@ SplitTextOutputData SplitVar SplitedPageLayout StackElem -StartBlobPtrType -StartBlobsPtrType StartDataPtrType +StartLOPtrType +StartLOsPtrType StartReplicationCmd StartupStatusEnum StatEntry @@ -2660,6 +2669,7 @@ SubscriptionInfo SubscriptionRelState SupportRequestCost SupportRequestIndexCondition +SupportRequestOptimizeWindowClause SupportRequestRows SupportRequestSelectivity SupportRequestSimplify @@ -2676,6 +2686,7 @@ SyscacheCallbackFunction SystemRowsSamplerData SystemSamplerData SystemTimeSamplerData +TAPtype TAR_MEMBER TBMIterateResult TBMIteratingState @@ -2728,6 +2739,7 @@ TSVectorStat TState TStatus TStoreState +TU_UpdateIndexes TXNEntryFile TYPCATEGORY T_Action @@ -2880,12 +2892,10 @@ TypeCat TypeFuncClass TypeInfo TypeName -U U32 U8 UChar UCharIterator -UColAttribute UColAttributeValue UCollator UConverter @@ -2910,16 +2920,19 @@ UpdateStmt UpperRelationKind UpperUniquePath UserAuth +UserContext UserMapping UserOpts VacAttrStats VacAttrStatsP VacDeadItems VacErrPhase +VacObjFilter VacOptValue VacuumParams VacuumRelation VacuumStmt +ValidIOData ValidateIndexState ValuesScan ValuesScanState @@ -2940,6 +2953,8 @@ VariableSpace VariableStatData VariableSubstituteHook Variables +Vector32 +Vector8 VersionedQuery Vfd ViewCheckOption @@ -2956,7 +2971,6 @@ WALInsertLock WALInsertLockPadded WALOpenSegment WALReadError -WalRcvWakeupReason WALSegmentCloseCB WALSegmentContext WALSegmentOpenCB @@ -2986,6 +3000,7 @@ WalRcvExecResult WalRcvExecStatus WalRcvState WalRcvStreamOptions +WalRcvWakeupReason WalReceiverConn WalReceiverFunctionsType WalSnd @@ -2995,6 +3010,7 @@ WalSndState WalTimeSample WalUsage WalWriteMethod +WalWriteMethodOps Walfile WindowAgg WindowAggPath @@ -3093,17 +3109,16 @@ YYLTYPE YYSTYPE YY_BUFFER_STATE ZSTD_CCtx +ZSTD_CStream ZSTD_DCtx +ZSTD_DStream +ZSTD_cParameter ZSTD_inBuffer ZSTD_outBuffer +ZstdCompressorState _SPI_connection _SPI_plan -__AssignProcessToJobObject -__CreateJobObject -__CreateRestrictedToken -__IsProcessInJob -__QueryInformationJobObject -__SetInformationJobObject +__m128i __time64_t _dev_t _ino_t @@ -3111,8 +3126,8 @@ _locale_t _resultmap _stringlist acquireLocksOnSubLinks_context +add_nulling_relids_context adjust_appendrel_attrs_context -aff_regex_struct allocfunc amadjustmembers_function ambeginscan_function @@ -3139,6 +3154,7 @@ amvalidate_function array_iter array_unnest_fctx assign_collations_context +auth_password_hook_typ autovac_table av_relation avl_dbase @@ -3189,7 +3205,6 @@ cached_re_str canonicalize_state cashKEY catalogid_hash -cfp check_agg_arguments_context check_function_callback check_network_data @@ -3197,7 +3212,6 @@ check_object_relabel_type check_password_hook_type check_ungrouped_columns_context chr -clock_t cmpEntriesArg codes_t collation_cache_entry @@ -3206,6 +3220,7 @@ colormaprange compare_context config_var_value contain_aggs_of_level_context +contain_placeholder_references_context convert_testexpr_context copy_data_dest_cb copy_data_source_cb @@ -3238,6 +3253,10 @@ dlist_head dlist_iter dlist_mutable_iter dlist_node +dm_code +dm_codes +dm_letter +dm_node ds_state dsa_area dsa_area_control @@ -3310,7 +3329,6 @@ fmStringInfo fmgr_hook_type foreign_glob_cxt foreign_loc_cxt -freeaddrinfo_ptr_t freefunc fsec_t gbt_vsrt_arg @@ -3325,8 +3343,6 @@ get_attavgwidth_hook_type get_index_stats_hook_type get_relation_info_hook_type get_relation_stats_hook_type -getaddrinfo_ptr_t 
-getnameinfo_ptr_t
 gid_t
 gin_leafpage_items_state
 ginxlogCreatePostingTree
@@ -3348,9 +3364,13 @@ gistxlogPageSplit
 gistxlogPageUpdate
 grouping_sets_data
 gseg_picksplit_item
+gss_OID_set
 gss_buffer_desc
 gss_cred_id_t
+gss_cred_usage_t
 gss_ctx_id_t
+gss_key_value_element_desc
+gss_key_value_set_desc
 gss_name_t
 gtrgm_consistent_cache
 gzFile
@@ -3366,7 +3386,6 @@ hstoreUniquePairs_t
 hstoreUpgrade_t
 hyperLogLogState
 ifState
-ilist
 import_error_callback_arg
 indexed_tlist
 inet
@@ -3482,6 +3501,7 @@ pagetable_iterator
 pairingheap
 pairingheap_comparator
 pairingheap_node
+pam_handle_t
 parallel_worker_main_type
 parse_error_callback_arg
 parser_context
@@ -3564,6 +3584,7 @@ pgthreadlock_t
 pid_t
 pivot_field
 planner_hook_type
+planstate_tree_walker_callback
 plperl_array_info
 plperl_call_data
 plperl_interp_desc
@@ -3572,6 +3593,7 @@ plperl_proc_key
 plperl_proc_ptr
 plperl_query_desc
 plperl_query_entry
+plpgsql_CastExprHashEntry
 plpgsql_CastHashEntry
 plpgsql_CastHashKey
 plpgsql_HashEnt
@@ -3618,6 +3640,7 @@ pull_varattnos_context
 pull_varnos_context
 pull_vars_context
 pullup_replace_vars_context
+pushdown_safe_type
 pushdown_safety_info
 qc_hash_func
 qsort_arg_comparator
@@ -3631,7 +3654,9 @@ rbt_allocfunc
 rbt_combiner
 rbt_comparator
 rbt_freefunc
-reduce_outer_joins_state
+reduce_outer_joins_partial_state
+reduce_outer_joins_pass1_state
+reduce_outer_joins_pass2_state
 reference
 regex_arc_t
 regex_t
@@ -3656,6 +3681,7 @@ relopts_validator
 remoteConn
 remoteConnHashEnt
 remoteDep
+remove_nulling_relids_context
 rendezvousHashEntry
 replace_rte_variables_callback
 replace_rte_variables_context
@@ -3665,6 +3691,7 @@ rewrite_event
 rf_context
 rm_detail_t
 role_auth_extra
+rolename_hash
 row_security_policy_hook_type
 rsv_callback
 saophash_hash
@@ -3758,6 +3785,8 @@ toast_compress_header
 tokenize_error_callback_arg
 transferMode
 transfer_thread_arg
+tree_mutator_callback
+tree_walker_callback
 trgm
 trgm_mb_char
 trivalue
@@ -3772,6 +3801,8 @@ type
 tzEntry
 u_char
 u_int
+ua_page_items
+ua_page_stats
 uchr
 uid_t
 uint128
@@ -3779,10 +3810,12 @@ uint16
 uint16_t
 uint32
 uint32_t
+uint32x4_t
 uint64
 uint64_t
 uint8
 uint8_t
+uint8x16_t
 uintptr_t
 unicodeStyleBorderFormat
 unicodeStyleColumnFormat
@@ -3865,7 +3898,6 @@ xl_heap_confirm
 xl_heap_delete
 xl_heap_freeze_page
 xl_heap_freeze_plan
-xl_heap_freeze_tuple
 xl_heap_header
 xl_heap_inplace
 xl_heap_insert
@@ -3900,6 +3932,7 @@ xl_standby_lock
 xl_standby_locks
 xl_tblspc_create_rec
 xl_tblspc_drop_rec
+xl_testcustomrmgrs_message
 xl_xact_abort
 xl_xact_assignment
 xl_xact_commit
@@ -3927,6 +3960,8 @@ xmlNodePtr
 xmlNodeSetPtr
 xmlParserCtxtPtr
 xmlParserInputPtr
+xmlSaveCtxt
+xmlSaveCtxtPtr
 xmlStructuredErrorFunc
 xmlTextWriter
 xmlTextWriterPtr
@@ -3944,4 +3979,3 @@ yyscan_t
 z_stream
 z_streamp
 zic_t
-ZSTD_CStream
diff --git a/src/tools/win32tzlist.pl b/src/tools/win32tzlist.pl
index 079c3705a7..657f7d4879 100755
--- a/src/tools/win32tzlist.pl
+++ b/src/tools/win32tzlist.pl
@@ -48,8 +48,8 @@ foreach my $keyname (@subkeys)
 	  unless ($vals{Std} && $vals{Dlt} && $vals{Display});
 	push @system_zones,
 	  {
-		'std'     => $vals{Std}->[2],
-		'dlt'     => $vals{Dlt}->[2],
+		'std' => $vals{Std}->[2],
+		'dlt' => $vals{Dlt}->[2],
 		'display' => clean_displayname($vals{Display}->[2]),
 	  };
 }
@@ -80,9 +80,9 @@ while ($pgtz =~
 	push @file_zones,
 	  {
 		'display' => clean_displayname($1),
-		'std'     => $2,
-		'dlt'     => $3,
-		'match'   => $4,
+		'std' => $2,
+		'dlt' => $3,
+		'match' => $4,
 	  };
 }
 
diff --git a/src/tutorial/funcs.c b/src/tutorial/funcs.c
index ceffb56835..f597777a1f 100644
--- a/src/tutorial/funcs.c
+++ b/src/tutorial/funcs.c
@@ -78,8 +78,8 @@ copytext(PG_FUNCTION_ARGS)
 	 * VARDATA is a pointer to the data region of the new struct.  The source
 	 * could be a short datum, so retrieve its data through VARDATA_ANY.
 	 */
-	memcpy(VARDATA(new_t),		/* destination */
-		   VARDATA_ANY(t),		/* source */
+	memcpy(VARDATA(new_t),		/* destination */
+		   VARDATA_ANY(t),		/* source */
 		   VARSIZE_ANY_EXHDR(t));	/* how many bytes */
 	PG_RETURN_TEXT_P(new_t);
 }