2021-05-07 16:56:14 +02:00
|
|
|
|
2023-01-02 21:00:37 +01:00
|
|
|
# Copyright (c) 2021-2023, PostgreSQL Global Development Group
|
2021-05-07 16:56:14 +02:00
|
|
|
|
2006-09-04 23:30:40 +02:00
|
|
|
package Solution;
|
2007-03-29 17:30:52 +02:00
|
|
|
|
2007-03-17 15:01:01 +01:00
|
|
|
#
|
|
|
|
# Package that encapsulates a Visual C++ solution file generation
|
2007-03-29 17:30:52 +02:00
|
|
|
#
|
2010-09-20 22:08:53 +02:00
|
|
|
# src/tools/msvc/Solution.pm
|
2007-03-17 15:01:01 +01:00
|
|
|
#
|
2006-09-04 23:30:40 +02:00
|
|
|
use Carp;
|
|
|
|
use strict;
|
|
|
|
use warnings;
|
2012-01-03 14:44:26 +01:00
|
|
|
use VSObjectFactory;
|
2006-09-04 23:30:40 +02:00
|
|
|
|
2018-05-31 14:13:02 +02:00
|
|
|
no warnings qw(redefine); ## no critic
|
|
|
|
|
2012-01-03 14:44:26 +01:00
|
|
|
# Constructor.  Takes the class name and a hashref of build options,
# validates the size-related options, and returns the blessed object.
# Dies on any invalid option combination.
sub _new
{
	my ($classname, $options) = @_;

	my $self = {
		projects => {},
		options => $options,
		VisualStudioVersion => undef,
		MinimumVisualStudioVersion => undef,
		vcver => undef,
		platform => undef,
	};
	bless($self, $classname);

	$self->DeterminePlatform();

	die "XSLT requires XML\n"
	  if $options->{xslt} && !$options->{xml};

	# For each size option, undef or 0 means "use the default".
	$options->{blocksize} ||= 8;
	die "Bad blocksize $options->{blocksize}"
	  unless grep { $_ == $options->{blocksize} } (1, 2, 4, 8, 16, 32);

	$options->{segsize} ||= 1;

	# only allow segsize 1 for now, as we can't do large files yet in windows
	die "Bad segsize $options->{segsize}"
	  if $options->{segsize} != 1;

	$options->{wal_blocksize} ||= 8;
	die "Bad wal_blocksize $options->{wal_blocksize}"
	  unless grep { $_ == $options->{wal_blocksize} }
	  (1, 2, 4, 8, 16, 32, 64);

	return $self;
}
|
|
|
|
|
2014-01-26 15:49:10 +01:00
|
|
|
# Base implementation contributes no extra header text; presumably a hook
# for subclasses to override -- confirm against the Visual Studio
# solution subclasses elsewhere in the tree.
sub GetAdditionalHeaders
{
	return '';
}
|
|
|
|
|
2012-01-03 14:44:26 +01:00
|
|
|
# Record the target platform in $self->{platform}: 'x64' or 'Win32' when
# building with MSVC on Windows, or 'FAKE' when not running on Windows
# at all (useful for testing this tooling elsewhere).
sub DeterminePlatform
{
	my $self = shift;

	if ($^O ne "MSWin32")
	{
		$self->{platform} = 'FAKE';
	}
	else
	{
		# Examine CL help output to determine if we are in 32 or 64-bit mode.
		my $clhelp = `cl /help 2>&1`;
		die "cl command not found" unless $? >> 8 == 0;
		$self->{platform} =
		  ($clhelp =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32';
	}
	print "Detected hardware platform: $self->{platform}\n";
	return;
}
|
|
|
|
|
2006-09-04 23:30:40 +02:00
|
|
|
# Return 1 if $oldfile is newer than $newfile, or if $newfile doesn't exist.
# Special case - a change to config.pl or config_default.pl invalidates
# everything, so any target older than either of those is out of date.
sub IsNewer
{
	my ($newfile, $oldfile) = @_;

	-e $oldfile or warn "source file \"$oldfile\" does not exist";

	# Recursively compare against the config files, unless we are already
	# comparing against one of them (which would recurse forever).
	unless ($oldfile eq 'src/tools/msvc/config.pl'
		|| $oldfile eq 'src/tools/msvc/config_default.pl')
	{
		for my $cfg ('src/tools/msvc/config.pl',
			'src/tools/msvc/config_default.pl')
		{
			return 1 if -f $cfg && IsNewer($newfile, $cfg);
		}
	}

	# A missing target always needs rebuilding.
	return 1 unless -e $newfile;

	# Compare modification timestamps (stat field 9 is mtime).
	my @nstat = stat($newfile);
	my @ostat = stat($oldfile);
	return 1 if $nstat[9] < $ostat[9];
	return 0;
}
|
|
|
|
|
|
|
|
# Copy a file, *not* preserving date. Only works for text files.
# Croaks if either file cannot be opened, or if the copy cannot be
# written out completely.
sub copyFile
{
	my ($src, $dest) = @_;
	open(my $i, '<', $src)  || croak "Could not open $src";
	open(my $o, '>', $dest) || croak "Could not open $dest";
	while (<$i>)
	{
		print $o $_;
	}
	close($i);

	# Buffered write errors (e.g. disk full) only surface when the output
	# handle is flushed, so the close of the destination must be checked
	# to avoid silently reporting a truncated copy as success.
	close($o) || croak "Could not write $dest";
	return;
}
|
|
|
|
|
2019-06-26 03:44:46 +02:00
|
|
|
# Fetch version of OpenSSL based on a parsing of the command shipped with
# the installer this build is linking to.  Returns an array of the first
# three digit groups of the OpenSSL version, which is enough to decide
# which options to apply for the OpenSSL version being linked with.
# Croaks if openssl.exe cannot be run or its output cannot be parsed.
sub GetOpenSSLVersion
{
	my $self = shift;

	# Attempt to get OpenSSL version and location. This assumes that
	# openssl.exe is in the specified directory.
	# Quote the .exe name in case it has spaces
	my $opensslcmd =
	  qq("$self->{options}->{openssl}\\bin\\openssl.exe" version 2>&1);
	my $sslout = `$opensslcmd`;

	croak
	  "Unable to determine OpenSSL version: The openssl.exe command wasn't found."
	  if $? >> 8 != 0;

	return ($1, $2, $3) if $sslout =~ /(\d+)\.(\d+)\.(\d+)(\D)/m;

	croak
	  "Unable to determine OpenSSL version: The openssl.exe version could not be determined.";
}
|
|
|
|
|
2007-03-12 20:10:50 +01:00
|
|
|
sub GenerateFiles
|
|
|
|
{
|
|
|
|
my $self = shift;
|
2010-01-01 18:34:25 +01:00
|
|
|
my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
|
2020-02-28 08:54:49 +01:00
|
|
|
my $ac_init_found = 0;
|
2019-12-20 08:54:42 +01:00
|
|
|
my $package_name;
|
|
|
|
my $package_version;
|
|
|
|
my $package_bugreport;
|
2020-02-28 08:54:49 +01:00
|
|
|
my $package_url;
|
2020-03-10 11:20:38 +01:00
|
|
|
my ($majorver, $minorver);
|
2020-07-19 12:14:42 +02:00
|
|
|
my $ac_define_openssl_api_compat_found = 0;
|
|
|
|
my $openssl_api_compat;
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2020-07-24 10:34:16 +02:00
|
|
|
# Parse configure.ac to get version numbers
|
|
|
|
open(my $c, '<', "configure.ac")
|
|
|
|
|| confess("Could not open configure.ac for reading\n");
|
2017-03-27 04:24:13 +02:00
|
|
|
while (<$c>)
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
2020-02-28 08:54:49 +01:00
|
|
|
if (/^AC_INIT\(\[([^\]]+)\], \[([^\]]+)\], \[([^\]]+)\], \[([^\]]*)\], \[([^\]]+)\]/
|
|
|
|
)
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
2020-02-28 08:54:49 +01:00
|
|
|
$ac_init_found = 1;
|
|
|
|
|
2019-12-20 08:54:42 +01:00
|
|
|
$package_name = $1;
|
|
|
|
$package_version = $2;
|
|
|
|
$package_bugreport = $3;
|
2020-02-28 08:54:49 +01:00
|
|
|
#$package_tarname = $4;
|
|
|
|
$package_url = $5;
|
2019-12-20 08:54:42 +01:00
|
|
|
|
|
|
|
if ($package_version !~ /^(\d+)(?:\.(\d+))?/)
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
2021-06-26 06:52:48 +02:00
|
|
|
confess "Bad format of version: $package_version\n";
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2020-03-10 11:20:38 +01:00
|
|
|
$majorver = sprintf("%d", $1);
|
|
|
|
$minorver = sprintf("%d", $2 ? $2 : 0);
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2020-07-19 12:14:42 +02:00
|
|
|
elsif (/\bAC_DEFINE\(OPENSSL_API_COMPAT, \[([0-9xL]+)\]/)
|
|
|
|
{
|
|
|
|
$ac_define_openssl_api_compat_found = 1;
|
|
|
|
$openssl_api_compat = $1;
|
|
|
|
}
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2017-03-27 04:24:13 +02:00
|
|
|
close($c);
|
2020-07-24 10:34:16 +02:00
|
|
|
confess "Unable to parse configure.ac for all variables!"
|
2020-07-19 12:14:42 +02:00
|
|
|
unless $ac_init_found && $ac_define_openssl_api_compat_found;
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2015-04-25 14:52:03 +02:00
|
|
|
if (IsNewer("src/include/pg_config_os.h", "src/include/port/win32.h"))
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
|
|
|
print "Copying pg_config_os.h...\n";
|
2015-04-25 14:52:03 +02:00
|
|
|
copyFile("src/include/port/win32.h", "src/include/pg_config_os.h");
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2019-12-20 08:54:42 +01:00
|
|
|
print "Generating configuration headers...\n";
|
|
|
|
my $extraver = $self->{options}->{extraver};
|
|
|
|
$extraver = '' unless defined $extraver;
|
|
|
|
my $port = $self->{options}->{"--with-pgport"} || 5432;
|
|
|
|
|
|
|
|
# Every symbol in pg_config.h.in must be accounted for here. Set
|
|
|
|
# to undef if the symbol should not be defined.
|
|
|
|
my %define = (
|
|
|
|
ALIGNOF_DOUBLE => 8,
|
|
|
|
ALIGNOF_INT => 4,
|
|
|
|
ALIGNOF_LONG => 4,
|
|
|
|
ALIGNOF_LONG_LONG_INT => 8,
|
|
|
|
ALIGNOF_PG_INT128_TYPE => undef,
|
|
|
|
ALIGNOF_SHORT => 2,
|
|
|
|
AC_APPLE_UNIVERSAL_BUILD => undef,
|
|
|
|
BLCKSZ => 1024 * $self->{options}->{blocksize},
|
2020-02-10 17:12:46 +01:00
|
|
|
CONFIGURE_ARGS => '"' . $self->GetFakeConfigure() . '"',
|
2019-12-20 08:54:42 +01:00
|
|
|
DEF_PGPORT => $port,
|
|
|
|
DEF_PGPORT_STR => qq{"$port"},
|
2022-03-25 08:44:31 +01:00
|
|
|
DLSUFFIX => '".dll"',
|
2019-12-20 08:54:42 +01:00
|
|
|
ENABLE_GSS => $self->{options}->{gss} ? 1 : undef,
|
|
|
|
ENABLE_NLS => $self->{options}->{nls} ? 1 : undef,
|
|
|
|
HAVE_APPEND_HISTORY => undef,
|
|
|
|
HAVE_ASN1_STRING_GET0_DATA => undef,
|
|
|
|
HAVE_ATOMICS => 1,
|
|
|
|
HAVE_ATOMIC_H => undef,
|
|
|
|
HAVE_BACKTRACE_SYMBOLS => undef,
|
|
|
|
HAVE_BIO_METH_NEW => undef,
|
|
|
|
HAVE_COMPUTED_GOTO => undef,
|
|
|
|
HAVE_COPYFILE => undef,
|
|
|
|
HAVE_COPYFILE_H => undef,
|
|
|
|
HAVE_CRTDEFS_H => undef,
|
|
|
|
HAVE_CRYPTO_LOCK => undef,
|
|
|
|
HAVE_DECL_FDATASYNC => 0,
|
|
|
|
HAVE_DECL_F_FULLFSYNC => 0,
|
2021-07-15 17:00:43 +02:00
|
|
|
HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER => 0,
|
|
|
|
HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER => 0,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_DECL_LLVMGETHOSTCPUNAME => 0,
|
|
|
|
HAVE_DECL_LLVMGETHOSTCPUFEATURES => 0,
|
|
|
|
HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN => 0,
|
2021-07-15 17:00:43 +02:00
|
|
|
HAVE_DECL_POSIX_FADVISE => 0,
|
2021-07-13 01:17:35 +02:00
|
|
|
HAVE_DECL_PREADV => 0,
|
|
|
|
HAVE_DECL_PWRITEV => 0,
|
2021-07-15 17:00:43 +02:00
|
|
|
HAVE_DECL_STRLCAT => 0,
|
|
|
|
HAVE_DECL_STRLCPY => 0,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_DECL_STRNLEN => 1,
|
|
|
|
HAVE_EDITLINE_HISTORY_H => undef,
|
|
|
|
HAVE_EDITLINE_READLINE_H => undef,
|
|
|
|
HAVE_EXECINFO_H => undef,
|
|
|
|
HAVE_EXPLICIT_BZERO => undef,
|
|
|
|
HAVE_FSEEKO => 1,
|
|
|
|
HAVE_GCC__ATOMIC_INT32_CAS => undef,
|
|
|
|
HAVE_GCC__ATOMIC_INT64_CAS => undef,
|
|
|
|
HAVE_GCC__SYNC_CHAR_TAS => undef,
|
|
|
|
HAVE_GCC__SYNC_INT32_CAS => undef,
|
|
|
|
HAVE_GCC__SYNC_INT32_TAS => undef,
|
|
|
|
HAVE_GCC__SYNC_INT64_CAS => undef,
|
|
|
|
HAVE_GETIFADDRS => undef,
|
|
|
|
HAVE_GETOPT => undef,
|
|
|
|
HAVE_GETOPT_H => undef,
|
|
|
|
HAVE_GETOPT_LONG => undef,
|
|
|
|
HAVE_GETPEEREID => undef,
|
|
|
|
HAVE_GETPEERUCRED => undef,
|
2023-04-17 22:00:39 +02:00
|
|
|
HAVE_GSSAPI_EXT_H => undef,
|
|
|
|
HAVE_GSSAPI_GSSAPI_EXT_H => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_GSSAPI_GSSAPI_H => undef,
|
|
|
|
HAVE_GSSAPI_H => undef,
|
Refactor HMAC implementations
Similarly to the cryptohash implementations, this refactors the existing
HMAC code into a single set of APIs that can be plugged with any crypto
libraries PostgreSQL is built with (only OpenSSL currently). If there
is no such libraries, a fallback implementation is available. Those new
APIs are designed similarly to the existing cryptohash layer, so there
is no real new design here, with the same logic around buffer bound
checks and memory handling.
HMAC has a dependency on cryptohashes, so all the cryptohash types
supported by cryptohash{_openssl}.c can be used with HMAC. This
refactoring is an advantage mainly for SCRAM, that included its own
implementation of HMAC with SHA256 without relying on the existing
crypto libraries even if PostgreSQL was built with their support.
This code has been tested on Windows and Linux, with and without
OpenSSL, across all the versions supported on HEAD from 1.1.1 down to
1.0.1. I have also checked that the implementations are working fine
using some sample results, a custom extension of my own, and doing
cross-checks across different major versions with SCRAM with the client
and the backend.
Author: Michael Paquier
Reviewed-by: Bruce Momjian
Discussion: https://postgr.es/m/X9m0nkEJEzIPXjeZ@paquier.xyz
2021-04-03 10:30:49 +02:00
|
|
|
HAVE_HMAC_CTX_FREE => undef,
|
|
|
|
HAVE_HMAC_CTX_NEW => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_HISTORY_H => undef,
|
|
|
|
HAVE_HISTORY_TRUNCATE_FILE => undef,
|
|
|
|
HAVE_IFADDRS_H => undef,
|
|
|
|
HAVE_INET_ATON => undef,
|
2022-04-01 15:41:44 +02:00
|
|
|
HAVE_INET_PTON => 1,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_INT_TIMEZONE => 1,
|
|
|
|
HAVE_INT64 => undef,
|
|
|
|
HAVE_INT8 => undef,
|
|
|
|
HAVE_INTTYPES_H => undef,
|
|
|
|
HAVE_INT_OPTERR => undef,
|
|
|
|
HAVE_INT_OPTRESET => undef,
|
|
|
|
HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P => undef,
|
Add kqueue(2) support to the WaitEventSet API.
Use kevent(2) to wait for events on the BSD family of operating
systems and macOS. This is similar to the epoll(2) support added
for Linux by commit 98a64d0bd.
Author: Thomas Munro
Reviewed-by: Andres Freund, Marko Tiikkaja, Tom Lane
Tested-by: Mateusz Guzik, Matteo Beccati, Keith Fiske, Heikki Linnakangas, Michael Paquier, Peter Eisentraut, Rui DeSousa, Tom Lane, Mark Wong
Discussion: https://postgr.es/m/CAEepm%3D37oF84-iXDTQ9MrGjENwVGds%2B5zTr38ca73kWR7ez_tA%40mail.gmail.com
2020-02-05 05:35:57 +01:00
|
|
|
HAVE_KQUEUE => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_LANGINFO_H => undef,
|
|
|
|
HAVE_LDAP_INITIALIZE => undef,
|
|
|
|
HAVE_LIBCRYPTO => undef,
|
|
|
|
HAVE_LIBLDAP => undef,
|
2021-03-21 22:20:17 +01:00
|
|
|
HAVE_LIBLZ4 => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_LIBM => undef,
|
|
|
|
HAVE_LIBPAM => undef,
|
|
|
|
HAVE_LIBREADLINE => undef,
|
|
|
|
HAVE_LIBSELINUX => undef,
|
|
|
|
HAVE_LIBSSL => undef,
|
|
|
|
HAVE_LIBWLDAP32 => undef,
|
|
|
|
HAVE_LIBXML2 => undef,
|
|
|
|
HAVE_LIBXSLT => undef,
|
|
|
|
HAVE_LIBZ => $self->{options}->{zlib} ? 1 : undef,
|
2022-02-18 19:40:31 +01:00
|
|
|
HAVE_LIBZSTD => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_LONG_INT_64 => undef,
|
|
|
|
HAVE_LONG_LONG_INT_64 => 1,
|
|
|
|
HAVE_MBARRIER_H => undef,
|
2023-07-09 01:55:03 +02:00
|
|
|
HAVE_MBSTOWCS_L => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_MEMORY_H => 1,
|
|
|
|
HAVE_MEMSET_S => undef,
|
|
|
|
HAVE_MKDTEMP => undef,
|
|
|
|
HAVE_OPENSSL_INIT_SSL => undef,
|
|
|
|
HAVE_OSSP_UUID_H => undef,
|
|
|
|
HAVE_PAM_PAM_APPL_H => undef,
|
|
|
|
HAVE_POSIX_FADVISE => undef,
|
|
|
|
HAVE_POSIX_FALLOCATE => undef,
|
|
|
|
HAVE_PPOLL => undef,
|
|
|
|
HAVE_PTHREAD => undef,
|
2021-03-10 03:40:17 +01:00
|
|
|
HAVE_PTHREAD_BARRIER_WAIT => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_PTHREAD_IS_THREADED_NP => undef,
|
|
|
|
HAVE_PTHREAD_PRIO_INHERIT => undef,
|
|
|
|
HAVE_READLINE_H => undef,
|
|
|
|
HAVE_READLINE_HISTORY_H => undef,
|
|
|
|
HAVE_READLINE_READLINE_H => undef,
|
|
|
|
HAVE_RL_COMPLETION_MATCHES => undef,
|
2020-01-24 00:20:57 +01:00
|
|
|
HAVE_RL_COMPLETION_SUPPRESS_QUOTE => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_RL_FILENAME_COMPLETION_FUNCTION => undef,
|
Improve psql's tab completion for filenames.
The Readline library contains a fair amount of knowledge about how to
tab-complete filenames, but it turns out that that doesn't work too well
unless we follow its expectation that we use its filename quoting hooks
to quote and de-quote filenames. We were trying to do such quote handling
within complete_from_files(), and that's still what we have to do if we're
using libedit, which lacks those hooks. But for Readline, it works a lot
better if we tell Readline that single-quote is a quoting character and
then provide hooks that know the details of the quoting rules for SQL
and psql meta-commands.
Hence, resurrect the quoting hook functions that existed in the original
version of tab-complete.c (and were disabled by commit f6689a328 because
they "didn't work so well yet"), and whack on them until they do seem to
work well.
Notably, this fixes bug #16059 from Steven Winfield, who pointed out
that the previous coding would strip quote marks from filenames in SQL
COPY commands, even though they're syntactically necessary there.
Now, we not only don't do that, but we'll add a quote mark when you
tab-complete, even if you didn't type one.
Getting this to work across a range of libedit versions (and, to a
lesser extent, libreadline versions) was depressingly difficult.
It will be interesting to see whether the new regression test cases
pass everywhere in the buildfarm.
Some future patch might try to handle quoted SQL identifiers with
similar explicit quoting/dequoting logic, but that's for another day.
Patch by me, reviewed by Peter Eisentraut.
Discussion: https://postgr.es/m/16059-8836946734c02b84@postgresql.org
2020-01-23 17:07:12 +01:00
|
|
|
HAVE_RL_FILENAME_QUOTE_CHARACTERS => undef,
|
|
|
|
HAVE_RL_FILENAME_QUOTING_FUNCTION => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_RL_RESET_SCREEN_SIZE => undef,
|
2021-12-02 19:06:27 +01:00
|
|
|
HAVE_RL_VARIABLE_BIND => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_SECURITY_PAM_APPL_H => undef,
|
|
|
|
HAVE_SETPROCTITLE => undef,
|
|
|
|
HAVE_SETPROCTITLE_FAST => undef,
|
2021-11-09 15:20:47 +01:00
|
|
|
HAVE_SOCKLEN_T => 1,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_SPINLOCKS => 1,
|
libpq: Add sslcertmode option to control client certificates
The sslcertmode option controls whether the server is allowed and/or
required to request a certificate from the client. There are three
modes:
- "allow" is the default and follows the current behavior, where a
configured client certificate is sent if the server requests one
(via one of its default locations or sslcert). With the current
implementation, will happen whenever TLS is negotiated.
- "disable" causes the client to refuse to send a client certificate
even if sslcert is configured or if a client certificate is available in
one of its default locations.
- "require" causes the client to fail if a client certificate is never
sent and the server opens a connection anyway. This doesn't add any
additional security, since there is no guarantee that the server is
validating the certificate correctly, but it may helpful to troubleshoot
more complicated TLS setups.
sslcertmode=require requires SSL_CTX_set_cert_cb(), available since
OpenSSL 1.0.2. Note that LibreSSL does not include it.
Using a connection parameter different than require_auth has come up as
the simplest design because certificate authentication does not rely
directly on any of the AUTH_REQ_* codes, and one may want to require a
certificate to be sent in combination of a given authentication method,
like SCRAM-SHA-256.
TAP tests are added in src/test/ssl/, some of them relying on sslinfo to
check if a certificate has been set. These are compatible across all
the versions of OpenSSL supported on HEAD (currently down to 1.0.1).
Author: Jacob Champion
Reviewed-by: Aleksander Alekseev, Peter Eisentraut, David G. Johnston,
Michael Paquier
Discussion: https://postgr.es/m/9e5a8ccddb8355ea9fa4b75a1e3a9edc88a70cd3.camel@vmware.com
2023-03-24 05:34:26 +01:00
|
|
|
HAVE_SSL_CTX_SET_CERT_CB => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_STDBOOL_H => 1,
|
2020-02-21 22:14:09 +01:00
|
|
|
HAVE_STDINT_H => 1,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_STDLIB_H => 1,
|
|
|
|
HAVE_STRCHRNUL => undef,
|
|
|
|
HAVE_STRERROR_R => undef,
|
|
|
|
HAVE_STRINGS_H => undef,
|
|
|
|
HAVE_STRING_H => 1,
|
|
|
|
HAVE_STRLCAT => undef,
|
|
|
|
HAVE_STRLCPY => undef,
|
|
|
|
HAVE_STRNLEN => 1,
|
|
|
|
HAVE_STRSIGNAL => undef,
|
|
|
|
HAVE_STRUCT_OPTION => undef,
|
|
|
|
HAVE_STRUCT_SOCKADDR_SA_LEN => undef,
|
|
|
|
HAVE_STRUCT_TM_TM_ZONE => undef,
|
|
|
|
HAVE_SYNC_FILE_RANGE => undef,
|
2021-03-19 23:46:32 +01:00
|
|
|
HAVE_SYNCFS => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_SYSLOG => undef,
|
|
|
|
HAVE_SYS_EPOLL_H => undef,
|
Add kqueue(2) support to the WaitEventSet API.
Use kevent(2) to wait for events on the BSD family of operating
systems and macOS. This is similar to the epoll(2) support added
for Linux by commit 98a64d0bd.
Author: Thomas Munro
Reviewed-by: Andres Freund, Marko Tiikkaja, Tom Lane
Tested-by: Mateusz Guzik, Matteo Beccati, Keith Fiske, Heikki Linnakangas, Michael Paquier, Peter Eisentraut, Rui DeSousa, Tom Lane, Mark Wong
Discussion: https://postgr.es/m/CAEepm%3D37oF84-iXDTQ9MrGjENwVGds%2B5zTr38ca73kWR7ez_tA%40mail.gmail.com
2020-02-05 05:35:57 +01:00
|
|
|
HAVE_SYS_EVENT_H => undef,
|
2022-01-10 11:54:11 +01:00
|
|
|
HAVE_SYS_PERSONALITY_H => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_SYS_PRCTL_H => undef,
|
|
|
|
HAVE_SYS_PROCCTL_H => undef,
|
2022-02-09 20:24:54 +01:00
|
|
|
HAVE_SYS_SIGNALFD_H => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_SYS_STAT_H => 1,
|
|
|
|
HAVE_SYS_TYPES_H => 1,
|
|
|
|
HAVE_SYS_UCRED_H => undef,
|
|
|
|
HAVE_TERMIOS_H => undef,
|
|
|
|
HAVE_TYPEOF => undef,
|
|
|
|
HAVE_UCRED_H => undef,
|
|
|
|
HAVE_UINT64 => undef,
|
|
|
|
HAVE_UINT8 => undef,
|
|
|
|
HAVE_UNION_SEMUN => undef,
|
|
|
|
HAVE_UNISTD_H => 1,
|
|
|
|
HAVE_USELOCALE => undef,
|
|
|
|
HAVE_UUID_BSD => undef,
|
|
|
|
HAVE_UUID_E2FS => undef,
|
|
|
|
HAVE_UUID_OSSP => undef,
|
|
|
|
HAVE_UUID_H => undef,
|
|
|
|
HAVE_UUID_UUID_H => undef,
|
2023-07-09 01:55:03 +02:00
|
|
|
HAVE_WCSTOMBS_L => undef,
|
Default to hidden visibility for extension libraries where possible
Until now postgres built extension libraries with global visibility, i.e.
exporting all symbols. On the one platform where that behavior is not
natively available, namely windows, we emulate it by analyzing the input files
to the shared library and exporting all the symbols therein.
Not exporting all symbols is actually desirable, as it can improve loading
speed, reduces the likelihood of symbol conflicts and can improve intra
extension library function call performance. It also makes the non-windows
builds more similar to windows builds.
Additionally, with meson implementing the export-all-symbols behavior for
windows, turns out to be more verbose than desirable.
This patch adds support for hiding symbols by default and, to counteract that,
explicit symbol visibility annotation for compilers that support
__attribute__((visibility("default"))) and -fvisibility=hidden. That is
expected to be most, if not all, compilers except msvc (for which we already
support explicit symbol export annotations).
Now that extension library symbols are explicitly exported, we don't need to
export all symbols on windows anymore, hence remove that behavior from
src/tools/msvc. The supporting code can't be removed, as we still need to
export all symbols from the main postgres binary.
Author: Andres Freund <andres@anarazel.de>
Author: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/20211101020311.av6hphdl6xbjbuif@alap3.anarazel.de
2022-07-18 02:49:51 +02:00
|
|
|
HAVE_VISIBILITY_ATTRIBUTE => undef,
|
Fix handling of SCRAM-SHA-256's channel binding with RSA-PSS certificates
OpenSSL 1.1.1 and newer versions have added support for RSA-PSS
certificates, which requires the use of a specific routine in OpenSSL to
determine which hash function to use when compiling it when using
channel binding in SCRAM-SHA-256. X509_get_signature_nid(), that is the
original routine the channel binding code has relied on, is not able to
determine which hash algorithm to use for such certificates. However,
X509_get_signature_info(), new to OpenSSL 1.1.1, is able to do it. This
commit switches the channel binding logic to rely on
X509_get_signature_info() over X509_get_signature_nid(), which would be
the choice when building with 1.1.1 or newer.
The error could have been triggered on the client or the server, hence
libpq and the backend need to have their related code paths patched.
Note that attempting to load an RSA-PSS certificate with OpenSSL 1.1.0
or older leads to a failure due to an unsupported algorithm.
The discovery of relying on X509_get_signature_info() comes from Jacob,
the tests have been written by Heikki (with few tweaks from me), while I
have bundled the whole together while adding the bits needed for MSVC
and meson.
This issue exists since channel binding exists, so backpatch all the way
down. Some tests are added in 15~, triggered if compiling with OpenSSL
1.1.1 or newer, where the certificate and key files can easily be
generated for RSA-PSS.
Reported-by: Gunnar "Nick" Bluth
Author: Jacob Champion, Heikki Linnakangas
Discussion: https://postgr.es/m/17760-b6c61e752ec07060@postgresql.org
Backpatch-through: 11
2023-02-15 02:12:16 +01:00
|
|
|
HAVE_X509_GET_SIGNATURE_INFO => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE_X86_64_POPCNTQ => undef,
|
|
|
|
HAVE__BOOL => undef,
|
|
|
|
HAVE__BUILTIN_BSWAP16 => undef,
|
|
|
|
HAVE__BUILTIN_BSWAP32 => undef,
|
|
|
|
HAVE__BUILTIN_BSWAP64 => undef,
|
|
|
|
HAVE__BUILTIN_CLZ => undef,
|
|
|
|
HAVE__BUILTIN_CONSTANT_P => undef,
|
|
|
|
HAVE__BUILTIN_CTZ => undef,
|
2022-02-18 04:45:34 +01:00
|
|
|
HAVE__BUILTIN_FRAME_ADDRESS => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
HAVE__BUILTIN_OP_OVERFLOW => undef,
|
|
|
|
HAVE__BUILTIN_POPCOUNT => undef,
|
|
|
|
HAVE__BUILTIN_TYPES_COMPATIBLE_P => undef,
|
|
|
|
HAVE__BUILTIN_UNREACHABLE => undef,
|
|
|
|
HAVE__CONFIGTHREADLOCALE => 1,
|
|
|
|
HAVE__CPUID => 1,
|
|
|
|
HAVE__GET_CPUID => undef,
|
|
|
|
HAVE__STATIC_ASSERT => undef,
|
|
|
|
INT64_MODIFIER => qq{"ll"},
|
|
|
|
LOCALE_T_IN_XLOCALE => undef,
|
|
|
|
MAXIMUM_ALIGNOF => 8,
|
|
|
|
MEMSET_LOOP_LIMIT => 1024,
|
2020-07-19 12:14:42 +02:00
|
|
|
OPENSSL_API_COMPAT => $openssl_api_compat,
|
2019-12-20 08:54:42 +01:00
|
|
|
PACKAGE_BUGREPORT => qq{"$package_bugreport"},
|
|
|
|
PACKAGE_NAME => qq{"$package_name"},
|
|
|
|
PACKAGE_STRING => qq{"$package_name $package_version"},
|
|
|
|
PACKAGE_TARNAME => lc qq{"$package_name"},
|
2020-02-28 08:54:49 +01:00
|
|
|
PACKAGE_URL => qq{"$package_url"},
|
2019-12-20 08:54:42 +01:00
|
|
|
PACKAGE_VERSION => qq{"$package_version"},
|
|
|
|
PG_INT128_TYPE => undef,
|
|
|
|
PG_INT64_TYPE => 'long long int',
|
|
|
|
PG_KRB_SRVNAM => qq{"postgres"},
|
2020-03-10 11:20:38 +01:00
|
|
|
PG_MAJORVERSION => qq{"$majorver"},
|
|
|
|
PG_MAJORVERSION_NUM => $majorver,
|
|
|
|
PG_MINORVERSION_NUM => $minorver,
|
2019-12-20 08:54:42 +01:00
|
|
|
PG_PRINTF_ATTRIBUTE => undef,
|
|
|
|
PG_USE_STDBOOL => 1,
|
|
|
|
PG_VERSION => qq{"$package_version$extraver"},
|
2020-03-10 11:20:38 +01:00
|
|
|
PG_VERSION_NUM => sprintf("%d%04d", $majorver, $minorver),
|
2019-12-20 08:54:42 +01:00
|
|
|
PG_VERSION_STR =>
|
|
|
|
qq{"PostgreSQL $package_version$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"},
|
|
|
|
PROFILE_PID_DIR => undef,
|
|
|
|
PTHREAD_CREATE_JOINABLE => undef,
|
|
|
|
RELSEG_SIZE => (1024 / $self->{options}->{blocksize}) *
|
|
|
|
$self->{options}->{segsize} * 1024,
|
|
|
|
SIZEOF_BOOL => 1,
|
|
|
|
SIZEOF_LONG => 4,
|
|
|
|
SIZEOF_OFF_T => undef,
|
|
|
|
SIZEOF_SIZE_T => $bits / 8,
|
|
|
|
SIZEOF_VOID_P => $bits / 8,
|
|
|
|
STDC_HEADERS => 1,
|
|
|
|
STRERROR_R_INT => undef,
|
|
|
|
USE_ARMV8_CRC32C => undef,
|
|
|
|
USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK => undef,
|
|
|
|
USE_ASSERT_CHECKING => $self->{options}->{asserts} ? 1 : undef,
|
|
|
|
USE_BONJOUR => undef,
|
|
|
|
USE_BSD_AUTH => undef,
|
|
|
|
USE_ICU => $self->{options}->{icu} ? 1 : undef,
|
|
|
|
USE_LIBXML => undef,
|
|
|
|
USE_LIBXSLT => undef,
|
Allow configurable LZ4 TOAST compression.
There is now a per-column COMPRESSION option which can be set to pglz
(the default, and the only option in up until now) or lz4. Or, if you
like, you can set the new default_toast_compression GUC to lz4, and
then that will be the default for new table columns for which no value
is specified. We don't have lz4 support in the PostgreSQL code, so
to use lz4 compression, PostgreSQL must be built --with-lz4.
In general, TOAST compression means compression of individual column
values, not the whole tuple, and those values can either be compressed
inline within the tuple or compressed and then stored externally in
the TOAST table, so those properties also apply to this feature.
Prior to this commit, a TOAST pointer has two unused bits as part of
the va_extsize field, and a compressed datum has two unused bits as
part of the va_rawsize field. These bits are unused because the length
of a varlena is limited to 1GB; we now use them to indicate the
compression type that was used. This means we only have bit space for
2 more built-in compression types, but we could work around that
problem, if necessary, by introducing a new vartag_external value for
any further types we end up wanting to add. Hopefully, it won't be
too important to offer a wide selection of algorithms here, since
each one we add not only takes more coding but also adds a build
dependency for every packager. Nevertheless, it seems worth doing
at least this much, because LZ4 gets better compression than PGLZ
with less CPU usage.
It's possible for LZ4-compressed datums to leak into composite type
values stored on disk, just as it is for PGLZ. It's also possible for
LZ4-compressed attributes to be copied into a different table via SQL
commands such as CREATE TABLE AS or INSERT .. SELECT. It would be
expensive to force such values to be decompressed, so PostgreSQL has
never done so. For the same reasons, we also don't force recompression
of already-compressed values even if the target table prefers a
different compression method than was used for the source data. These
architectural decisions are perhaps arguable but revisiting them is
well beyond the scope of what seemed possible to do as part of this
project. However, it's relatively cheap to recompress as part of
VACUUM FULL or CLUSTER, so this commit adjusts those commands to do
so, if the configured compression method of the table happens not to
match what was used for some column value stored therein.
Dilip Kumar. The original patches on which this work was based were
written by Ildus Kurbangaliev, and those patches were based on
even earlier work by Nikita Glukhov, but the design has since changed
very substantially, since allowing a potentially large number of
compression methods that could be added and dropped on a running
system proved too problematic given some of the architectural issues
mentioned above; the choice of which specific compression method to
add first is now different; and a lot of the code has been heavily
refactored. More recently, Justin Przyby helped quite a bit with
testing and reviewing and this version also includes some code
contributions from him. Other design input and review from Tomas
Vondra, Álvaro Herrera, Andres Freund, Oleg Bartunov, Alexander
Korotkov, and me.
Discussion: http://postgr.es/m/20170907194236.4cefce96%40wp.localdomain
Discussion: http://postgr.es/m/CAFiTN-uUpX3ck%3DK0mLEk-G_kUQY%3DSNOTeqdaNRR9FMdQrHKebw%40mail.gmail.com
2021-03-19 20:10:38 +01:00
|
|
|
USE_LZ4 => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
USE_LDAP => $self->{options}->{ldap} ? 1 : undef,
|
|
|
|
USE_LLVM => undef,
|
2023-08-10 13:37:46 +02:00
|
|
|
USE_LOONGARCH_CRC32C => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
USE_NAMED_POSIX_SEMAPHORES => undef,
|
|
|
|
USE_OPENSSL => undef,
|
|
|
|
USE_PAM => undef,
|
|
|
|
USE_SLICING_BY_8_CRC32C => undef,
|
|
|
|
USE_SSE42_CRC32C => undef,
|
|
|
|
USE_SSE42_CRC32C_WITH_RUNTIME_CHECK => 1,
|
|
|
|
USE_SYSTEMD => undef,
|
|
|
|
USE_SYSV_SEMAPHORES => undef,
|
|
|
|
USE_SYSV_SHARED_MEMORY => undef,
|
|
|
|
USE_UNNAMED_POSIX_SEMAPHORES => undef,
|
|
|
|
USE_WIN32_SEMAPHORES => 1,
|
|
|
|
USE_WIN32_SHARED_MEMORY => 1,
|
2022-02-18 19:40:31 +01:00
|
|
|
USE_ZSTD => undef,
|
2019-12-20 08:54:42 +01:00
|
|
|
WCSTOMBS_L_IN_XLOCALE => undef,
|
|
|
|
WORDS_BIGENDIAN => undef,
|
|
|
|
XLOG_BLCKSZ => 1024 * $self->{options}->{wal_blocksize},
|
|
|
|
_FILE_OFFSET_BITS => undef,
|
|
|
|
_LARGEFILE_SOURCE => undef,
|
|
|
|
_LARGE_FILES => undef,
|
|
|
|
inline => '__inline',
|
|
|
|
pg_restrict => '__restrict',
|
|
|
|
# not defined, because it'd conflict with __declspec(restrict)
|
|
|
|
restrict => undef,
|
2020-02-21 09:14:03 +01:00
|
|
|
typeof => undef,);
|
2019-12-20 08:54:42 +01:00
|
|
|
|
|
|
|
if ($self->{options}->{uuid})
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
2019-12-20 08:54:42 +01:00
|
|
|
$define{HAVE_UUID_OSSP} = 1;
|
|
|
|
$define{HAVE_UUID_H} = 1;
|
|
|
|
}
|
|
|
|
if ($self->{options}->{xml})
|
|
|
|
{
|
|
|
|
$define{HAVE_LIBXML2} = 1;
|
|
|
|
$define{USE_LIBXML} = 1;
|
|
|
|
}
|
|
|
|
if ($self->{options}->{xslt})
|
|
|
|
{
|
|
|
|
$define{HAVE_LIBXSLT} = 1;
|
|
|
|
$define{USE_LIBXSLT} = 1;
|
|
|
|
}
|
2021-05-11 03:43:05 +02:00
|
|
|
if ($self->{options}->{lz4})
|
|
|
|
{
|
|
|
|
$define{HAVE_LIBLZ4} = 1;
|
|
|
|
$define{USE_LZ4} = 1;
|
|
|
|
}
|
2022-02-18 19:40:31 +01:00
|
|
|
if ($self->{options}->{zstd})
|
|
|
|
{
|
|
|
|
$define{HAVE_LIBZSTD} = 1;
|
|
|
|
$define{USE_ZSTD} = 1;
|
|
|
|
}
|
2019-12-20 08:54:42 +01:00
|
|
|
if ($self->{options}->{openssl})
|
|
|
|
{
|
|
|
|
$define{USE_OPENSSL} = 1;
|
2023-07-03 06:20:27 +02:00
|
|
|
$define{HAVE_SSL_CTX_SET_CERT_CB} = 1;
|
2019-06-26 03:44:46 +02:00
|
|
|
|
2019-12-20 08:54:42 +01:00
|
|
|
my ($digit1, $digit2, $digit3) = $self->GetOpenSSLVersion();
|
2019-06-26 03:44:46 +02:00
|
|
|
|
Fix handling of SCRAM-SHA-256's channel binding with RSA-PSS certificates
OpenSSL 1.1.1 and newer versions have added support for RSA-PSS
certificates, which requires the use of a specific routine in OpenSSL to
determine which hash function to use when compiling it when using
channel binding in SCRAM-SHA-256. X509_get_signature_nid(), that is the
original routine the channel binding code has relied on, is not able to
determine which hash algorithm to use for such certificates. However,
X509_get_signature_info(), new to OpenSSL 1.1.1, is able to do it. This
commit switches the channel binding logic to rely on
X509_get_signature_info() over X509_get_signature_nid(), which would be
the choice when building with 1.1.1 or newer.
The error could have been triggered on the client or the server, hence
libpq and the backend need to have their related code paths patched.
Note that attempting to load an RSA-PSS certificate with OpenSSL 1.1.0
or older leads to a failure due to an unsupported algorithm.
The discovery of relying on X509_get_signature_info() comes from Jacob,
the tests have been written by Heikki (with few tweaks from me), while I
have bundled the whole together while adding the bits needed for MSVC
and meson.
This issue exists since channel binding exists, so backpatch all the way
down. Some tests are added in 15~, triggered if compiling with OpenSSL
1.1.1 or newer, where the certificate and key files can easily be
generated for RSA-PSS.
Reported-by: Gunnar "Nick" Bluth
Author: Jacob Champion, Heikki Linnakangas
Discussion: https://postgr.es/m/17760-b6c61e752ec07060@postgresql.org
Backpatch-through: 11
2023-02-15 02:12:16 +01:00
|
|
|
# Symbols needed with OpenSSL 1.1.1 and above.
|
|
|
|
if ( ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
|
|
|
|
|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '1'))
|
|
|
|
{
|
|
|
|
$define{HAVE_X509_GET_SIGNATURE_INFO} = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
# Symbols needed with OpenSSL 1.1.0 and above.
|
2021-10-20 09:48:24 +02:00
|
|
|
if ( ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
|
|
|
|
|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '0'))
|
2017-06-12 17:05:20 +02:00
|
|
|
{
|
2019-12-20 08:54:42 +01:00
|
|
|
$define{HAVE_ASN1_STRING_GET0_DATA} = 1;
|
|
|
|
$define{HAVE_BIO_METH_NEW} = 1;
|
Refactor HMAC implementations
Similarly to the cryptohash implementations, this refactors the existing
HMAC code into a single set of APIs that can be plugged with any crypto
libraries PostgreSQL is built with (only OpenSSL currently). If there
is no such libraries, a fallback implementation is available. Those new
APIs are designed similarly to the existing cryptohash layer, so there
is no real new design here, with the same logic around buffer bound
checks and memory handling.
HMAC has a dependency on cryptohashes, so all the cryptohash types
supported by cryptohash{_openssl}.c can be used with HMAC. This
refactoring is an advantage mainly for SCRAM, that included its own
implementation of HMAC with SHA256 without relying on the existing
crypto libraries even if PostgreSQL was built with their support.
This code has been tested on Windows and Linux, with and without
OpenSSL, across all the versions supported on HEAD from 1.1.1 down to
1.0.1. I have also checked that the implementations are working fine
using some sample results, a custom extension of my own, and doing
cross-checks across different major versions with SCRAM with the client
and the backend.
Author: Michael Paquier
Reviewed-by: Bruce Momjian
Discussion: https://postgr.es/m/X9m0nkEJEzIPXjeZ@paquier.xyz
2021-04-03 10:30:49 +02:00
|
|
|
$define{HAVE_HMAC_CTX_FREE} = 1;
|
|
|
|
$define{HAVE_HMAC_CTX_NEW} = 1;
|
2019-12-20 08:54:42 +01:00
|
|
|
$define{HAVE_OPENSSL_INIT_SSL} = 1;
|
2017-06-12 17:05:20 +02:00
|
|
|
}
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2019-12-20 08:54:42 +01:00
|
|
|
$self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1);
|
|
|
|
$self->GenerateConfigHeader('src/include/pg_config_ext.h', \%define, 0);
|
|
|
|
$self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h',
|
|
|
|
\%define, 0);
|
|
|
|
|
2012-07-05 03:47:49 +02:00
|
|
|
$self->GenerateDefFile(
|
2015-04-25 14:52:03 +02:00
|
|
|
"src/interfaces/libpq/libpqdll.def",
|
|
|
|
"src/interfaces/libpq/exports.txt",
|
2012-07-05 03:47:49 +02:00
|
|
|
"LIBPQ");
|
2010-04-09 15:05:58 +02:00
|
|
|
$self->GenerateDefFile(
|
2015-04-25 14:52:03 +02:00
|
|
|
"src/interfaces/ecpg/ecpglib/ecpglib.def",
|
|
|
|
"src/interfaces/ecpg/ecpglib/exports.txt",
|
2012-07-05 03:47:49 +02:00
|
|
|
"LIBECPG");
|
2010-04-09 15:05:58 +02:00
|
|
|
$self->GenerateDefFile(
|
2015-04-25 14:52:03 +02:00
|
|
|
"src/interfaces/ecpg/compatlib/compatlib.def",
|
|
|
|
"src/interfaces/ecpg/compatlib/exports.txt",
|
2012-07-05 03:47:49 +02:00
|
|
|
"LIBECPG_COMPAT");
|
2010-04-09 15:05:58 +02:00
|
|
|
$self->GenerateDefFile(
|
2015-04-25 14:52:03 +02:00
|
|
|
"src/interfaces/ecpg/pgtypeslib/pgtypeslib.def",
|
|
|
|
"src/interfaces/ecpg/pgtypeslib/exports.txt",
|
2012-07-05 03:47:49 +02:00
|
|
|
"LIBPGTYPES");
|
2012-06-10 21:20:04 +02:00
|
|
|
|
Replace our traditional initial-catalog-data format with a better design.
Historically, the initial catalog data to be installed during bootstrap
has been written in DATA() lines in the catalog header files. This had
lots of disadvantages: the format was badly underdocumented, it was
very difficult to edit the data in any mechanized way, and due to the
lack of any abstraction the data was verbose, hard to read/understand,
and easy to get wrong.
Hence, move this data into separate ".dat" files and represent it in a way
that can easily be read and rewritten by Perl scripts. The new format is
essentially "key => value" for each column; while it's a bit repetitive,
explicit labeling of each value makes the data far more readable and less
error-prone. Provide a way to abbreviate entries by omitting field values
that match a specified default value for their column. This allows removal
of a large amount of repetitive boilerplate and also lowers the barrier to
adding new columns.
Also teach genbki.pl how to translate symbolic OID references into
numeric OIDs for more cases than just "regproc"-like pg_proc references.
It can now do that for regprocedure-like references (thus solving the
problem that regproc is ambiguous for overloaded functions), operators,
types, opfamilies, opclasses, and access methods. Use this to turn
nearly all OID cross-references in the initial data into symbolic form.
This represents a very large step forward in readability and error
resistance of the initial catalog data. It should also reduce the
difficulty of renumbering OID assignments in uncommitted patches.
Also, solve the longstanding problem that frontend code that would like to
use OID macros and other information from the catalog headers often had
difficulty with backend-only code in the headers. To do this, arrange for
all generated macros, plus such other declarations as we deem fit, to be
placed in "derived" header files that are safe for frontend inclusion.
(Once clients migrate to using these pg_*_d.h headers, it will be possible
to get rid of the pg_*_fn.h headers, which only exist to quarantine code
away from clients. That is left for follow-on patches, however.)
The now-automatically-generated macros include the Anum_xxx and Natts_xxx
constants that we used to have to update by hand when adding or removing
catalog columns.
Replace the former manual method of generating OID macros for pg_type
entries with an automatic method, ensuring that all built-in types have
OID macros. (But note that this patch does not change the way that
OID macros for pg_proc entries are built and used. It's not clear that
making that match the other catalogs would be worth extra code churn.)
Add SGML documentation explaining what the new data format is and how to
work with it.
Despite being a very large change in the catalog headers, there is no
catversion bump here, because postgres.bki and related output files
haven't changed at all.
John Naylor, based on ideas from various people; review and minor
additional coding by me; previous review by Alvaro Herrera
Discussion: https://postgr.es/m/CAJVSVGWO48JbbwXkJz_yBFyGYW-M9YWxnPdxJBUosDC9ou_F0Q@mail.gmail.com
2018-04-08 19:16:50 +02:00
|
|
|
chdir('src/backend/utils');
|
|
|
|
my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
|
2018-05-04 00:47:41 +02:00
|
|
|
if ( IsNewer('fmgr-stamp', 'Gen_fmgrtab.pl')
|
|
|
|
|| IsNewer('fmgr-stamp', '../catalog/Catalog.pm')
|
|
|
|
|| IsNewer('fmgr-stamp', $pg_proc_dat)
|
|
|
|
|| IsNewer('fmgr-stamp', '../../../src/include/access/transam.h'))
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
2012-07-05 03:47:49 +02:00
|
|
|
system(
|
2019-02-12 22:29:26 +01:00
|
|
|
"perl -I ../catalog Gen_fmgrtab.pl --include-path ../../../src/include/ $pg_proc_dat"
|
Replace our traditional initial-catalog-data format with a better design.
Historically, the initial catalog data to be installed during bootstrap
has been written in DATA() lines in the catalog header files. This had
lots of disadvantages: the format was badly underdocumented, it was
very difficult to edit the data in any mechanized way, and due to the
lack of any abstraction the data was verbose, hard to read/understand,
and easy to get wrong.
Hence, move this data into separate ".dat" files and represent it in a way
that can easily be read and rewritten by Perl scripts. The new format is
essentially "key => value" for each column; while it's a bit repetitive,
explicit labeling of each value makes the data far more readable and less
error-prone. Provide a way to abbreviate entries by omitting field values
that match a specified default value for their column. This allows removal
of a large amount of repetitive boilerplate and also lowers the barrier to
adding new columns.
Also teach genbki.pl how to translate symbolic OID references into
numeric OIDs for more cases than just "regproc"-like pg_proc references.
It can now do that for regprocedure-like references (thus solving the
problem that regproc is ambiguous for overloaded functions), operators,
types, opfamilies, opclasses, and access methods. Use this to turn
nearly all OID cross-references in the initial data into symbolic form.
This represents a very large step forward in readability and error
resistance of the initial catalog data. It should also reduce the
difficulty of renumbering OID assignments in uncommitted patches.
Also, solve the longstanding problem that frontend code that would like to
use OID macros and other information from the catalog headers often had
difficulty with backend-only code in the headers. To do this, arrange for
all generated macros, plus such other declarations as we deem fit, to be
placed in "derived" header files that are safe for frontend inclusion.
(Once clients migrate to using these pg_*_d.h headers, it will be possible
to get rid of the pg_*_fn.h headers, which only exist to quarantine code
away from clients. That is left for follow-on patches, however.)
The now-automatically-generated macros include the Anum_xxx and Natts_xxx
constants that we used to have to update by hand when adding or removing
catalog columns.
Replace the former manual method of generating OID macros for pg_type
entries with an automatic method, ensuring that all built-in types have
OID macros. (But note that this patch does not change the way that
OID macros for pg_proc entries are built and used. It's not clear that
making that match the other catalogs would be worth extra code churn.)
Add SGML documentation explaining what the new data format is and how to
work with it.
Despite being a very large change in the catalog headers, there is no
catversion bump here, because postgres.bki and related output files
haven't changed at all.
John Naylor, based on ideas from various people; review and minor
additional coding by me; previous review by Alvaro Herrera
Discussion: https://postgr.es/m/CAJVSVGWO48JbbwXkJz_yBFyGYW-M9YWxnPdxJBUosDC9ou_F0Q@mail.gmail.com
2018-04-08 19:16:50 +02:00
|
|
|
);
|
2018-05-04 00:47:41 +02:00
|
|
|
open(my $f, '>', 'fmgr-stamp')
|
|
|
|
|| confess "Could not touch fmgr-stamp";
|
|
|
|
close($f);
|
2012-12-16 14:56:51 +01:00
|
|
|
}
|
Replace our traditional initial-catalog-data format with a better design.
Historically, the initial catalog data to be installed during bootstrap
has been written in DATA() lines in the catalog header files. This had
lots of disadvantages: the format was badly underdocumented, it was
very difficult to edit the data in any mechanized way, and due to the
lack of any abstraction the data was verbose, hard to read/understand,
and easy to get wrong.
Hence, move this data into separate ".dat" files and represent it in a way
that can easily be read and rewritten by Perl scripts. The new format is
essentially "key => value" for each column; while it's a bit repetitive,
explicit labeling of each value makes the data far more readable and less
error-prone. Provide a way to abbreviate entries by omitting field values
that match a specified default value for their column. This allows removal
of a large amount of repetitive boilerplate and also lowers the barrier to
adding new columns.
Also teach genbki.pl how to translate symbolic OID references into
numeric OIDs for more cases than just "regproc"-like pg_proc references.
It can now do that for regprocedure-like references (thus solving the
problem that regproc is ambiguous for overloaded functions), operators,
types, opfamilies, opclasses, and access methods. Use this to turn
nearly all OID cross-references in the initial data into symbolic form.
This represents a very large step forward in readability and error
resistance of the initial catalog data. It should also reduce the
difficulty of renumbering OID assignments in uncommitted patches.
Also, solve the longstanding problem that frontend code that would like to
use OID macros and other information from the catalog headers often had
difficulty with backend-only code in the headers. To do this, arrange for
all generated macros, plus such other declarations as we deem fit, to be
placed in "derived" header files that are safe for frontend inclusion.
(Once clients migrate to using these pg_*_d.h headers, it will be possible
to get rid of the pg_*_fn.h headers, which only exist to quarantine code
away from clients. That is left for follow-on patches, however.)
The now-automatically-generated macros include the Anum_xxx and Natts_xxx
constants that we used to have to update by hand when adding or removing
catalog columns.
Replace the former manual method of generating OID macros for pg_type
entries with an automatic method, ensuring that all built-in types have
OID macros. (But note that this patch does not change the way that
OID macros for pg_proc entries are built and used. It's not clear that
making that match the other catalogs would be worth extra code churn.)
Add SGML documentation explaining what the new data format is and how to
work with it.
Despite being a very large change in the catalog headers, there is no
catversion bump here, because postgres.bki and related output files
haven't changed at all.
John Naylor, based on ideas from various people; review and minor
additional coding by me; previous review by Alvaro Herrera
Discussion: https://postgr.es/m/CAJVSVGWO48JbbwXkJz_yBFyGYW-M9YWxnPdxJBUosDC9ou_F0Q@mail.gmail.com
2018-04-08 19:16:50 +02:00
|
|
|
chdir('../../..');
|
|
|
|
|
2012-12-16 14:56:51 +01:00
|
|
|
if (IsNewer(
|
2015-04-25 14:52:03 +02:00
|
|
|
'src/include/utils/fmgroids.h',
|
|
|
|
'src/backend/utils/fmgroids.h'))
|
2012-12-16 14:56:51 +01:00
|
|
|
{
|
2015-04-25 14:52:03 +02:00
|
|
|
copyFile('src/backend/utils/fmgroids.h',
|
|
|
|
'src/include/utils/fmgroids.h');
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2016-12-28 18:00:00 +01:00
|
|
|
if (IsNewer(
|
|
|
|
'src/include/utils/fmgrprotos.h',
|
|
|
|
'src/backend/utils/fmgrprotos.h'))
|
|
|
|
{
|
|
|
|
copyFile(
|
|
|
|
'src/backend/utils/fmgrprotos.h',
|
|
|
|
'src/include/utils/fmgrprotos.h');
|
|
|
|
}
|
|
|
|
|
2015-09-11 19:58:28 +02:00
|
|
|
if (IsNewer(
|
|
|
|
'src/include/storage/lwlocknames.h',
|
|
|
|
'src/backend/storage/lmgr/lwlocknames.txt'))
|
|
|
|
{
|
|
|
|
print "Generating lwlocknames.c and lwlocknames.h...\n";
|
2022-07-18 21:13:14 +02:00
|
|
|
my $lmgr = 'src/backend/storage/lmgr';
|
|
|
|
system(
|
|
|
|
"perl $lmgr/generate-lwlocknames.pl --outdir $lmgr $lmgr/lwlocknames.txt"
|
|
|
|
);
|
2015-09-11 19:58:28 +02:00
|
|
|
}
|
|
|
|
if (IsNewer(
|
|
|
|
'src/include/storage/lwlocknames.h',
|
|
|
|
'src/backend/storage/lmgr/lwlocknames.h'))
|
|
|
|
{
|
|
|
|
copyFile(
|
|
|
|
'src/backend/storage/lmgr/lwlocknames.h',
|
|
|
|
'src/include/storage/lwlocknames.h');
|
|
|
|
}
|
|
|
|
|
Generate automatically code and documentation related to wait events
The documentation and the code is generated automatically from a new
file called wait_event_names.txt, formatted in sections dedicated to
each wait event class (Timeout, Lock, IO, etc.) with three tab-separated
fields:
- C symbol in enums
- Format in the system views
- Description in the docs
Using this approach has several advantages, as we have proved to be
rather bad in maintaining this area of the tree across the years:
- The order of each item in the documentation and the code, which should
be alphabetical, has become incorrect multiple times, and the script
generating the code and documentation has a few rules to enforce that,
making the maintenance a no-brainer.
- Some wait events were added to the code, but not documented, so this
cannot be missed now.
- The order of the tables for each wait event class is enforced in the
documentation (the input .txt file does so as well for clarity, though
this is not mandatory).
- Less code, shaving 1.2k lines from the tree, with 1/3 of the savings
coming from the code, the rest from the documentation.
The wait event types "Lock" and "LWLock" still have their own code path
for their code, hence only the documentation is created for them. These
classes are listed with a special marker called WAIT_EVENT_DOCONLY in
the input file.
Adding a new wait event now requires only an update of
wait_event_names.txt, with "Lock" and "LWLock" treated as exceptions.
This commit has been tested with configure/Makefile, the CI and VPATH
build. clean, distclean and maintainer-clean were working fine.
Author: Bertrand Drouvot, Michael Paquier
Discussion: https://postgr.es/m/77a86b3a-c4a8-5f5d-69b9-d70bbf2e9b98@gmail.com
2023-07-05 03:53:11 +02:00
|
|
|
if (IsNewer(
|
|
|
|
'src/include/utils/wait_event_types.h',
|
|
|
|
'src/backend/utils/activity/wait_event_names.txt'))
|
|
|
|
{
|
Add system view pg_wait_events
This new view, wrapped around a SRF, shows some information known about
wait events, as of:
- Name.
- Type (Activity, I/O, Extension, etc.).
- Description.
All the information retrieved comes from wait_event_names.txt, and the
description is the same as the documentation with filters applied to
remove any XML markups. This view is useful when joined with
pg_stat_activity to get the description of a wait event reported.
Custom wait events for extensions are included in the view.
Original idea by Yves Colin.
Author: Bertrand Drouvot
Reviewed-by: Kyotaro Horiguchi, Masahiro Ikeda, Tom Lane, Michael
Paquier
Discussion: https://postgr.es/m/0e2ae164-dc89-03c3-cf7f-de86378053ac@gmail.com
2023-08-20 08:35:02 +02:00
|
|
|
print
|
|
|
|
"Generating pgstat_wait_event.c, wait_event_types.h and wait_event_funcs_data.c...\n";
|
Generate automatically code and documentation related to wait events
The documentation and the code is generated automatically from a new
file called wait_event_names.txt, formatted in sections dedicated to
each wait event class (Timeout, Lock, IO, etc.) with three tab-separated
fields:
- C symbol in enums
- Format in the system views
- Description in the docs
Using this approach has several advantages, as we have proved to be
rather bad in maintaining this area of the tree across the years:
- The order of each item in the documentation and the code, which should
be alphabetical, has become incorrect multiple times, and the script
generating the code and documentation has a few rules to enforce that,
making the maintenance a no-brainer.
- Some wait events were added to the code, but not documented, so this
cannot be missed now.
- The order of the tables for each wait event class is enforced in the
documentation (the input .txt file does so as well for clarity, though
this is not mandatory).
- Less code, shaving 1.2k lines from the tree, with 1/3 of the savings
coming from the code, the rest from the documentation.
The wait event types "Lock" and "LWLock" still have their own code path
for their code, hence only the documentation is created for them. These
classes are listed with a special marker called WAIT_EVENT_DOCONLY in
the input file.
Adding a new wait event now requires only an update of
wait_event_names.txt, with "Lock" and "LWLock" treated as exceptions.
This commit has been tested with configure/Makefile, the CI and VPATH
build. clean, distclean and maintainer-clean were working fine.
Author: Bertrand Drouvot, Michael Paquier
Discussion: https://postgr.es/m/77a86b3a-c4a8-5f5d-69b9-d70bbf2e9b98@gmail.com
2023-07-05 03:53:11 +02:00
|
|
|
my $activ = 'src/backend/utils/activity';
|
|
|
|
system(
|
|
|
|
"perl $activ/generate-wait_event_types.pl --outdir $activ --code $activ/wait_event_names.txt"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
if (IsNewer(
|
|
|
|
'src/include/utils/wait_event_types.h',
|
|
|
|
'src/backend/utils/activity/wait_event_types.h'))
|
|
|
|
{
|
|
|
|
copyFile(
|
|
|
|
'src/backend/utils/activity/wait_event_types.h',
|
|
|
|
'src/include/utils/wait_event_types.h');
|
|
|
|
}
|
|
|
|
|
2015-04-25 14:52:03 +02:00
|
|
|
if (IsNewer('src/include/utils/probes.h', 'src/backend/utils/probes.d'))
|
2008-03-21 03:50:02 +01:00
|
|
|
{
|
2010-04-09 15:05:58 +02:00
|
|
|
print "Generating probes.h...\n";
|
|
|
|
system(
|
2023-11-14 09:47:07 +01:00
|
|
|
'perl -n src/backend/utils/Gen_dummy_probes.pl src/backend/utils/probes.d > src/include/utils/probes.h'
|
2010-04-09 15:05:58 +02:00
|
|
|
);
|
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2011-07-12 16:25:08 +02:00
|
|
|
if ($self->{options}->{python}
|
2012-07-05 03:47:49 +02:00
|
|
|
&& IsNewer(
|
2015-04-25 14:52:03 +02:00
|
|
|
'src/pl/plpython/spiexceptions.h',
|
2017-11-13 03:43:32 +01:00
|
|
|
'src/backend/utils/errcodes.txt'))
|
2011-02-28 17:41:10 +01:00
|
|
|
{
|
|
|
|
print "Generating spiexceptions.h...\n";
|
2011-07-12 16:25:08 +02:00
|
|
|
system(
|
2018-04-26 17:52:52 +02:00
|
|
|
'perl src/pl/plpython/generate-spiexceptions.pl src/backend/utils/errcodes.txt > src/pl/plpython/spiexceptions.h'
|
2011-07-12 16:25:08 +02:00
|
|
|
);
|
2011-02-28 17:41:10 +01:00
|
|
|
}
|
|
|
|
|
2012-07-05 03:47:49 +02:00
|
|
|
if (IsNewer(
|
2015-04-25 14:52:03 +02:00
|
|
|
'src/include/utils/errcodes.h',
|
|
|
|
'src/backend/utils/errcodes.txt'))
|
2011-02-04 04:32:49 +01:00
|
|
|
{
|
|
|
|
print "Generating errcodes.h...\n";
|
2011-07-12 16:25:08 +02:00
|
|
|
system(
|
2022-07-18 21:15:09 +02:00
|
|
|
'perl src/backend/utils/generate-errcodes.pl --outfile src/backend/utils/errcodes.h src/backend/utils/errcodes.txt'
|
2011-07-12 16:25:08 +02:00
|
|
|
);
|
2015-04-25 14:52:03 +02:00
|
|
|
copyFile('src/backend/utils/errcodes.h',
|
|
|
|
'src/include/utils/errcodes.h');
|
2011-02-04 04:32:49 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2012-07-05 03:47:49 +02:00
|
|
|
if (IsNewer(
|
2015-04-25 14:52:03 +02:00
|
|
|
'src/pl/plpgsql/src/plerrcodes.h',
|
|
|
|
'src/backend/utils/errcodes.txt'))
|
2011-02-04 04:32:49 +01:00
|
|
|
{
|
|
|
|
print "Generating plerrcodes.h...\n";
|
2011-07-12 16:25:08 +02:00
|
|
|
system(
|
2018-04-26 17:52:52 +02:00
|
|
|
'perl src/pl/plpgsql/src/generate-plerrcodes.pl src/backend/utils/errcodes.txt > src/pl/plpgsql/src/plerrcodes.h'
|
2011-07-12 16:25:08 +02:00
|
|
|
);
|
2011-02-04 04:32:49 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2016-03-25 21:54:52 +01:00
|
|
|
if ($self->{options}->{tcl}
|
|
|
|
&& IsNewer(
|
|
|
|
'src/pl/tcl/pltclerrcodes.h', 'src/backend/utils/errcodes.txt'))
|
|
|
|
{
|
|
|
|
print "Generating pltclerrcodes.h...\n";
|
|
|
|
system(
|
2018-04-26 17:52:52 +02:00
|
|
|
'perl src/pl/tcl/generate-pltclerrcodes.pl src/backend/utils/errcodes.txt > src/pl/tcl/pltclerrcodes.h'
|
2016-03-25 21:54:52 +01:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2023-04-08 14:26:19 +02:00
|
|
|
if (IsNewer(
|
|
|
|
'contrib/fuzzystrmatch/daitch_mokotoff.h',
|
|
|
|
'contrib/fuzzystrmatch/daitch_mokotoff_header.pl'))
|
|
|
|
{
|
|
|
|
print "Generating daitch_mokotoff.h...\n";
|
|
|
|
system( 'perl contrib/fuzzystrmatch/daitch_mokotoff_header.pl '
|
|
|
|
. 'contrib/fuzzystrmatch/daitch_mokotoff.h');
|
|
|
|
}
|
|
|
|
|
2015-04-25 14:52:03 +02:00
|
|
|
if (IsNewer('src/bin/psql/sql_help.h', 'src/bin/psql/create_help.pl'))
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
|
|
|
print "Generating sql_help.h...\n";
|
2022-07-18 20:57:31 +02:00
|
|
|
my $psql = 'src/bin/psql';
|
|
|
|
system(
|
|
|
|
"perl $psql/create_help.pl --docdir doc/src/sgml/ref --outdir $psql --basename sql_help"
|
|
|
|
);
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
Replace the data structure used for keyword lookup.
Previously, ScanKeywordLookup was passed an array of string pointers.
This had some performance deficiencies: the strings themselves might
be scattered all over the place depending on the compiler (and some
quick checking shows that at least with gcc-on-Linux, they indeed
weren't reliably close together). That led to very cache-unfriendly
behavior as the binary search touched strings in many different pages.
Also, depending on the platform, the string pointers might need to
be adjusted at program start, so that they couldn't be simple constant
data. And the ScanKeyword struct had been designed with an eye to
32-bit machines originally; on 64-bit it requires 16 bytes per
keyword, making it even more cache-unfriendly.
Redesign so that the keyword strings themselves are allocated
consecutively (as part of one big char-string constant), thereby
eliminating the touch-lots-of-unrelated-pages syndrome. And get
rid of the ScanKeyword array in favor of three separate arrays:
uint16 offsets into the keyword array, uint16 token codes, and
uint8 keyword categories. That reduces the overhead per keyword
to 5 bytes instead of 16 (even less in programs that only need
one of the token codes and categories); moreover, the binary search
only touches the offsets array, further reducing its cache footprint.
This also lets us put the token codes somewhere else than the
keyword strings are, which avoids some unpleasant build dependencies.
While we're at it, wrap the data used by ScanKeywordLookup into
a struct that can be treated as an opaque type by most callers.
That doesn't change things much right now, but it will make it
less painful to switch to a hash-based lookup method, as is being
discussed in the mailing list thread.
Most of the change here is associated with adding a generator
script that can build the new data structure from the same
list-of-PG_KEYWORD header representation we used before.
The PG_KEYWORD lists that plpgsql and ecpg used to embed in
their scanner .c files have to be moved into headers, and the
Makefiles have to be taught to invoke the generator script.
This work is also necessary if we're to consider hash-based lookup,
since the generator script is what would be responsible for
constructing a hash table.
Aside from saving a few kilobytes in each program that includes
the keyword table, this seems to speed up raw parsing (flex+bison)
by a few percent. So it's worth doing even as it stands, though
we think we can gain even more with a follow-on patch to switch
to hash-based lookup.
John Naylor, with further hacking by me
Discussion: https://postgr.es/m/CAJVSVGXdFVU2sgym89XPL=Lv1zOS5=EHHQ8XWNzFL=mTXkKMLw@mail.gmail.com
2019-01-06 23:02:57 +01:00
|
|
|
if (IsNewer('src/common/kwlist_d.h', 'src/include/parser/kwlist.h'))
|
|
|
|
{
|
|
|
|
print "Generating kwlist_d.h...\n";
|
2019-01-10 01:47:38 +01:00
|
|
|
system(
|
|
|
|
'perl -I src/tools src/tools/gen_keywordlist.pl --extern -o src/common src/include/parser/kwlist.h'
|
|
|
|
);
|
Replace the data structure used for keyword lookup.
Previously, ScanKeywordLookup was passed an array of string pointers.
This had some performance deficiencies: the strings themselves might
be scattered all over the place depending on the compiler (and some
quick checking shows that at least with gcc-on-Linux, they indeed
weren't reliably close together). That led to very cache-unfriendly
behavior as the binary search touched strings in many different pages.
Also, depending on the platform, the string pointers might need to
be adjusted at program start, so that they couldn't be simple constant
data. And the ScanKeyword struct had been designed with an eye to
32-bit machines originally; on 64-bit it requires 16 bytes per
keyword, making it even more cache-unfriendly.
Redesign so that the keyword strings themselves are allocated
consecutively (as part of one big char-string constant), thereby
eliminating the touch-lots-of-unrelated-pages syndrome. And get
rid of the ScanKeyword array in favor of three separate arrays:
uint16 offsets into the keyword array, uint16 token codes, and
uint8 keyword categories. That reduces the overhead per keyword
to 5 bytes instead of 16 (even less in programs that only need
one of the token codes and categories); moreover, the binary search
only touches the offsets array, further reducing its cache footprint.
This also lets us put the token codes somewhere else than the
keyword strings are, which avoids some unpleasant build dependencies.
While we're at it, wrap the data used by ScanKeywordLookup into
a struct that can be treated as an opaque type by most callers.
That doesn't change things much right now, but it will make it
less painful to switch to a hash-based lookup method, as is being
discussed in the mailing list thread.
Most of the change here is associated with adding a generator
script that can build the new data structure from the same
list-of-PG_KEYWORD header representation we used before.
The PG_KEYWORD lists that plpgsql and ecpg used to embed in
their scanner .c files have to be moved into headers, and the
Makefiles have to be taught to invoke the generator script.
This work is also necessary if we're to consider hash-based lookup,
since the generator script is what would be responsible for
constructing a hash table.
Aside from saving a few kilobytes in each program that includes
the keyword table, this seems to speed up raw parsing (flex+bison)
by a few percent. So it's worth doing even as it stands, though
we think we can gain even more with a follow-on patch to switch
to hash-based lookup.
John Naylor, with further hacking by me
Discussion: https://postgr.es/m/CAJVSVGXdFVU2sgym89XPL=Lv1zOS5=EHHQ8XWNzFL=mTXkKMLw@mail.gmail.com
2019-01-06 23:02:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (IsNewer(
|
|
|
|
'src/pl/plpgsql/src/pl_reserved_kwlist_d.h',
|
|
|
|
'src/pl/plpgsql/src/pl_reserved_kwlist.h')
|
|
|
|
|| IsNewer(
|
|
|
|
'src/pl/plpgsql/src/pl_unreserved_kwlist_d.h',
|
|
|
|
'src/pl/plpgsql/src/pl_unreserved_kwlist.h'))
|
|
|
|
{
|
|
|
|
print
|
|
|
|
"Generating pl_reserved_kwlist_d.h and pl_unreserved_kwlist_d.h...\n";
|
|
|
|
chdir('src/pl/plpgsql/src');
|
2019-01-10 01:47:38 +01:00
|
|
|
system(
|
|
|
|
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ReservedPLKeywords pl_reserved_kwlist.h'
|
|
|
|
);
|
|
|
|
system(
|
|
|
|
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname UnreservedPLKeywords pl_unreserved_kwlist.h'
|
|
|
|
);
|
Replace the data structure used for keyword lookup.
Previously, ScanKeywordLookup was passed an array of string pointers.
This had some performance deficiencies: the strings themselves might
be scattered all over the place depending on the compiler (and some
quick checking shows that at least with gcc-on-Linux, they indeed
weren't reliably close together). That led to very cache-unfriendly
behavior as the binary search touched strings in many different pages.
Also, depending on the platform, the string pointers might need to
be adjusted at program start, so that they couldn't be simple constant
data. And the ScanKeyword struct had been designed with an eye to
32-bit machines originally; on 64-bit it requires 16 bytes per
keyword, making it even more cache-unfriendly.
Redesign so that the keyword strings themselves are allocated
consecutively (as part of one big char-string constant), thereby
eliminating the touch-lots-of-unrelated-pages syndrome. And get
rid of the ScanKeyword array in favor of three separate arrays:
uint16 offsets into the keyword array, uint16 token codes, and
uint8 keyword categories. That reduces the overhead per keyword
to 5 bytes instead of 16 (even less in programs that only need
one of the token codes and categories); moreover, the binary search
only touches the offsets array, further reducing its cache footprint.
This also lets us put the token codes somewhere else than the
keyword strings are, which avoids some unpleasant build dependencies.
While we're at it, wrap the data used by ScanKeywordLookup into
a struct that can be treated as an opaque type by most callers.
That doesn't change things much right now, but it will make it
less painful to switch to a hash-based lookup method, as is being
discussed in the mailing list thread.
Most of the change here is associated with adding a generator
script that can build the new data structure from the same
list-of-PG_KEYWORD header representation we used before.
The PG_KEYWORD lists that plpgsql and ecpg used to embed in
their scanner .c files have to be moved into headers, and the
Makefiles have to be taught to invoke the generator script.
This work is also necessary if we're to consider hash-based lookup,
since the generator script is what would be responsible for
constructing a hash table.
Aside from saving a few kilobytes in each program that includes
the keyword table, this seems to speed up raw parsing (flex+bison)
by a few percent. So it's worth doing even as it stands, though
we think we can gain even more with a follow-on patch to switch
to hash-based lookup.
John Naylor, with further hacking by me
Discussion: https://postgr.es/m/CAJVSVGXdFVU2sgym89XPL=Lv1zOS5=EHHQ8XWNzFL=mTXkKMLw@mail.gmail.com
2019-01-06 23:02:57 +01:00
|
|
|
chdir('../../../..');
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IsNewer(
|
|
|
|
'src/interfaces/ecpg/preproc/c_kwlist_d.h',
|
|
|
|
'src/interfaces/ecpg/preproc/c_kwlist.h')
|
|
|
|
|| IsNewer(
|
|
|
|
'src/interfaces/ecpg/preproc/ecpg_kwlist_d.h',
|
|
|
|
'src/interfaces/ecpg/preproc/ecpg_kwlist.h'))
|
|
|
|
{
|
|
|
|
print "Generating c_kwlist_d.h and ecpg_kwlist_d.h...\n";
|
|
|
|
chdir('src/interfaces/ecpg/preproc');
|
2019-01-10 01:47:38 +01:00
|
|
|
system(
|
|
|
|
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanCKeywords --no-case-fold c_kwlist.h'
|
|
|
|
);
|
|
|
|
system(
|
|
|
|
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanECPGKeywords ecpg_kwlist.h'
|
|
|
|
);
|
Replace the data structure used for keyword lookup.
Previously, ScanKeywordLookup was passed an array of string pointers.
This had some performance deficiencies: the strings themselves might
be scattered all over the place depending on the compiler (and some
quick checking shows that at least with gcc-on-Linux, they indeed
weren't reliably close together). That led to very cache-unfriendly
behavior as the binary search touched strings in many different pages.
Also, depending on the platform, the string pointers might need to
be adjusted at program start, so that they couldn't be simple constant
data. And the ScanKeyword struct had been designed with an eye to
32-bit machines originally; on 64-bit it requires 16 bytes per
keyword, making it even more cache-unfriendly.
Redesign so that the keyword strings themselves are allocated
consecutively (as part of one big char-string constant), thereby
eliminating the touch-lots-of-unrelated-pages syndrome. And get
rid of the ScanKeyword array in favor of three separate arrays:
uint16 offsets into the keyword array, uint16 token codes, and
uint8 keyword categories. That reduces the overhead per keyword
to 5 bytes instead of 16 (even less in programs that only need
one of the token codes and categories); moreover, the binary search
only touches the offsets array, further reducing its cache footprint.
This also lets us put the token codes somewhere else than the
keyword strings are, which avoids some unpleasant build dependencies.
While we're at it, wrap the data used by ScanKeywordLookup into
a struct that can be treated as an opaque type by most callers.
That doesn't change things much right now, but it will make it
less painful to switch to a hash-based lookup method, as is being
discussed in the mailing list thread.
Most of the change here is associated with adding a generator
script that can build the new data structure from the same
list-of-PG_KEYWORD header representation we used before.
The PG_KEYWORD lists that plpgsql and ecpg used to embed in
their scanner .c files have to be moved into headers, and the
Makefiles have to be taught to invoke the generator script.
This work is also necessary if we're to consider hash-based lookup,
since the generator script is what would be responsible for
constructing a hash table.
Aside from saving a few kilobytes in each program that includes
the keyword table, this seems to speed up raw parsing (flex+bison)
by a few percent. So it's worth doing even as it stands, though
we think we can gain even more with a follow-on patch to switch
to hash-based lookup.
John Naylor, with further hacking by me
Discussion: https://postgr.es/m/CAJVSVGXdFVU2sgym89XPL=Lv1zOS5=EHHQ8XWNzFL=mTXkKMLw@mail.gmail.com
2019-01-06 23:02:57 +01:00
|
|
|
chdir('../../../..');
|
|
|
|
}
|
|
|
|
|
2012-07-05 03:47:49 +02:00
|
|
|
if (IsNewer(
|
2015-04-25 14:52:03 +02:00
|
|
|
'src/interfaces/ecpg/preproc/preproc.y',
|
|
|
|
'src/backend/parser/gram.y'))
|
2008-11-14 18:11:40 +01:00
|
|
|
{
|
|
|
|
print "Generating preproc.y...\n";
|
2022-07-18 20:59:03 +02:00
|
|
|
my $ecpg = 'src/interfaces/ecpg';
|
|
|
|
system(
|
|
|
|
"perl $ecpg/preproc/parse.pl --srcdir $ecpg/preproc --parser src/backend/parser/gram.y --output $ecpg/preproc/preproc.y"
|
|
|
|
);
|
2008-11-14 18:11:40 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2015-04-25 14:52:03 +02:00
|
|
|
unless (-f "src/port/pg_config_paths.h")
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
|
|
|
print "Generating pg_config_paths.h...\n";
|
2017-03-27 04:24:13 +02:00
|
|
|
open(my $o, '>', 'src/port/pg_config_paths.h')
|
2007-03-12 20:10:50 +01:00
|
|
|
|| confess "Could not open pg_config_paths.h";
|
2017-03-27 04:24:13 +02:00
|
|
|
print $o <<EOF;
|
2007-02-08 16:28:58 +01:00
|
|
|
#define PGBINDIR "/bin"
|
|
|
|
#define PGSHAREDIR "/share"
|
|
|
|
#define SYSCONFDIR "/etc"
|
|
|
|
#define INCLUDEDIR "/include"
|
|
|
|
#define PKGINCLUDEDIR "/include"
|
|
|
|
#define INCLUDEDIRSERVER "/include/server"
|
|
|
|
#define LIBDIR "/lib"
|
|
|
|
#define PKGLIBDIR "/lib"
|
|
|
|
#define LOCALEDIR "/share/locale"
|
|
|
|
#define DOCDIR "/doc"
|
2008-02-19 13:00:03 +01:00
|
|
|
#define HTMLDIR "/doc"
|
2007-02-08 16:28:58 +01:00
|
|
|
#define MANDIR "/man"
|
2006-09-04 23:30:40 +02:00
|
|
|
EOF
|
2017-03-27 04:24:13 +02:00
|
|
|
close($o);
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2015-04-25 14:52:03 +02:00
|
|
|
my $mf = Project::read_file('src/backend/catalog/Makefile');
|
2014-07-14 20:07:27 +02:00
|
|
|
$mf =~ s{\\\r?\n}{}g;
|
Replace our traditional initial-catalog-data format with a better design.
Historically, the initial catalog data to be installed during bootstrap
has been written in DATA() lines in the catalog header files. This had
lots of disadvantages: the format was badly underdocumented, it was
very difficult to edit the data in any mechanized way, and due to the
lack of any abstraction the data was verbose, hard to read/understand,
and easy to get wrong.
Hence, move this data into separate ".dat" files and represent it in a way
that can easily be read and rewritten by Perl scripts. The new format is
essentially "key => value" for each column; while it's a bit repetitive,
explicit labeling of each value makes the data far more readable and less
error-prone. Provide a way to abbreviate entries by omitting field values
that match a specified default value for their column. This allows removal
of a large amount of repetitive boilerplate and also lowers the barrier to
adding new columns.
Also teach genbki.pl how to translate symbolic OID references into
numeric OIDs for more cases than just "regproc"-like pg_proc references.
It can now do that for regprocedure-like references (thus solving the
problem that regproc is ambiguous for overloaded functions), operators,
types, opfamilies, opclasses, and access methods. Use this to turn
nearly all OID cross-references in the initial data into symbolic form.
This represents a very large step forward in readability and error
resistance of the initial catalog data. It should also reduce the
difficulty of renumbering OID assignments in uncommitted patches.
Also, solve the longstanding problem that frontend code that would like to
use OID macros and other information from the catalog headers often had
difficulty with backend-only code in the headers. To do this, arrange for
all generated macros, plus such other declarations as we deem fit, to be
placed in "derived" header files that are safe for frontend inclusion.
(Once clients migrate to using these pg_*_d.h headers, it will be possible
to get rid of the pg_*_fn.h headers, which only exist to quarantine code
away from clients. That is left for follow-on patches, however.)
The now-automatically-generated macros include the Anum_xxx and Natts_xxx
constants that we used to have to update by hand when adding or removing
catalog columns.
Replace the former manual method of generating OID macros for pg_type
entries with an automatic method, ensuring that all built-in types have
OID macros. (But note that this patch does not change the way that
OID macros for pg_proc entries are built and used. It's not clear that
making that match the other catalogs would be worth extra code churn.)
Add SGML documentation explaining what the new data format is and how to
work with it.
Despite being a very large change in the catalog headers, there is no
catversion bump here, because postgres.bki and related output files
haven't changed at all.
John Naylor, based on ideas from various people; review and minor
additional coding by me; previous review by Alvaro Herrera
Discussion: https://postgr.es/m/CAJVSVGWO48JbbwXkJz_yBFyGYW-M9YWxnPdxJBUosDC9ou_F0Q@mail.gmail.com
2018-04-08 19:16:50 +02:00
|
|
|
$mf =~ /^CATALOG_HEADERS\s*:?=(.*)$/gm
|
|
|
|
|| croak "Could not find CATALOG_HEADERS in Makefile\n";
|
|
|
|
my @bki_srcs = split /\s+/, $1;
|
|
|
|
$mf =~ /^POSTGRES_BKI_DATA\s*:?=[^,]+,(.*)\)$/gm
|
|
|
|
|| croak "Could not find POSTGRES_BKI_DATA in Makefile\n";
|
|
|
|
my @bki_data = split /\s+/, $1;
|
2018-04-25 20:00:19 +02:00
|
|
|
|
2018-05-04 00:47:41 +02:00
|
|
|
my $need_genbki = 0;
|
Replace our traditional initial-catalog-data format with a better design.
Historically, the initial catalog data to be installed during bootstrap
has been written in DATA() lines in the catalog header files. This had
lots of disadvantages: the format was badly underdocumented, it was
very difficult to edit the data in any mechanized way, and due to the
lack of any abstraction the data was verbose, hard to read/understand,
and easy to get wrong.
Hence, move this data into separate ".dat" files and represent it in a way
that can easily be read and rewritten by Perl scripts. The new format is
essentially "key => value" for each column; while it's a bit repetitive,
explicit labeling of each value makes the data far more readable and less
error-prone. Provide a way to abbreviate entries by omitting field values
that match a specified default value for their column. This allows removal
of a large amount of repetitive boilerplate and also lowers the barrier to
adding new columns.
Also teach genbki.pl how to translate symbolic OID references into
numeric OIDs for more cases than just "regproc"-like pg_proc references.
It can now do that for regprocedure-like references (thus solving the
problem that regproc is ambiguous for overloaded functions), operators,
types, opfamilies, opclasses, and access methods. Use this to turn
nearly all OID cross-references in the initial data into symbolic form.
This represents a very large step forward in readability and error
resistance of the initial catalog data. It should also reduce the
difficulty of renumbering OID assignments in uncommitted patches.
Also, solve the longstanding problem that frontend code that would like to
use OID macros and other information from the catalog headers often had
difficulty with backend-only code in the headers. To do this, arrange for
all generated macros, plus such other declarations as we deem fit, to be
placed in "derived" header files that are safe for frontend inclusion.
(Once clients migrate to using these pg_*_d.h headers, it will be possible
to get rid of the pg_*_fn.h headers, which only exist to quarantine code
away from clients. That is left for follow-on patches, however.)
The now-automatically-generated macros include the Anum_xxx and Natts_xxx
constants that we used to have to update by hand when adding or removing
catalog columns.
Replace the former manual method of generating OID macros for pg_type
entries with an automatic method, ensuring that all built-in types have
OID macros. (But note that this patch does not change the way that
OID macros for pg_proc entries are built and used. It's not clear that
making that match the other catalogs would be worth extra code churn.)
Add SGML documentation explaining what the new data format is and how to
work with it.
Despite being a very large change in the catalog headers, there is no
catversion bump here, because postgres.bki and related output files
haven't changed at all.
John Naylor, based on ideas from various people; review and minor
additional coding by me; previous review by Alvaro Herrera
Discussion: https://postgr.es/m/CAJVSVGWO48JbbwXkJz_yBFyGYW-M9YWxnPdxJBUosDC9ou_F0Q@mail.gmail.com
2018-04-08 19:16:50 +02:00
|
|
|
foreach my $bki (@bki_srcs, @bki_data)
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
|
|
|
next if $bki eq "";
|
2012-07-05 03:47:49 +02:00
|
|
|
if (IsNewer(
|
2018-05-04 00:47:41 +02:00
|
|
|
'src/backend/catalog/bki-stamp',
|
2012-07-05 03:47:49 +02:00
|
|
|
"src/include/catalog/$bki"))
|
2007-03-12 20:10:50 +01:00
|
|
|
{
|
2018-05-04 00:47:41 +02:00
|
|
|
$need_genbki = 1;
|
2007-03-12 20:10:50 +01:00
|
|
|
last;
|
2006-11-29 20:49:31 +01:00
|
|
|
}
|
2007-03-12 20:10:50 +01:00
|
|
|
}
|
2018-05-04 00:47:41 +02:00
|
|
|
$need_genbki = 1
|
|
|
|
if IsNewer('src/backend/catalog/bki-stamp',
|
|
|
|
'src/backend/catalog/genbki.pl');
|
|
|
|
$need_genbki = 1
|
|
|
|
if IsNewer('src/backend/catalog/bki-stamp',
|
|
|
|
'src/backend/catalog/Catalog.pm');
|
|
|
|
if ($need_genbki)
|
|
|
|
{
|
|
|
|
chdir('src/backend/catalog');
|
|
|
|
my $bki_srcs = join(' ../../../src/include/catalog/', @bki_srcs);
|
2019-02-12 22:29:26 +01:00
|
|
|
system(
|
2020-03-10 11:20:38 +01:00
|
|
|
"perl genbki.pl --include-path ../../../src/include/ --set-version=$majorver $bki_srcs"
|
2019-02-12 22:29:26 +01:00
|
|
|
);
|
2018-05-04 00:47:41 +02:00
|
|
|
open(my $f, '>', 'bki-stamp')
|
|
|
|
|| confess "Could not touch bki-stamp";
|
|
|
|
close($f);
|
|
|
|
chdir('../../..');
|
2018-05-18 17:53:18 +02:00
|
|
|
}
|
2018-05-04 00:47:41 +02:00
|
|
|
|
2018-05-18 17:53:18 +02:00
|
|
|
if (IsNewer(
|
|
|
|
'src/include/catalog/header-stamp',
|
|
|
|
'src/backend/catalog/bki-stamp'))
|
|
|
|
{
|
2018-05-04 00:47:41 +02:00
|
|
|
# Copy generated headers to include directory.
|
|
|
|
opendir(my $dh, 'src/backend/catalog/')
|
|
|
|
|| die "Can't opendir src/backend/catalog/ $!";
|
|
|
|
my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh);
|
|
|
|
closedir $dh;
|
|
|
|
foreach my $def_header (@def_headers)
|
|
|
|
{
|
|
|
|
copyFile(
|
|
|
|
"src/backend/catalog/$def_header",
|
|
|
|
"src/include/catalog/$def_header");
|
|
|
|
}
|
|
|
|
copyFile(
|
|
|
|
'src/backend/catalog/schemapg.h',
|
|
|
|
'src/include/catalog/schemapg.h');
|
Build in some knowledge about foreign-key relationships in the catalogs.
This follows in the spirit of commit dfb75e478, which created primary
key and uniqueness constraints to improve the visibility of constraints
imposed on the system catalogs. While our catalogs contain many
foreign-key-like relationships, they don't quite follow SQL semantics,
in that the convention for an omitted reference is to write zero not
NULL. Plus, we have some cases in which there are arrays each of whose
elements is supposed to be an FK reference; SQL has no way to model that.
So we can't create actual foreign key constraints to describe the
situation. Nonetheless, we can collect and use knowledge about these
relationships.
This patch therefore adds annotations to the catalog header files to
declare foreign-key relationships. (The BKI_LOOKUP annotations cover
simple cases, but we weren't previously distinguishing which such
columns are allowed to contain zeroes; we also need new markings for
multi-column FK references.) Then, Catalog.pm and genbki.pl are
taught to collect this information into a table in a new generated
header "system_fk_info.h". The only user of that at the moment is
a new SQL function pg_get_catalog_foreign_keys(), which exposes the
table to SQL. The oidjoins regression test is rewritten to use
pg_get_catalog_foreign_keys() to find out which columns to check.
Aside from removing the need for manual maintenance of that test
script, this allows it to cover numerous relationships that were not
checked by the old implementation based on findoidjoins. (As of this
commit, 217 relationships are checked by the test, versus 181 before.)
Discussion: https://postgr.es/m/3240355.1612129197@sss.pgh.pa.us
2021-02-02 23:11:55 +01:00
|
|
|
copyFile(
|
|
|
|
'src/backend/catalog/system_fk_info.h',
|
|
|
|
'src/include/catalog/system_fk_info.h');
|
2018-05-18 17:53:18 +02:00
|
|
|
open(my $chs, '>', 'src/include/catalog/header-stamp')
|
|
|
|
|| confess "Could not touch header-stamp";
|
|
|
|
close($chs);
|
2018-05-04 00:47:41 +02:00
|
|
|
}
|
2012-06-10 21:20:04 +02:00
|
|
|
|
2022-08-08 20:43:35 +02:00
|
|
|
my $nmf = Project::read_file('src/backend/nodes/Makefile');
|
|
|
|
$nmf =~ s{\\\r?\n}{}g;
|
|
|
|
$nmf =~ /^node_headers\s*:?=(.*)$/gm
|
|
|
|
|| croak "Could not find node_headers in Makefile\n";
|
|
|
|
my @node_headers = split /\s+/, $1;
|
|
|
|
@node_headers = grep { $_ ne '' } @node_headers;
|
|
|
|
my @node_files = map { "src/include/$_" } @node_headers;
|
|
|
|
|
|
|
|
my $need_node_support = 0;
|
|
|
|
foreach my $nodefile (@node_files)
|
|
|
|
{
|
|
|
|
if (IsNewer('src/backend/nodes/node-support-stamp', $nodefile))
|
|
|
|
{
|
|
|
|
$need_node_support = 1;
|
|
|
|
last;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
$need_node_support = 1
|
|
|
|
if IsNewer(
|
|
|
|
'src/backend/nodes/node-support-stamp',
|
|
|
|
'src/backend/nodes/gen_node_support.pl');
|
Automatically generate node support functions
Add a script to automatically generate the node support functions
(copy, equal, out, and read, as well as the node tags enum) from the
struct definitions.
For each of the four node support files, it creates two include files,
e.g., copyfuncs.funcs.c and copyfuncs.switch.c, to include in the main
file. All the scaffolding of the main file stays in place.
I have tried to mostly make the coverage of the output match what is
currently there. For example, one could now do out/read coverage of
utility statement nodes, but I have manually excluded those for now.
The reason is mainly that it's easier to diff the before and after,
and adding a bunch of stuff like this might require a separate
analysis and review.
Subtyping (TidScan -> Scan) is supported.
For the hard cases, you can just write a manual function and exclude
generating one. For the not so hard cases, there is a way of
annotating struct fields to get special behaviors. For example,
pg_node_attr(equal_ignore) has the field ignored in equal functions.
(In this patch, I have only ifdef'ed out the code to could be removed,
mainly so that it won't constantly have merge conflicts. It will be
deleted in a separate patch. All the code comments that are worth
keeping from those sections have already been moved to the header
files where the structs are defined.)
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://www.postgresql.org/message-id/flat/c1097590-a6a4-486a-64b1-e1f9cc0533ce%40enterprisedb.com
2022-07-09 08:52:19 +02:00
|
|
|
|
2022-08-08 20:43:35 +02:00
|
|
|
if ($need_node_support)
|
|
|
|
{
|
2022-07-18 21:32:26 +02:00
|
|
|
system(
|
|
|
|
"perl src/backend/nodes/gen_node_support.pl --outdir src/backend/nodes @node_files"
|
|
|
|
);
|
|
|
|
open(my $f, '>', 'src/backend/nodes/node-support-stamp')
|
Automatically generate node support functions
Add a script to automatically generate the node support functions
(copy, equal, out, and read, as well as the node tags enum) from the
struct definitions.
For each of the four node support files, it creates two include files,
e.g., copyfuncs.funcs.c and copyfuncs.switch.c, to include in the main
file. All the scaffolding of the main file stays in place.
I have tried to mostly make the coverage of the output match what is
currently there. For example, one could now do out/read coverage of
utility statement nodes, but I have manually excluded those for now.
The reason is mainly that it's easier to diff the before and after,
and adding a bunch of stuff like this might require a separate
analysis and review.
Subtyping (TidScan -> Scan) is supported.
For the hard cases, you can just write a manual function and exclude
generating one. For the not so hard cases, there is a way of
annotating struct fields to get special behaviors. For example,
pg_node_attr(equal_ignore) has the field ignored in equal functions.
(In this patch, I have only ifdef'ed out the code to could be removed,
mainly so that it won't constantly have merge conflicts. It will be
deleted in a separate patch. All the code comments that are worth
keeping from those sections have already been moved to the header
files where the structs are defined.)
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://www.postgresql.org/message-id/flat/c1097590-a6a4-486a-64b1-e1f9cc0533ce%40enterprisedb.com
2022-07-09 08:52:19 +02:00
|
|
|
|| confess "Could not touch node-support-stamp";
|
|
|
|
close($f);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IsNewer(
|
|
|
|
'src/include/nodes/nodetags.h',
|
|
|
|
'src/backend/nodes/nodetags.h'))
|
|
|
|
{
|
|
|
|
copyFile('src/backend/nodes/nodetags.h',
|
|
|
|
'src/include/nodes/nodetags.h');
|
|
|
|
}
|
|
|
|
|
2017-03-27 04:24:13 +02:00
|
|
|
open(my $o, '>', "doc/src/sgml/version.sgml")
|
2012-07-05 03:47:49 +02:00
|
|
|
|| croak "Could not write to version.sgml\n";
|
2017-03-27 04:24:13 +02:00
|
|
|
print $o <<EOF;
|
2019-12-20 08:54:42 +01:00
|
|
|
<!ENTITY version "$package_version">
|
2020-03-10 11:20:38 +01:00
|
|
|
<!ENTITY majorversion "$majorver">
|
2007-03-06 15:16:55 +01:00
|
|
|
EOF
|
2017-03-27 04:24:13 +02:00
|
|
|
close($o);
|
2018-05-27 15:08:42 +02:00
|
|
|
return;
|
2006-09-04 23:30:40 +02:00
|
|
|
}
|
|
|
|
|
2019-12-20 08:54:42 +01:00
|
|
|
# Read lines from input file and substitute symbols using the same
# logic that config.status uses.  There should be one call of this for
# each AC_CONFIG_HEADERS call in configure.ac.
#
# If the "required" argument is true, we also keep track which of our
# defines have been found and error out if any are left unused at the
# end.  That way we avoid accumulating defines in this file that are
# no longer used by configure.
sub GenerateConfigHeader
{
	my ($self, $config_header, $defines, $required) = @_;

	my $config_header_in = $config_header . '.in';

	# Regenerate only when the output is out of date with respect to
	# either its template or this script itself.
	if (   IsNewer($config_header, $config_header_in)
		|| IsNewer($config_header, __FILE__))
	{
		my %defines_copy = %$defines;

		open(my $i, '<', $config_header_in)
		  || confess "Could not open $config_header_in\n";
		open(my $o, '>', $config_header)
		  || confess "Could not write to $config_header\n";

		print $o
		  "/* $config_header. Generated from $config_header_in by src/tools/msvc/Solution.pm. */\n";

		while (<$i>)
		{
			if (m/^#(\s*)undef\s+(\w+)/)
			{
				my $ws    = $1;
				my $macro = $2;
				if (exists $defines->{$macro})
				{
					if (defined $defines->{$macro})
					{
						# Defined symbol: emit a #define with its value.
						print $o "#${ws}define $macro ",
						  $defines->{$macro}, "\n";
					}
					else
					{
						# Known but undefined symbol: leave a commented
						# #undef, mirroring config.status behavior.
						print $o "/* #${ws}undef $macro */\n";
					}
					delete $defines_copy{$macro};
				}
				else
				{
					# Report the input template file here: that is where
					# the unknown symbol appears, and $. refers to the
					# input handle's line counter.
					croak
					  "undefined symbol: $macro at $config_header_in line $.";
				}
			}
			else
			{
				print $o $_;
			}
		}
		close($o);
		close($i);

		# With "required", any define not consumed above indicates a
		# stale entry in our defines table.
		if ($required && scalar(keys %defines_copy) > 0)
		{
			croak "unused defines: " . join(' ', keys %defines_copy);
		}
	}
	return;
}
|
|
|
|
|
2007-10-03 14:11:00 +02:00
|
|
|
# Build a linker .def file from a text list of exported symbols.
# Each non-comment, non-blank line of $txtfile is "<symbol> <ordinal>".
sub GenerateDefFile
{
	my ($self, $deffile, $txtfile, $libname) = @_;

	# Rebuild only when the exports list is newer than the .def file.
	if (IsNewer($deffile, $txtfile))
	{
		print "Generating $deffile...\n";
		open(my $txt_fh, '<', $txtfile)
		  || confess("Could not open $txtfile\n");
		open(my $def_fh, '>', $deffile)
		  || confess("Could not open $deffile\n");
		print $def_fh "LIBRARY $libname\nEXPORTS\n";
		while (my $line = <$txt_fh>)
		{
			# Skip comments and blank lines.
			next if ($line =~ /^#/);
			next if ($line =~ /^\s*$/);

			# awk-style whitespace split: symbol name, then ordinal.
			my ($sym, $ord) = split ' ', $line;
			print $def_fh " $sym @ $ord\n";
		}
		close($def_fh);
		close($txt_fh);
	}
	return;
}
|
|
|
|
|
2007-03-12 20:10:50 +01:00
|
|
|
# Create a project of the given type under the given solution folder,
# seed it from $initialdir when supplied, and wire in the include/library
# paths for every optional dependency enabled in the build options.
# Returns the new project object.
sub AddProject
{
	my ($self, $name, $type, $folder, $initialdir) = @_;

	my $opts = $self->{options};

	my $proj =
	  VSObjectFactory::CreateProject($self->{vcver}, $name, $type, $self);
	push @{ $self->{projects}->{$folder} }, $proj;
	$proj->AddDir($initialdir) if ($initialdir);

	if ($opts->{zlib})
	{
		$proj->AddIncludeDir($opts->{zlib} . '\include');
		$proj->AddLibrary($opts->{zlib} . '\lib\zdll.lib');
	}
	if ($opts->{openssl})
	{
		$proj->AddIncludeDir($opts->{openssl} . '\include');
		my ($major, $minor, $patch) = $self->GetOpenSSLVersion();

		# Starting at version 1.1.0 the OpenSSL installers changed their
		# library names: libeay -> libcrypto, ssleay -> libssl.
		# NOTE(review): this digit-wise comparison would misclassify a
		# hypothetical 2.x release, but is correct for all released
		# OpenSSL versions.
		if (   ($major >= '3' && $minor >= '0' && $patch >= '0')
			|| ($major >= '1' && $minor >= '1' && $patch >= '0'))
		{
			my $dbgsuffix;
			my $libsslpath;
			my $libcryptopath;

			# The library file names differ slightly between the Win32
			# and Win64 installers, so adapt.
			if (-e "$opts->{openssl}/lib/VC/sslcrypto32MD.lib")
			{
				# Win32, with a debugging library set available.
				$dbgsuffix     = 1;
				$libsslpath    = '\lib\VC\libssl32.lib';
				$libcryptopath = '\lib\VC\libcrypto32.lib';
			}
			elsif (-e "$opts->{openssl}/lib/VC/sslcrypto64MD.lib")
			{
				# Win64, with a debugging library set available.
				$dbgsuffix     = 1;
				$libsslpath    = '\lib\VC\libssl64.lib';
				$libcryptopath = '\lib\VC\libcrypto64.lib';
			}
			else
			{
				# Without a debugging context, Win32 and Win64 use the
				# same library names.
				$dbgsuffix     = 0;
				$libsslpath    = '\lib\libssl.lib';
				$libcryptopath = '\lib\libcrypto.lib';
			}

			$proj->AddLibrary($opts->{openssl} . $libsslpath, $dbgsuffix);
			$proj->AddLibrary($opts->{openssl} . $libcryptopath,
				$dbgsuffix);
		}
		else
		{
			# Pre-1.1.0 naming.  Choose which set of libraries to use
			# depending on whether debugging libraries are present in
			# the installation.
			if (-e "$opts->{openssl}/lib/VC/ssleay32MD.lib")
			{
				$proj->AddLibrary(
					$opts->{openssl} . '\lib\VC\ssleay32.lib', 1);
				$proj->AddLibrary(
					$opts->{openssl} . '\lib\VC\libeay32.lib', 1);
			}
			else
			{
				# We don't expect the config-specific library to be
				# here, so don't ask for it in the last parameter.
				$proj->AddLibrary(
					$opts->{openssl} . '\lib\ssleay32.lib', 0);
				$proj->AddLibrary(
					$opts->{openssl} . '\lib\libeay32.lib', 0);
			}
		}
	}
	if ($opts->{nls})
	{
		$proj->AddIncludeDir($opts->{nls} . '\include');
		$proj->AddLibrary($opts->{nls} . '\lib\libintl.lib');
	}
	if ($opts->{gss})
	{
		$proj->AddIncludeDir($opts->{gss} . '\include');
		$proj->AddIncludeDir($opts->{gss} . '\include\krb5');
		if ($self->{platform} eq 'Win32')
		{
			$proj->AddLibrary($opts->{gss} . '\lib\i386\krb5_32.lib');
			$proj->AddLibrary($opts->{gss} . '\lib\i386\comerr32.lib');
			$proj->AddLibrary($opts->{gss} . '\lib\i386\gssapi32.lib');
		}
		else
		{
			$proj->AddLibrary($opts->{gss} . '\lib\amd64\krb5_64.lib');
			$proj->AddLibrary($opts->{gss} . '\lib\amd64\comerr64.lib');
			$proj->AddLibrary($opts->{gss} . '\lib\amd64\gssapi64.lib');
		}
	}
	if ($opts->{iconv})
	{
		$proj->AddIncludeDir($opts->{iconv} . '\include');
		$proj->AddLibrary($opts->{iconv} . '\lib\iconv.lib');
	}
	if ($opts->{icu})
	{
		$proj->AddIncludeDir($opts->{icu} . '\include');
		if ($self->{platform} eq 'Win32')
		{
			$proj->AddLibrary($opts->{icu} . '\lib\icuin.lib');
			$proj->AddLibrary($opts->{icu} . '\lib\icuuc.lib');
			$proj->AddLibrary($opts->{icu} . '\lib\icudt.lib');
		}
		else
		{
			$proj->AddLibrary($opts->{icu} . '\lib64\icuin.lib');
			$proj->AddLibrary($opts->{icu} . '\lib64\icuuc.lib');
			$proj->AddLibrary($opts->{icu} . '\lib64\icudt.lib');
		}
	}
	if ($opts->{xml})
	{
		$proj->AddIncludeDir($opts->{xml} . '\include');
		$proj->AddIncludeDir($opts->{xml} . '\include\libxml2');
		$proj->AddLibrary($opts->{xml} . '\lib\libxml2.lib');
	}
	if ($opts->{xslt})
	{
		$proj->AddIncludeDir($opts->{xslt} . '\include');
		$proj->AddLibrary($opts->{xslt} . '\lib\libxslt.lib');
	}
	if ($opts->{lz4})
	{
		$proj->AddIncludeDir($opts->{lz4} . '\include');
		$proj->AddLibrary($opts->{lz4} . '\lib\liblz4.lib');
	}
	if ($opts->{zstd})
	{
		$proj->AddIncludeDir($opts->{zstd} . '\include');
		$proj->AddLibrary($opts->{zstd} . '\lib\libzstd.lib');
	}
	if ($opts->{uuid})
	{
		$proj->AddIncludeDir($opts->{uuid} . '\include');
		$proj->AddLibrary($opts->{uuid} . '\lib\uuid.lib');
	}
	return $proj;
}
|
|
|
|
|
2007-03-12 20:10:50 +01:00
|
|
|
# Regenerate derived files, save every project file, and then write the
# top-level pgsql.sln solution file tying them all together.
sub Save
{
	my ($self) = @_;
	my %flduid;

	$self->GenerateFiles();

	# Emit every project's own file before writing the solution itself.
	foreach my $folder (keys %{ $self->{projects} })
	{
		$_->Save() foreach @{ $self->{projects}->{$folder} };
	}

	open(my $sln, '>', "pgsql.sln") || croak "Could not write to pgsql.sln\n";
	print $sln <<EOF;
Microsoft Visual Studio Solution File, Format Version $self->{solutionFileVersion}
# $self->{visualStudioName}
EOF

	print $sln $self->GetAdditionalHeaders();

	# Project entries, plus one solution-folder entry per named folder.
	foreach my $folder (keys %{ $self->{projects} })
	{
		foreach my $proj (@{ $self->{projects}->{$folder} })
		{
			print $sln <<EOF;
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "$proj->{name}", "$proj->{name}$proj->{filenameExtension}", "$proj->{guid}"
EndProject
EOF
		}
		if ($folder ne "")
		{
			# Solution folders need a GUID too; use a placeholder when
			# not running on Windows.
			$flduid{$folder} = $^O eq "MSWin32" ? Win32::GuidGen() : 'FAKE';
			print $sln <<EOF;
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "$folder", "$folder", "$flduid{$folder}"
EndProject
EOF
		}
	}

	print $sln <<EOF;
Global
	GlobalSection(SolutionConfigurationPlatforms) = preSolution
		Debug|$self->{platform}= Debug|$self->{platform}
		Release|$self->{platform} = Release|$self->{platform}
	EndGlobalSection
	GlobalSection(ProjectConfigurationPlatforms) = postSolution
EOF

	# Per-project Debug/Release configuration mappings.
	foreach my $folder (keys %{ $self->{projects} })
	{
		foreach my $proj (@{ $self->{projects}->{$folder} })
		{
			print $sln <<EOF;
		$proj->{guid}.Debug|$self->{platform}.ActiveCfg = Debug|$self->{platform}
		$proj->{guid}.Debug|$self->{platform}.Build.0 = Debug|$self->{platform}
		$proj->{guid}.Release|$self->{platform}.ActiveCfg = Release|$self->{platform}
		$proj->{guid}.Release|$self->{platform}.Build.0 = Release|$self->{platform}
EOF
		}
	}

	print $sln <<EOF;
	EndGlobalSection
	GlobalSection(SolutionProperties) = preSolution
		HideSolutionNode = FALSE
	EndGlobalSection
	GlobalSection(NestedProjects) = preSolution
EOF

	# Map each project into its solution folder.
	foreach my $folder (keys %{ $self->{projects} })
	{
		next if ($folder eq "");
		foreach my $proj (@{ $self->{projects}->{$folder} })
		{
			print $sln "\t\t$proj->{guid} = $flduid{$folder}\n";
		}
	}

	print $sln <<EOF;
	EndGlobalSection
EndGlobal
EOF
	close($sln);
	return;
}
|
|
|
|
|
2007-04-26 12:36:47 +02:00
|
|
|
# Build a string that mimics a configure command line, reflecting the
# build options in effect, for embedding into pg_config output.
sub GetFakeConfigure
{
	my $self = shift;

	my $opts = $self->{options};

	# Collect the switches in a fixed order matching the original code.
	my @switches;
	push @switches, '--enable-cassert'    if $opts->{asserts};
	push @switches, '--enable-nls'        if $opts->{nls};
	push @switches, '--enable-tap-tests'  if $opts->{tap_tests};
	push @switches, '--with-ldap'         if $opts->{ldap};
	push @switches, '--without-zlib'      unless $opts->{zlib};
	push @switches, '--with-extra-version' if $opts->{extraver};
	push @switches, '--with-ssl=openssl'  if $opts->{openssl};
	push @switches, '--with-uuid'         if $opts->{uuid};
	push @switches, '--with-libxml'       if $opts->{xml};
	push @switches, '--with-libxslt'      if $opts->{xslt};
	push @switches, '--with-lz4'          if $opts->{lz4};
	push @switches, '--with-zstd'         if $opts->{zstd};
	push @switches, '--with-gssapi'       if $opts->{gss};
	push @switches, '--with-icu'          if $opts->{icu};
	push @switches, '--with-tcl'          if $opts->{tcl};
	push @switches, '--with-perl'         if $opts->{perl};
	push @switches, '--with-python'       if $opts->{python};

	my $port = $opts->{'--with-pgport'};
	push @switches, "--with-pgport=$port" if defined($port);

	# Each switch is prefixed by a single space, as before.
	return join('', map { " $_" } @switches);
}
|
|
|
|
|
2016-04-29 13:59:47 +02:00
|
|
|
package VS2015Solution;

#
# Package that encapsulates a Visual Studio 2015 solution file
#

use Carp;
use strict;
use warnings;
use base qw(Solution);

no warnings qw(redefine);    ## no critic

sub new
{
	my $class = shift;
	my $self  = bless($class->SUPER::_new(@_), $class);

	# Version identifiers specific to the VS 2015 toolchain.
	my %vsinfo = (
		solutionFileVersion        => '12.00',
		vcver                      => '14.00',
		visualStudioName           => 'Visual Studio 2015',
		VisualStudioVersion        => '14.0.24730.2',
		MinimumVisualStudioVersion => '10.0.40219.1',
	);
	$self->{$_} = $vsinfo{$_} for keys %vsinfo;

	return $self;
}
|
|
|
|
|
2017-09-25 14:03:05 +02:00
|
|
|
package VS2017Solution;

#
# Package that encapsulates a Visual Studio 2017 solution file
#

use Carp;
use strict;
use warnings;
use base qw(Solution);

no warnings qw(redefine);    ## no critic

sub new
{
	my $class = shift;
	my $self  = bless($class->SUPER::_new(@_), $class);

	# Version identifiers specific to the VS 2017 toolchain.
	my %vsinfo = (
		solutionFileVersion        => '12.00',
		vcver                      => '15.00',
		visualStudioName           => 'Visual Studio 2017',
		VisualStudioVersion        => '15.0.26730.3',
		MinimumVisualStudioVersion => '10.0.40219.1',
	);
	$self->{$_} = $vsinfo{$_} for keys %vsinfo;

	return $self;
}
|
|
|
|
|
2019-07-02 07:02:33 +02:00
|
|
|
package VS2019Solution;

#
# Package that encapsulates a Visual Studio 2019 solution file
#

use Carp;
use strict;
use warnings;
use base qw(Solution);

no warnings qw(redefine);    ## no critic

sub new
{
	my $class = shift;
	my $self  = bless($class->SUPER::_new(@_), $class);

	# Version identifiers specific to the VS 2019 toolchain.
	my %vsinfo = (
		solutionFileVersion        => '12.00',
		vcver                      => '16.00',
		visualStudioName           => 'Visual Studio 2019',
		VisualStudioVersion        => '16.0.28729.10',
		MinimumVisualStudioVersion => '10.0.40219.1',
	);
	$self->{$_} = $vsinfo{$_} for keys %vsinfo;

	return $self;
}
|
|
|
|
|
2021-11-24 05:03:23 +01:00
|
|
|
package VS2022Solution;

#
# Package that encapsulates a Visual Studio 2022 solution file
#

use Carp;
use strict;
use warnings;
use base qw(Solution);

no warnings qw(redefine);    ## no critic

sub new
{
	my $class = shift;
	my $self  = bless($class->SUPER::_new(@_), $class);

	# Version identifiers specific to the VS 2022 toolchain.
	my %vsinfo = (
		solutionFileVersion        => '12.00',
		vcver                      => '17.00',
		visualStudioName           => 'Visual Studio 2022',
		VisualStudioVersion        => '17.0.31903.59',
		MinimumVisualStudioVersion => '10.0.40219.1',
	);
	$self->{$_} = $vsinfo{$_} for keys %vsinfo;

	return $self;
}
|
|
|
|
|
2014-01-26 15:49:10 +01:00
|
|
|
# Extra header lines written into pgsql.sln after the format-version
# banner, identifying the Visual Studio version in use.
sub GetAdditionalHeaders
{
	my ($self, $f) = @_;

	return
	    "VisualStudioVersion = $self->{VisualStudioVersion}\n"
	  . "MinimumVisualStudioVersion = $self->{MinimumVisualStudioVersion}\n";
}
|
|
|
|
|
2006-09-04 23:30:40 +02:00
|
|
|
1;
|