# Copyright (c) 2021-2023, PostgreSQL Global Development Group
package Solution;
#
# Package that encapsulates generation of a Visual C++ solution file
#
# src/tools/msvc/Solution.pm
#
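#
# Illustrative usage (a sketch only; variable names here are made up for
# the example, and the real driver is Mkvcbuild.pm):
#
#   use VSObjectFactory;
#   my $solution = CreateSolution($visualStudioVersion, $config);
#   $solution->GenerateFiles();
#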
use Carp;
use strict;
use warnings;
use VSObjectFactory;
no warnings qw(redefine); ## no critic
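
# Constructor.  $options is the options hashref assembled from config.pl;
# the block sizes are given in kilobytes and the segment size in gigabytes,
# and unsupported values die here, before any project file is written.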
sub _new
{
	my $classname = shift;
	my $options = shift;
	my $self = {
		projects => {},
		options => $options,
		VisualStudioVersion => undef,
		MinimumVisualStudioVersion => undef,
		vcver => undef,
		platform => undef,
	};
	bless($self, $classname);

	$self->DeterminePlatform();

	if ($options->{xslt} && !$options->{xml})
	{
		die "XSLT requires XML\n";
	}
	$options->{blocksize} = 8
	  unless $options->{blocksize};    # undef or 0 means default
	die "Bad blocksize $options->{blocksize}"
	  unless grep { $_ == $options->{blocksize} } (1, 2, 4, 8, 16, 32);
	$options->{segsize} = 1
	  unless $options->{segsize};    # undef or 0 means default

	# only allow segsize 1 for now, as we can't do large files yet in windows
	die "Bad segsize $options->{segsize}"
	  unless $options->{segsize} == 1;
	$options->{wal_blocksize} = 8
	  unless $options->{wal_blocksize};    # undef or 0 means default
	die "Bad wal_blocksize $options->{wal_blocksize}"
	  unless grep { $_ == $options->{wal_blocksize} }
	  (1, 2, 4, 8, 16, 32, 64);

	return $self;
}
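
# Extra text for the header of the generated solution file; the base
# implementation adds nothing.  (Presumably overridden by version-specific
# subclasses; that is an inference, not something visible in this file.)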
sub GetAdditionalHeaders
{
	return '';
}
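
# Decide whether we target x64 or Win32 by inspecting the cl.exe found on
# the PATH; on non-Windows hosts a placeholder value is used so the script
# can still be exercised.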
sub DeterminePlatform
{
	my $self = shift;

	if ($^O eq "MSWin32")
	{
		# Examine CL help output to determine if we are in 32 or 64-bit mode.
		my $output = `cl /help 2>&1`;
		$? >> 8 == 0 or die "cl command not found";
		$self->{platform} =
		  ($output =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32';
	}
	else
	{
		$self->{platform} = 'FAKE';
	}
	print "Detected hardware platform: $self->{platform}\n";
	return;
}

# Return 1 if $oldfile is newer than $newfile, or if $newfile doesn't exist.
# Special case - if config.pl has changed, always return 1
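# The comparison uses stat() field 9, the file's mtime.  For example,
# IsNewer('fmgr-stamp', 'Gen_fmgrtab.pl') below asks whether the fmgr
# tables must be regenerated because the generating script changed.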
sub IsNewer
{
	my ($newfile, $oldfile) = @_;
	-e $oldfile or warn "source file \"$oldfile\" does not exist";
	if (   $oldfile ne 'src/tools/msvc/config.pl'
		&& $oldfile ne 'src/tools/msvc/config_default.pl')
	{
		return 1
		  if (-f 'src/tools/msvc/config.pl')
		  && IsNewer($newfile, 'src/tools/msvc/config.pl');
		return 1
		  if (-f 'src/tools/msvc/config_default.pl')
		  && IsNewer($newfile, 'src/tools/msvc/config_default.pl');
	}
	return 1 if (!(-e $newfile));
	my @nstat = stat($newfile);
	my @ostat = stat($oldfile);
	return 1 if ($nstat[9] < $ostat[9]);
	return 0;
}
# Copy a file, *not* preserving date. Only works for text files.
sub copyFile
{
	my ($src, $dest) = @_;
	open(my $i, '<', $src) || croak "Could not open $src";
	open(my $o, '>', $dest) || croak "Could not open $dest";
	while (<$i>)
	{
		print $o $_;
	}
	close($i);
	close($o);
	return;
}

# Fetch the version of OpenSSL by running the "openssl" command shipped
# with the installation this build links against.  The result is an array
# of the first three digits of the OpenSSL version, which is enough to
# decide which options to apply for the version of OpenSSL being linked
# with.
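# For example, an "openssl version" output of "OpenSSL 1.1.1k  25 Mar 2021"
# (an illustrative value) yields (1, 1, 1).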
sub GetOpenSSLVersion
{
	my $self = shift;

	# Attempt to get OpenSSL version and location.  This assumes that
	# openssl.exe is in the specified directory.
	# Quote the .exe name in case it has spaces
	my $opensslcmd =
	  qq("$self->{options}->{openssl}\\bin\\openssl.exe" version 2>&1);
	my $sslout = `$opensslcmd`;

	$? >> 8 == 0
	  or croak
	  "Unable to determine OpenSSL version: The openssl.exe command wasn't found.";

	if ($sslout =~ /(\d+)\.(\d+)\.(\d+)(\D)/m)
	{
		return ($1, $2, $3);
	}

	croak
	  "Unable to determine OpenSSL version: The openssl.exe version could not be determined.";
}
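
# Generate the files that configure produces on Unix builds: the
# pg_config*.h headers, the .def files for the DLLs, and the assorted
# generated headers and parsers elsewhere in the tree.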
sub GenerateFiles
{
	my $self = shift;
	my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
	my $ac_init_found = 0;
	my $package_name;
	my $package_version;
	my $package_bugreport;
	my $package_url;
	my ($majorver, $minorver);
	my $ac_define_openssl_api_compat_found = 0;
	my $openssl_api_compat;

	# Parse configure.ac to get version numbers
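	# A matching line looks like (values are illustrative only):
	# AC_INIT([PostgreSQL], [16.1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/])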
	open(my $c, '<', "configure.ac")
	  || confess("Could not open configure.ac for reading\n");
	while (<$c>)
	{
		if (/^AC_INIT\(\[([^\]]+)\], \[([^\]]+)\], \[([^\]]+)\], \[([^\]]*)\], \[([^\]]+)\]/
		  )
		{
			$ac_init_found = 1;

			$package_name = $1;
			$package_version = $2;
			$package_bugreport = $3;
			#$package_tarname = $4;
			$package_url = $5;

			if ($package_version !~ /^(\d+)(?:\.(\d+))?/)
			{
				confess "Bad format of version: $package_version\n";
			}
			$majorver = sprintf("%d", $1);
			$minorver = sprintf("%d", $2 ? $2 : 0);
		}
		elsif (/\bAC_DEFINE\(OPENSSL_API_COMPAT, \[([0-9xL]+)\]/)
		{
			$ac_define_openssl_api_compat_found = 1;
			$openssl_api_compat = $1;
		}
	}
	close($c);

confess "Unable to parse configure.ac for all variables!"
unless $ac_init_found && $ac_define_openssl_api_compat_found;
if (IsNewer("src/include/pg_config_os.h", "src/include/port/win32.h"))
{
print "Copying pg_config_os.h...\n";
copyFile("src/include/port/win32.h", "src/include/pg_config_os.h");
}
print "Generating configuration headers...\n";
my $extraver = $self->{options}->{extraver};
$extraver = '' unless defined $extraver;
my $port = $self->{options}->{"--with-pgport"} || 5432;
# Every symbol in pg_config.h.in must be accounted for here. Set
# to undef if the symbol should not be defined.
	my %define = (
		ALIGNOF_DOUBLE => 8,
		ALIGNOF_INT => 4,
		ALIGNOF_LONG => 4,
		ALIGNOF_LONG_LONG_INT => 8,
		ALIGNOF_PG_INT128_TYPE => undef,
		ALIGNOF_SHORT => 2,
		AC_APPLE_UNIVERSAL_BUILD => undef,
		BLCKSZ => 1024 * $self->{options}->{blocksize},
		CONFIGURE_ARGS => '"' . $self->GetFakeConfigure() . '"',
		DEF_PGPORT => $port,
		DEF_PGPORT_STR => qq{"$port"},
		DLSUFFIX => '".dll"',
		ENABLE_GSS => $self->{options}->{gss} ? 1 : undef,
		ENABLE_NLS => $self->{options}->{nls} ? 1 : undef,
		HAVE_APPEND_HISTORY => undef,
		HAVE_ASN1_STRING_GET0_DATA => undef,
		HAVE_ATOMICS => 1,
		HAVE_ATOMIC_H => undef,
		HAVE_BACKTRACE_SYMBOLS => undef,
		HAVE_BIO_METH_NEW => undef,
		HAVE_COMPUTED_GOTO => undef,
		HAVE_COPYFILE => undef,
		HAVE_COPYFILE_H => undef,
		HAVE_CRTDEFS_H => undef,
		HAVE_CRYPTO_LOCK => undef,
		HAVE_DECL_FDATASYNC => 0,
		HAVE_DECL_F_FULLFSYNC => 0,
		HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER => 0,
		HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER => 0,
		HAVE_DECL_LLVMGETHOSTCPUNAME => 0,
		HAVE_DECL_LLVMGETHOSTCPUFEATURES => 0,
		HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN => 0,
		HAVE_DECL_POSIX_FADVISE => 0,
		HAVE_DECL_PREADV => 0,
		HAVE_DECL_PWRITEV => 0,
		HAVE_DECL_STRLCAT => 0,
		HAVE_DECL_STRLCPY => 0,
		HAVE_DECL_STRNLEN => 1,
		HAVE_EDITLINE_HISTORY_H => undef,
		HAVE_EDITLINE_READLINE_H => undef,
		HAVE_EXECINFO_H => undef,
		HAVE_EXPLICIT_BZERO => undef,
		HAVE_FSEEKO => 1,
		HAVE_GCC__ATOMIC_INT32_CAS => undef,
		HAVE_GCC__ATOMIC_INT64_CAS => undef,
		HAVE_GCC__SYNC_CHAR_TAS => undef,
		HAVE_GCC__SYNC_INT32_CAS => undef,
		HAVE_GCC__SYNC_INT32_TAS => undef,
		HAVE_GCC__SYNC_INT64_CAS => undef,
		HAVE_GETIFADDRS => undef,
		HAVE_GETOPT => undef,
		HAVE_GETOPT_H => undef,
		HAVE_GETOPT_LONG => undef,
		HAVE_GETPEEREID => undef,
		HAVE_GETPEERUCRED => undef,
		HAVE_GSSAPI_EXT_H => undef,
		HAVE_GSSAPI_GSSAPI_EXT_H => undef,
		HAVE_GSSAPI_GSSAPI_H => undef,
		HAVE_GSSAPI_H => undef,
		HAVE_HMAC_CTX_FREE => undef,
		HAVE_HMAC_CTX_NEW => undef,
		HAVE_HISTORY_H => undef,
		HAVE_HISTORY_TRUNCATE_FILE => undef,
		HAVE_IFADDRS_H => undef,
		HAVE_INET_ATON => undef,
		HAVE_INET_PTON => 1,
		HAVE_INT_TIMEZONE => 1,
		HAVE_INT64 => undef,
		HAVE_INT8 => undef,
		HAVE_INTTYPES_H => undef,
		HAVE_INT_OPTERR => undef,
		HAVE_INT_OPTRESET => undef,
		HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P => undef,
		HAVE_KQUEUE => undef,
		HAVE_LANGINFO_H => undef,
		HAVE_LDAP_INITIALIZE => undef,
		HAVE_LIBCRYPTO => undef,
		HAVE_LIBLDAP => undef,
		HAVE_LIBLZ4 => undef,
		HAVE_LIBM => undef,
		HAVE_LIBPAM => undef,
		HAVE_LIBREADLINE => undef,
		HAVE_LIBSELINUX => undef,
		HAVE_LIBSSL => undef,
		HAVE_LIBWLDAP32 => undef,
		HAVE_LIBXML2 => undef,
		HAVE_LIBXSLT => undef,
		HAVE_LIBZ => $self->{options}->{zlib} ? 1 : undef,
		HAVE_LIBZSTD => undef,
		HAVE_LONG_INT_64 => undef,
		HAVE_LONG_LONG_INT_64 => 1,
		HAVE_MBARRIER_H => undef,
		HAVE_MBSTOWCS_L => undef,
		HAVE_MEMORY_H => 1,
		HAVE_MEMSET_S => undef,
		HAVE_MKDTEMP => undef,
		HAVE_OPENSSL_INIT_SSL => undef,
		HAVE_OSSP_UUID_H => undef,
		HAVE_PAM_PAM_APPL_H => undef,
		HAVE_POSIX_FADVISE => undef,
		HAVE_POSIX_FALLOCATE => undef,
		HAVE_PPOLL => undef,
		HAVE_PTHREAD => undef,
		HAVE_PTHREAD_BARRIER_WAIT => undef,
		HAVE_PTHREAD_IS_THREADED_NP => undef,
		HAVE_PTHREAD_PRIO_INHERIT => undef,
		HAVE_READLINE_H => undef,
		HAVE_READLINE_HISTORY_H => undef,
		HAVE_READLINE_READLINE_H => undef,
		HAVE_RL_COMPLETION_MATCHES => undef,
		HAVE_RL_COMPLETION_SUPPRESS_QUOTE => undef,
		HAVE_RL_FILENAME_COMPLETION_FUNCTION => undef,
		HAVE_RL_FILENAME_QUOTE_CHARACTERS => undef,
		HAVE_RL_FILENAME_QUOTING_FUNCTION => undef,
		HAVE_RL_RESET_SCREEN_SIZE => undef,
		HAVE_RL_VARIABLE_BIND => undef,
		HAVE_SECURITY_PAM_APPL_H => undef,
		HAVE_SETPROCTITLE => undef,
		HAVE_SETPROCTITLE_FAST => undef,
		HAVE_SOCKLEN_T => 1,
		HAVE_SPINLOCKS => 1,
		HAVE_SSL_CTX_SET_CERT_CB => undef,
		HAVE_STDBOOL_H => 1,
		HAVE_STDINT_H => 1,
		HAVE_STDLIB_H => 1,
		HAVE_STRCHRNUL => undef,
		HAVE_STRERROR_R => undef,
		HAVE_STRINGS_H => undef,
		HAVE_STRING_H => 1,
		HAVE_STRLCAT => undef,
		HAVE_STRLCPY => undef,
		HAVE_STRNLEN => 1,
		HAVE_STRSIGNAL => undef,
		HAVE_STRUCT_OPTION => undef,
		HAVE_STRUCT_SOCKADDR_SA_LEN => undef,
		HAVE_STRUCT_TM_TM_ZONE => undef,
		HAVE_SYNC_FILE_RANGE => undef,
		HAVE_SYNCFS => undef,
		HAVE_SYSLOG => undef,
		HAVE_SYS_EPOLL_H => undef,
		HAVE_SYS_EVENT_H => undef,
		HAVE_SYS_PERSONALITY_H => undef,
		HAVE_SYS_PRCTL_H => undef,
		HAVE_SYS_PROCCTL_H => undef,
		HAVE_SYS_SIGNALFD_H => undef,
		HAVE_SYS_STAT_H => 1,
		HAVE_SYS_TYPES_H => 1,
		HAVE_SYS_UCRED_H => undef,
		HAVE_TERMIOS_H => undef,
		HAVE_TYPEOF => undef,
		HAVE_UCRED_H => undef,
		HAVE_UINT64 => undef,
		HAVE_UINT8 => undef,
		HAVE_UNION_SEMUN => undef,
		HAVE_UNISTD_H => 1,
		HAVE_USELOCALE => undef,
		HAVE_UUID_BSD => undef,
		HAVE_UUID_E2FS => undef,
		HAVE_UUID_OSSP => undef,
		HAVE_UUID_H => undef,
		HAVE_UUID_UUID_H => undef,
		HAVE_WCSTOMBS_L => undef,
		HAVE_VISIBILITY_ATTRIBUTE => undef,
		HAVE_X509_GET_SIGNATURE_INFO => undef,
		HAVE_X86_64_POPCNTQ => undef,
		HAVE__BOOL => undef,
		HAVE__BUILTIN_BSWAP16 => undef,
		HAVE__BUILTIN_BSWAP32 => undef,
		HAVE__BUILTIN_BSWAP64 => undef,
		HAVE__BUILTIN_CLZ => undef,
		HAVE__BUILTIN_CONSTANT_P => undef,
		HAVE__BUILTIN_CTZ => undef,
		HAVE__BUILTIN_FRAME_ADDRESS => undef,
		HAVE__BUILTIN_OP_OVERFLOW => undef,
		HAVE__BUILTIN_POPCOUNT => undef,
		HAVE__BUILTIN_TYPES_COMPATIBLE_P => undef,
		HAVE__BUILTIN_UNREACHABLE => undef,
		HAVE__CONFIGTHREADLOCALE => 1,
		HAVE__CPUID => 1,
		HAVE__GET_CPUID => undef,
		HAVE__STATIC_ASSERT => undef,
		INT64_MODIFIER => qq{"ll"},
		LOCALE_T_IN_XLOCALE => undef,
		MAXIMUM_ALIGNOF => 8,
		MEMSET_LOOP_LIMIT => 1024,
		OPENSSL_API_COMPAT => $openssl_api_compat,
		PACKAGE_BUGREPORT => qq{"$package_bugreport"},
		PACKAGE_NAME => qq{"$package_name"},
		PACKAGE_STRING => qq{"$package_name $package_version"},
		PACKAGE_TARNAME => lc qq{"$package_name"},
		PACKAGE_URL => qq{"$package_url"},
		PACKAGE_VERSION => qq{"$package_version"},
		PG_INT128_TYPE => undef,
		PG_INT64_TYPE => 'long long int',
		PG_KRB_SRVNAM => qq{"postgres"},
		PG_MAJORVERSION => qq{"$majorver"},
		PG_MAJORVERSION_NUM => $majorver,
		PG_MINORVERSION_NUM => $minorver,
		PG_PRINTF_ATTRIBUTE => undef,
		PG_USE_STDBOOL => 1,
		PG_VERSION => qq{"$package_version$extraver"},
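		# sprintf("%d%04d", ...): e.g. major 16, minor 1 => 160001
		# (illustrative values)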
		PG_VERSION_NUM => sprintf("%d%04d", $majorver, $minorver),
		PG_VERSION_STR =>
		  qq{"PostgreSQL $package_version$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"},
		PROFILE_PID_DIR => undef,
		PTHREAD_CREATE_JOINABLE => undef,
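		# With the defaults (blocksize = 8 kB, segsize = 1 GB) this is
		# (1024 / 8) * 1 * 1024 = 131072 blocks, i.e. 1 GB per segment file.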
		RELSEG_SIZE => (1024 / $self->{options}->{blocksize}) *
		  $self->{options}->{segsize} * 1024,
		SIZEOF_BOOL => 1,
		SIZEOF_LONG => 4,
		SIZEOF_OFF_T => undef,
		SIZEOF_SIZE_T => $bits / 8,
		SIZEOF_VOID_P => $bits / 8,
		STDC_HEADERS => 1,
		STRERROR_R_INT => undef,
		USE_ARMV8_CRC32C => undef,
		USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK => undef,
		USE_ASSERT_CHECKING => $self->{options}->{asserts} ? 1 : undef,
		USE_BONJOUR => undef,
		USE_BSD_AUTH => undef,
		USE_ICU => $self->{options}->{icu} ? 1 : undef,
		USE_LIBXML => undef,
		USE_LIBXSLT => undef,
		USE_LZ4 => undef,
		USE_LDAP => $self->{options}->{ldap} ? 1 : undef,
		USE_LLVM => undef,
		USE_LOONGARCH_CRC32C => undef,
		USE_NAMED_POSIX_SEMAPHORES => undef,
		USE_OPENSSL => undef,
		USE_PAM => undef,
		USE_SLICING_BY_8_CRC32C => undef,
		USE_SSE42_CRC32C => undef,
		USE_SSE42_CRC32C_WITH_RUNTIME_CHECK => 1,
		USE_SYSTEMD => undef,
		USE_SYSV_SEMAPHORES => undef,
		USE_SYSV_SHARED_MEMORY => undef,
		USE_UNNAMED_POSIX_SEMAPHORES => undef,
		USE_WIN32_SEMAPHORES => 1,
		USE_WIN32_SHARED_MEMORY => 1,
		USE_ZSTD => undef,
		WCSTOMBS_L_IN_XLOCALE => undef,
		WORDS_BIGENDIAN => undef,
		XLOG_BLCKSZ => 1024 * $self->{options}->{wal_blocksize},
		_FILE_OFFSET_BITS => undef,
		_LARGEFILE_SOURCE => undef,
		_LARGE_FILES => undef,
		inline => '__inline',
		pg_restrict => '__restrict',
		# not defined, because it'd conflict with __declspec(restrict)
		restrict => undef,
		typeof => undef,);

	if ($self->{options}->{uuid})
	{
		$define{HAVE_UUID_OSSP} = 1;
		$define{HAVE_UUID_H} = 1;
	}
	if ($self->{options}->{xml})
	{
		$define{HAVE_LIBXML2} = 1;
		$define{USE_LIBXML} = 1;
	}
	if ($self->{options}->{xslt})
	{
		$define{HAVE_LIBXSLT} = 1;
		$define{USE_LIBXSLT} = 1;
	}
	if ($self->{options}->{lz4})
	{
		$define{HAVE_LIBLZ4} = 1;
		$define{USE_LZ4} = 1;
	}
	if ($self->{options}->{zstd})
	{
		$define{HAVE_LIBZSTD} = 1;
		$define{USE_ZSTD} = 1;
	}
	if ($self->{options}->{openssl})
	{
		$define{USE_OPENSSL} = 1;
		$define{HAVE_SSL_CTX_SET_CERT_CB} = 1;

		my ($digit1, $digit2, $digit3) = $self->GetOpenSSLVersion();

		# Symbols needed with OpenSSL 1.1.1 and above.
		if (   ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
			|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '1'))
		{
			$define{HAVE_X509_GET_SIGNATURE_INFO} = 1;
		}

		# Symbols needed with OpenSSL 1.1.0 and above.
		if (   ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
			|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '0'))
		{
			$define{HAVE_ASN1_STRING_GET0_DATA} = 1;
			$define{HAVE_BIO_METH_NEW} = 1;
			$define{HAVE_HMAC_CTX_FREE} = 1;
			$define{HAVE_HMAC_CTX_NEW} = 1;
			$define{HAVE_OPENSSL_INIT_SSL} = 1;
		}
	}
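
	# The third argument selects strict mode, in which every symbol found in
	# the template must be accounted for in %define (an inference from the
	# calls below: only pg_config.h is generated with it set).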
	$self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1);
	$self->GenerateConfigHeader('src/include/pg_config_ext.h', \%define, 0);
	$self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h',
		\%define, 0);

	$self->GenerateDefFile(
		"src/interfaces/libpq/libpqdll.def",
		"src/interfaces/libpq/exports.txt",
		"LIBPQ");
	$self->GenerateDefFile(
		"src/interfaces/ecpg/ecpglib/ecpglib.def",
		"src/interfaces/ecpg/ecpglib/exports.txt",
		"LIBECPG");
	$self->GenerateDefFile(
		"src/interfaces/ecpg/compatlib/compatlib.def",
		"src/interfaces/ecpg/compatlib/exports.txt",
		"LIBECPG_COMPAT");
	$self->GenerateDefFile(
		"src/interfaces/ecpg/pgtypeslib/pgtypeslib.def",
		"src/interfaces/ecpg/pgtypeslib/exports.txt",
		"LIBPGTYPES");

	chdir('src/backend/utils');
	my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
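	# fmgr-stamp is an empty marker file: its mtime records the last
	# successful Gen_fmgrtab.pl run, make-style, so the generation step is
	# skipped when none of its inputs has changed.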
	if (   IsNewer('fmgr-stamp', 'Gen_fmgrtab.pl')
		|| IsNewer('fmgr-stamp', '../catalog/Catalog.pm')
		|| IsNewer('fmgr-stamp', $pg_proc_dat)
		|| IsNewer('fmgr-stamp', '../../../src/include/access/transam.h'))
	{
		system(
			"perl -I ../catalog Gen_fmgrtab.pl --include-path ../../../src/include/ $pg_proc_dat"
		);
		open(my $f, '>', 'fmgr-stamp')
		  || confess "Could not touch fmgr-stamp";
		close($f);
	}
	chdir('../../..');

	if (IsNewer(
			'src/include/utils/fmgroids.h',
			'src/backend/utils/fmgroids.h'))
	{
		copyFile('src/backend/utils/fmgroids.h',
			'src/include/utils/fmgroids.h');
	}
	if (IsNewer(
			'src/include/utils/fmgrprotos.h',
			'src/backend/utils/fmgrprotos.h'))
	{
		copyFile(
			'src/backend/utils/fmgrprotos.h',
			'src/include/utils/fmgrprotos.h');
	}

	if (IsNewer(
			'src/include/storage/lwlocknames.h',
			'src/backend/storage/lmgr/lwlocknames.txt'))
	{
		print "Generating lwlocknames.c and lwlocknames.h...\n";
		my $lmgr = 'src/backend/storage/lmgr';
		system(
			"perl $lmgr/generate-lwlocknames.pl --outdir $lmgr $lmgr/lwlocknames.txt"
		);
	}
	if (IsNewer(
			'src/include/storage/lwlocknames.h',
			'src/backend/storage/lmgr/lwlocknames.h'))
	{
		copyFile(
			'src/backend/storage/lmgr/lwlocknames.h',
			'src/include/storage/lwlocknames.h');
	}

	if (IsNewer(
			'src/include/utils/wait_event_types.h',
			'src/backend/utils/activity/wait_event_names.txt'))
	{
		print
		  "Generating pgstat_wait_event.c, wait_event_types.h and wait_event_funcs_data.c...\n";
		my $activ = 'src/backend/utils/activity';
		system(
			"perl $activ/generate-wait_event_types.pl --outdir $activ --code $activ/wait_event_names.txt"
		);
	}
	if (IsNewer(
			'src/include/utils/wait_event_types.h',
			'src/backend/utils/activity/wait_event_types.h'))
	{
		copyFile(
			'src/backend/utils/activity/wait_event_types.h',
			'src/include/utils/wait_event_types.h');
	}

	if (IsNewer('src/include/utils/probes.h', 'src/backend/utils/probes.d'))
	{
		print "Generating probes.h...\n";
		system(
			'perl -n src/backend/utils/Gen_dummy_probes.pl src/backend/utils/probes.d > src/include/utils/probes.h'
		);
	}

	if ($self->{options}->{python}
		&& IsNewer(
			'src/pl/plpython/spiexceptions.h',
			'src/backend/utils/errcodes.txt'))
	{
		print "Generating spiexceptions.h...\n";
		system(
			'perl src/pl/plpython/generate-spiexceptions.pl src/backend/utils/errcodes.txt > src/pl/plpython/spiexceptions.h'
		);
	}
	if (IsNewer(
			'src/include/utils/errcodes.h',
			'src/backend/utils/errcodes.txt'))
	{
		print "Generating errcodes.h...\n";
		system(
			'perl src/backend/utils/generate-errcodes.pl --outfile src/backend/utils/errcodes.h src/backend/utils/errcodes.txt'
		);
		copyFile('src/backend/utils/errcodes.h',
			'src/include/utils/errcodes.h');
	}
	if (IsNewer(
			'src/pl/plpgsql/src/plerrcodes.h',
			'src/backend/utils/errcodes.txt'))
	{
		print "Generating plerrcodes.h...\n";
		system(
			'perl src/pl/plpgsql/src/generate-plerrcodes.pl src/backend/utils/errcodes.txt > src/pl/plpgsql/src/plerrcodes.h'
		);
	}
	if ($self->{options}->{tcl}
		&& IsNewer(
			'src/pl/tcl/pltclerrcodes.h', 'src/backend/utils/errcodes.txt'))
	{
		print "Generating pltclerrcodes.h...\n";
		system(
			'perl src/pl/tcl/generate-pltclerrcodes.pl src/backend/utils/errcodes.txt > src/pl/tcl/pltclerrcodes.h'
		);
	}

	if (IsNewer(
			'contrib/fuzzystrmatch/daitch_mokotoff.h',
			'contrib/fuzzystrmatch/daitch_mokotoff_header.pl'))
	{
		print "Generating daitch_mokotoff.h...\n";
		system('perl contrib/fuzzystrmatch/daitch_mokotoff_header.pl '
			  . 'contrib/fuzzystrmatch/daitch_mokotoff.h');
	}

	if (IsNewer('src/bin/psql/sql_help.h', 'src/bin/psql/create_help.pl'))
	{
		print "Generating sql_help.h...\n";
		my $psql = 'src/bin/psql';
		system(
			"perl $psql/create_help.pl --docdir doc/src/sgml/ref --outdir $psql --basename sql_help"
		);
	}

	if (IsNewer('src/common/kwlist_d.h', 'src/include/parser/kwlist.h'))
	{
		print "Generating kwlist_d.h...\n";
		system(
			'perl -I src/tools src/tools/gen_keywordlist.pl --extern -o src/common src/include/parser/kwlist.h'
		);
	}

	if (IsNewer(
			'src/pl/plpgsql/src/pl_reserved_kwlist_d.h',
			'src/pl/plpgsql/src/pl_reserved_kwlist.h')
		|| IsNewer(
			'src/pl/plpgsql/src/pl_unreserved_kwlist_d.h',
			'src/pl/plpgsql/src/pl_unreserved_kwlist.h'))
	{
		print
		  "Generating pl_reserved_kwlist_d.h and pl_unreserved_kwlist_d.h...\n";
		chdir('src/pl/plpgsql/src');
		system(
			'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ReservedPLKeywords pl_reserved_kwlist.h'
		);
		system(
			'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname UnreservedPLKeywords pl_unreserved_kwlist.h'
		);
		chdir('../../../..');
	}

	if (IsNewer(
			'src/interfaces/ecpg/preproc/c_kwlist_d.h',
			'src/interfaces/ecpg/preproc/c_kwlist.h')
		|| IsNewer(
			'src/interfaces/ecpg/preproc/ecpg_kwlist_d.h',
			'src/interfaces/ecpg/preproc/ecpg_kwlist.h'))
	{
		print "Generating c_kwlist_d.h and ecpg_kwlist_d.h...\n";
		chdir('src/interfaces/ecpg/preproc');
		system(
			'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanCKeywords --no-case-fold c_kwlist.h'
		);
		system(
			'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanECPGKeywords ecpg_kwlist.h'
		);
		chdir('../../../..');
	}

	if (IsNewer(
			'src/interfaces/ecpg/preproc/preproc.y',
			'src/backend/parser/gram.y'))
	{
		print "Generating preproc.y...\n";
		my $ecpg = 'src/interfaces/ecpg';
		system(
			"perl $ecpg/preproc/parse.pl --srcdir $ecpg/preproc --parser src/backend/parser/gram.y --output $ecpg/preproc/preproc.y"
		);
	}

	unless (-f "src/port/pg_config_paths.h")
	{
		print "Generating pg_config_paths.h...\n";
		open(my $o, '>', 'src/port/pg_config_paths.h')
		  || confess "Could not open pg_config_paths.h";
		print $o <<EOF;
#define PGBINDIR "/bin"
#define PGSHAREDIR "/share"
#define SYSCONFDIR "/etc"
#define INCLUDEDIR "/include"
#define PKGINCLUDEDIR "/include"
#define INCLUDEDIRSERVER "/include/server"
#define LIBDIR "/lib"
#define PKGLIBDIR "/lib"
#define LOCALEDIR "/share/locale"
#define DOCDIR "/doc"
#define HTMLDIR "/doc"
#define MANDIR "/man"
EOF
close($o);
}
my $mf = Project::read_file('src/backend/catalog/Makefile');
$mf =~ s{\\\r?\n}{}g;
$mf =~ /^CATALOG_HEADERS\s*:?=(.*)$/gm
|| croak "Could not find CATALOG_HEADERS in Makefile\n";
my @bki_srcs = split /\s+/, $1;
$mf =~ /^POSTGRES_BKI_DATA\s*:?=[^,]+,(.*)\)$/gm
|| croak "Could not find POSTGRES_BKI_DATA in Makefile\n";
my @bki_data = split /\s+/, $1;
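# Decide whether the generated catalog files need rebuilding: any
# catalog header or .dat input, genbki.pl itself, or Catalog.pm being
# newer than the stamp file forces a re-run of genbki.pl.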
my $need_genbki = 0;
foreach my $bki (@bki_srcs, @bki_data)
{
next if $bki eq "";
if (IsNewer(
'src/backend/catalog/bki-stamp',
"src/include/catalog/$bki"))
{
$need_genbki = 1;
last;
}
}
$need_genbki = 1
if IsNewer('src/backend/catalog/bki-stamp',
'src/backend/catalog/genbki.pl');
$need_genbki = 1
if IsNewer('src/backend/catalog/bki-stamp',
'src/backend/catalog/Catalog.pm');
if ($need_genbki)
{
chdir('src/backend/catalog');
my $bki_srcs = join(' ../../../src/include/catalog/', @bki_srcs);
system(
"perl genbki.pl --include-path ../../../src/include/ --set-version=$majorver $bki_srcs"
);
open(my $f, '>', 'bki-stamp')
|| confess "Could not touch bki-stamp";
close($f);
chdir('../../..');
}
if (IsNewer(
'src/include/catalog/header-stamp',
'src/backend/catalog/bki-stamp'))
{
# Copy generated headers to include directory.
opendir(my $dh, 'src/backend/catalog/')
|| die "Can't opendir src/backend/catalog/ $!";
my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh);
closedir $dh;
foreach my $def_header (@def_headers)
{
copyFile(
"src/backend/catalog/$def_header",
"src/include/catalog/$def_header");
}
copyFile(
'src/backend/catalog/schemapg.h',
'src/include/catalog/schemapg.h');
copyFile(
'src/backend/catalog/system_fk_info.h',
'src/include/catalog/system_fk_info.h');
open(my $chs, '>', 'src/include/catalog/header-stamp')
|| confess "Could not touch header-stamp";
close($chs);
}
my $nmf = Project::read_file('src/backend/nodes/Makefile');
$nmf =~ s{\\\r?\n}{}g;
$nmf =~ /^node_headers\s*:?=(.*)$/gm
|| croak "Could not find node_headers in Makefile\n";
my @node_headers = split /\s+/, $1;
@node_headers = grep { $_ ne '' } @node_headers;
my @node_files = map { "src/include/$_" } @node_headers;
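# Likewise, re-run gen_node_support.pl if any node header or the
# generator script itself is newer than its stamp file.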
my $need_node_support = 0;
foreach my $nodefile (@node_files)
{
if (IsNewer('src/backend/nodes/node-support-stamp', $nodefile))
{
$need_node_support = 1;
last;
}
}
$need_node_support = 1
if IsNewer(
'src/backend/nodes/node-support-stamp',
'src/backend/nodes/gen_node_support.pl');
if ($need_node_support)
{
system(
"perl src/backend/nodes/gen_node_support.pl --outdir src/backend/nodes @node_files"
);
open(my $f, '>', 'src/backend/nodes/node-support-stamp')
|| confess "Could not touch node-support-stamp";
close($f);
}
if (IsNewer(
'src/include/nodes/nodetags.h',
'src/backend/nodes/nodetags.h'))
{
copyFile('src/backend/nodes/nodetags.h',
'src/include/nodes/nodetags.h');
}
open(my $o, '>', "doc/src/sgml/version.sgml")
|| croak "Could not write to version.sgml\n";
print $o <<EOF;
<!ENTITY version "$package_version">
<!ENTITY majorversion "$majorver">
EOF
close($o);
return;
}
# Read lines from input file and substitute symbols using the same
# logic that config.status uses. There should be one call of this for
# each AC_CONFIG_HEADERS call in configure.ac.
#
# If the "required" argument is true, we also keep track which of our
# defines have been found and error out if any are left unused at the
# end. That way we avoid accumulating defines in this file that are
# no longer used by configure.
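#
# For example (illustrative): with defines => { HAVE_FOO => 1,
# HAVE_BAR => undef }, an input line "#undef HAVE_FOO" is rewritten
# as "#define HAVE_FOO 1", while "#undef HAVE_BAR" is rewritten as
# "/* #undef HAVE_BAR */".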
sub GenerateConfigHeader
{
my ($self, $config_header, $defines, $required) = @_;
my $config_header_in = $config_header . '.in';
if ( IsNewer($config_header, $config_header_in)
|| IsNewer($config_header, __FILE__))
{
my %defines_copy = %$defines;
open(my $i, '<', $config_header_in)
|| confess "Could not open $config_header_in\n";
open(my $o, '>', $config_header)
|| confess "Could not write to $config_header\n";
print $o
"/* $config_header. Generated from $config_header_in by src/tools/msvc/Solution.pm. */\n";
while (<$i>)
{
if (m/^#(\s*)undef\s+(\w+)/)
{
my $ws = $1;
my $macro = $2;
if (exists $defines->{$macro})
{
if (defined $defines->{$macro})
{
print $o "#${ws}define $macro ", $defines->{$macro},
"\n";
}
else
{
print $o "/* #${ws}undef $macro */\n";
}
delete $defines_copy{$macro};
}
else
{
croak
"undefined symbol: $macro at $config_header line $.";
}
}
else
{
print $o $_;
}
}
close($o);
close($i);
if ($required && scalar(keys %defines_copy) > 0)
{
croak "unused defines: " . join(' ', keys %defines_copy);
}
}
return;
}
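# Generate a module-definition (.def) file for a DLL from a text file
# of "name ordinal" pairs, skipping comments and blank lines.  An input
# line such as "PQconnectdb 27" (illustrative) comes out as
# "  PQconnectdb @ 27" under the EXPORTS header.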
sub GenerateDefFile
{
my ($self, $deffile, $txtfile, $libname) = @_;
if (IsNewer($deffile, $txtfile))
{
print "Generating $deffile...\n";
open(my $if, '<', $txtfile) || confess("Could not open $txtfile\n");
open(my $of, '>', $deffile) || confess("Could not open $deffile\n");
print $of "LIBRARY $libname\nEXPORTS\n";
while (<$if>)
{
next if (/^#/);
next if (/^\s*$/);
my ($f, $o) = split;
print $of " $f @ $o\n";
}
close($of);
close($if);
}
return;
}
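# Create a project object of the given type, optionally seeded with the
# sources in $initialdir, and attach the include paths and import
# libraries for whichever optional dependencies were configured.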
sub AddProject
{
my ($self, $name, $type, $folder, $initialdir) = @_;
my $proj =
VSObjectFactory::CreateProject($self->{vcver}, $name, $type, $self);
push @{ $self->{projects}->{$folder} }, $proj;
$proj->AddDir($initialdir) if ($initialdir);
if ($self->{options}->{zlib})
{
$proj->AddIncludeDir($self->{options}->{zlib} . '\include');
$proj->AddLibrary($self->{options}->{zlib} . '\lib\zdll.lib');
}
if ($self->{options}->{openssl})
{
$proj->AddIncludeDir($self->{options}->{openssl} . '\include');
my ($digit1, $digit2, $digit3) = $self->GetOpenSSLVersion();
# Starting with version 1.1.0, the OpenSSL installers
# changed their library names:
# - libeay to libcrypto
# - ssleay to libssl
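# Hence, for 1.1.0 and newer (including the 3.x series) we
# expect lib\libssl.lib and lib\libcrypto.lib (or their VC\
# debug variants), while older installers ship
# lib\ssleay32.lib and lib\libeay32.lib.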
if ( ($digit1 >= '3' && $digit2 >= '0' && $digit3 >= '0')
|| ($digit1 >= '1' && $digit2 >= '1' && $digit3 >= '0'))
{
my $dbgsuffix;
my $libsslpath;
my $libcryptopath;
# The library file names differ slightly between the
# Win32 and Win64 platforms, so adapt.
if (-e "$self->{options}->{openssl}/lib/VC/sslcrypto32MD.lib")
{
# Win32 here, with a debugging library set.
$dbgsuffix = 1;
$libsslpath = '\lib\VC\libssl32.lib';
$libcryptopath = '\lib\VC\libcrypto32.lib';
}
elsif (-e "$self->{options}->{openssl}/lib/VC/sslcrypto64MD.lib")
{
# Win64 here, with a debugging library set.
$dbgsuffix = 1;
$libsslpath = '\lib\VC\libssl64.lib';
$libcryptopath = '\lib\VC\libcrypto64.lib';
}
else
{
# On both Win32 and Win64 the same library
# names are used without a debugging context.
$dbgsuffix = 0;
$libsslpath = '\lib\libssl.lib';
$libcryptopath = '\lib\libcrypto.lib';
}
$proj->AddLibrary($self->{options}->{openssl} . $libsslpath,
$dbgsuffix);
$proj->AddLibrary($self->{options}->{openssl} . $libcryptopath,
$dbgsuffix);
}
else
{
# Choose which set of libraries to use depending on
# whether debugging libraries are present in the installer.
if (-e "$self->{options}->{openssl}/lib/VC/ssleay32MD.lib")
{
$proj->AddLibrary(
$self->{options}->{openssl} . '\lib\VC\ssleay32.lib', 1);
$proj->AddLibrary(
$self->{options}->{openssl} . '\lib\VC\libeay32.lib', 1);
}
else
{
# We don't expect the config-specific library
# to be here, so don't ask for it in the last
# parameter.
$proj->AddLibrary(
$self->{options}->{openssl} . '\lib\ssleay32.lib', 0);
$proj->AddLibrary(
$self->{options}->{openssl} . '\lib\libeay32.lib', 0);
}
}
}
if ($self->{options}->{nls})
{
$proj->AddIncludeDir($self->{options}->{nls} . '\include');
$proj->AddLibrary($self->{options}->{nls} . '\lib\libintl.lib');
}
if ($self->{options}->{gss})
{
$proj->AddIncludeDir($self->{options}->{gss} . '\include');
$proj->AddIncludeDir($self->{options}->{gss} . '\include\krb5');
if ($self->{platform} eq 'Win32')
{
$proj->AddLibrary(
$self->{options}->{gss} . '\lib\i386\krb5_32.lib');
$proj->AddLibrary(
$self->{options}->{gss} . '\lib\i386\comerr32.lib');
$proj->AddLibrary(
$self->{options}->{gss} . '\lib\i386\gssapi32.lib');
}
else
{
$proj->AddLibrary(
$self->{options}->{gss} . '\lib\amd64\krb5_64.lib');
$proj->AddLibrary(
$self->{options}->{gss} . '\lib\amd64\comerr64.lib');
$proj->AddLibrary(
$self->{options}->{gss} . '\lib\amd64\gssapi64.lib');
}
}
if ($self->{options}->{iconv})
{
$proj->AddIncludeDir($self->{options}->{iconv} . '\include');
$proj->AddLibrary($self->{options}->{iconv} . '\lib\iconv.lib');
}
if ($self->{options}->{icu})
{
$proj->AddIncludeDir($self->{options}->{icu} . '\include');
if ($self->{platform} eq 'Win32')
{
$proj->AddLibrary($self->{options}->{icu} . '\lib\icuin.lib');
$proj->AddLibrary($self->{options}->{icu} . '\lib\icuuc.lib');
$proj->AddLibrary($self->{options}->{icu} . '\lib\icudt.lib');
}
else
{
$proj->AddLibrary($self->{options}->{icu} . '\lib64\icuin.lib');
$proj->AddLibrary($self->{options}->{icu} . '\lib64\icuuc.lib');
$proj->AddLibrary($self->{options}->{icu} . '\lib64\icudt.lib');
}
}
if ($self->{options}->{xml})
{
$proj->AddIncludeDir($self->{options}->{xml} . '\include');
$proj->AddIncludeDir($self->{options}->{xml} . '\include\libxml2');
$proj->AddLibrary($self->{options}->{xml} . '\lib\libxml2.lib');
}
if ($self->{options}->{xslt})
{
$proj->AddIncludeDir($self->{options}->{xslt} . '\include');
$proj->AddLibrary($self->{options}->{xslt} . '\lib\libxslt.lib');
}
if ($self->{options}->{lz4})
{
$proj->AddIncludeDir($self->{options}->{lz4} . '\include');
$proj->AddLibrary($self->{options}->{lz4} . '\lib\liblz4.lib');
}
if ($self->{options}->{zstd})
{
$proj->AddIncludeDir($self->{options}->{zstd} . '\include');
$proj->AddLibrary($self->{options}->{zstd} . '\lib\libzstd.lib');
}
if ($self->{options}->{uuid})
{
$proj->AddIncludeDir($self->{options}->{uuid} . '\include');
$proj->AddLibrary($self->{options}->{uuid} . '\lib\uuid.lib');
}
return $proj;
}
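# Regenerate all derived files, save each project file, and write the
# top-level solution file pgsql.sln tying them together.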
sub Save
{
my ($self) = @_;
my %flduid;
$self->GenerateFiles();
foreach my $fld (keys %{ $self->{projects} })
{
foreach my $proj (@{ $self->{projects}->{$fld} })
{
$proj->Save();
}
}
open(my $sln, '>', "pgsql.sln") || croak "Could not write to pgsql.sln\n";
print $sln <<EOF;
Microsoft Visual Studio Solution File, Format Version $self->{solutionFileVersion}
# $self->{visualStudioName}
EOF
print $sln $self->GetAdditionalHeaders();
foreach my $fld (keys %{ $self->{projects} })
{
foreach my $proj (@{ $self->{projects}->{$fld} })
{
print $sln <<EOF;
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "$proj->{name}", "$proj->{name}$proj->{filenameExtension}", "$proj->{guid}"
EndProject
EOF
}
if ($fld ne "")
{
$flduid{$fld} = $^O eq "MSWin32" ? Win32::GuidGen() : 'FAKE';
print $sln <<EOF;
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "$fld", "$fld", "$flduid{$fld}"
EndProject
EOF
}
}
print $sln <<EOF;
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|$self->{platform} = Debug|$self->{platform}
Release|$self->{platform} = Release|$self->{platform}
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
EOF
foreach my $fld (keys %{ $self->{projects} })
{
foreach my $proj (@{ $self->{projects}->{$fld} })
{
print $sln <<EOF;
$proj->{guid}.Debug|$self->{platform}.ActiveCfg = Debug|$self->{platform}
$proj->{guid}.Debug|$self->{platform}.Build.0 = Debug|$self->{platform}
$proj->{guid}.Release|$self->{platform}.ActiveCfg = Release|$self->{platform}
$proj->{guid}.Release|$self->{platform}.Build.0 = Release|$self->{platform}
EOF
}
}
print $sln <<EOF;
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
EOF
foreach my $fld (keys %{ $self->{projects} })
{
next if ($fld eq "");
foreach my $proj (@{ $self->{projects}->{$fld} })
{
print $sln "\t\t$proj->{guid} = $flduid{$fld}\n";
}
}
print $sln <<EOF;
EndGlobalSection
EndGlobal
EOF
close($sln);
return;
}
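# Reconstruct a plausible "configure" command line from the build
# options, for tools that want to report one; e.g. (illustrative)
# " --enable-cassert --with-ssl=openssl" for a build with asserts
# and OpenSSL enabled.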
sub GetFakeConfigure
{
my $self = shift;
my $cfg = '';
$cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
$cfg .= ' --enable-nls' if ($self->{options}->{nls});
$cfg .= ' --enable-tap-tests' if ($self->{options}->{tap_tests});
$cfg .= ' --with-ldap' if ($self->{options}->{ldap});
$cfg .= ' --without-zlib' unless ($self->{options}->{zlib});
$cfg .= ' --with-extra-version' if ($self->{options}->{extraver});
$cfg .= ' --with-ssl=openssl' if ($self->{options}->{openssl});
$cfg .= ' --with-uuid' if ($self->{options}->{uuid});
$cfg .= ' --with-libxml' if ($self->{options}->{xml});
$cfg .= ' --with-libxslt' if ($self->{options}->{xslt});
$cfg .= ' --with-lz4' if ($self->{options}->{lz4});
$cfg .= ' --with-zstd' if ($self->{options}->{zstd});
$cfg .= ' --with-gssapi' if ($self->{options}->{gss});
$cfg .= ' --with-icu' if ($self->{options}->{icu});
$cfg .= ' --with-tcl' if ($self->{options}->{tcl});
$cfg .= ' --with-perl' if ($self->{options}->{perl});
$cfg .= ' --with-python' if ($self->{options}->{python});
my $port = $self->{options}->{'--with-pgport'};
$cfg .= " --with-pgport=$port" if defined($port);
return $cfg;
}
package VS2015Solution;
#
# Package that encapsulates a Visual Studio 2015 solution file
#
use Carp;
use strict;
use warnings;
use base qw(Solution);
no warnings qw(redefine); ## no critic
sub new
{
my $classname = shift;
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
$self->{solutionFileVersion} = '12.00';
$self->{vcver} = '14.00';
$self->{visualStudioName} = 'Visual Studio 2015';
$self->{VisualStudioVersion} = '14.0.24730.2';
$self->{MinimumVisualStudioVersion} = '10.0.40219.1';
return $self;
}
package VS2017Solution;
#
# Package that encapsulates a Visual Studio 2017 solution file
#
use Carp;
use strict;
use warnings;
use base qw(Solution);
no warnings qw(redefine); ## no critic
sub new
{
my $classname = shift;
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
$self->{solutionFileVersion} = '12.00';
$self->{vcver} = '15.00';
$self->{visualStudioName} = 'Visual Studio 2017';
$self->{VisualStudioVersion} = '15.0.26730.3';
$self->{MinimumVisualStudioVersion} = '10.0.40219.1';
return $self;
}
package VS2019Solution;
#
# Package that encapsulates a Visual Studio 2019 solution file
#
use Carp;
use strict;
use warnings;
use base qw(Solution);
no warnings qw(redefine); ## no critic
sub new
{
my $classname = shift;
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
$self->{solutionFileVersion} = '12.00';
$self->{vcver} = '16.00';
$self->{visualStudioName} = 'Visual Studio 2019';
$self->{VisualStudioVersion} = '16.0.28729.10';
$self->{MinimumVisualStudioVersion} = '10.0.40219.1';
return $self;
}
package VS2022Solution;
#
# Package that encapsulates a Visual Studio 2022 solution file
#
use Carp;
use strict;
use warnings;
use base qw(Solution);
no warnings qw(redefine); ## no critic
sub new
{
my $classname = shift;
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
$self->{solutionFileVersion} = '12.00';
$self->{vcver} = '17.00';
$self->{visualStudioName} = 'Visual Studio 2022';
$self->{VisualStudioVersion} = '17.0.31903.59';
$self->{MinimumVisualStudioVersion} = '10.0.40219.1';
return $self;
}
sub GetAdditionalHeaders
{
my ($self, $f) = @_;
return qq|VisualStudioVersion = $self->{VisualStudioVersion}
MinimumVisualStudioVersion = $self->{MinimumVisualStudioVersion}
|;
}
1;