Use SASLprep to normalize passwords for SCRAM authentication.

An important step in SASLprep normalization is to convert the string to
Unicode normalization form NFKC. Unicode normalization requires a fairly
large table of character decompositions, which is generated from data
published by the Unicode consortium. The script to generate the table is
put in src/common/unicode, along with test code for the normalization.
A pre-generated version of the tables is included in src/include/common,
so the code in src/common/unicode is not needed to build PostgreSQL, only
if you wish to modify the normalization tables.
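For illustration, converting a string to NFKC with the new function looks
like this (a minimal sketch of a frontend caller; the function and header
are from this patch, the surrounding code is hypothetical):

    #include "common/unicode_norm.h"

    pg_wchar    input[2] = {0x00AA, 0};    /* FEMININE ORDINAL INDICATOR */
    pg_wchar   *output = unicode_normalize_kc(input);

    /* in the frontend, output is malloc'd; NULL means out of memory */
    if (output != NULL)
    {
        /* under NFKC, U+00AA normalizes to plain "a" (U+0061) */
        free(output);
    }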

The SASLprep implementation depends on the UTF-8 functions from
src/backend/utils/mb/wchar.c, so to use it, you must also compile and link
that file. That doesn't change anything for the current users of these
functions, the backend and libpq, as they both already link with wchar.o.
It would be good to move those functions into a separate file in
src/common, but I'll leave that for another day.

No documentation changes included, because there are no details on the
SCRAM mechanism in the docs anyway. An overview of it in the protocol
specification would probably be good, even though SCRAM is documented in
detail in RFC5802. I'll write that as a separate patch. An important thing
to mention there is that if the password is not valid UTF-8, we skip the
SASLprep processing and use the raw bytes, to support other encodings.
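For illustration, the call pattern used on both the backend and libpq sides
looks like this (a minimal sketch of the code added in this patch; error
paths elided):

    char           *prep_password = NULL;
    pg_saslprep_rc  rc;

    rc = pg_saslprep(password, &prep_password);
    if (rc == SASLPREP_SUCCESS)
        password = prep_password;   /* use the normalized password */
    /* on any other return code, fall back to the raw password bytes */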

Patch by Michael Paquier and me.

Discussion: https://www.postgresql.org/message-id/CAB7nPqSByyEmAVLtEf1KxTRh=PWNKiWKEKQR=e1yGehz=wbymQ@mail.gmail.com
Heikki Linnakangas 2017-04-07 14:56:05 +03:00
parent 32e33a7979
commit 60f11b87a2
19 changed files with 11322 additions and 32 deletions


@@ -11,13 +11,43 @@
*
* - Username from the authentication exchange is not used. The client
* should send an empty string as the username.
* - Password is not processed with the SASLprep algorithm.
*
* - If the password isn't valid UTF-8, or contains characters prohibited
* by the SASLprep profile, we skip the SASLprep pre-processing and use
* the raw bytes in calculating the hash.
*
* - Channel binding is not supported yet.
*
*
* The password stored in pg_authid consists of the salt, iteration count,
* StoredKey and ServerKey.
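* It is stored in the text format
* "scram-sha-256:<salt>:<iterations>:<StoredKey>:<ServerKey>" (see
* scram_build_verifier() below).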
*
* On error handling:
* SASLprep usage
* --------------
*
* One notable difference from the SCRAM specification is that while the
* specification dictates that the password is in UTF-8, and prohibits
* certain characters, we are more lenient. If the password isn't a valid
* UTF-8 string, or contains prohibited characters, the raw bytes are used
* to calculate the hash instead, without SASLprep processing. This is
* because PostgreSQL supports other encodings too, and the encoding being
* used during authentication is undefined (client_encoding isn't set until
* after authentication). In effect, we try to interpret the password as
* UTF-8 and apply SASLprep processing, but if it looks invalid, we assume
* that it's in some other encoding.
*
* In the worst case, we misinterpret a password that's in a different
* encoding as being Unicode, because it happens to consist entirely of
* valid UTF-8 bytes, and we apply Unicode normalization to it. As long
* as we do that consistently, that will not lead to failed logins.
* Fortunately, the UTF-8 byte sequences that are ignored by SASLprep
* don't correspond to any commonly used characters in any of the other
* supported encodings, so it should not lead to any significant loss in
* entropy, even if the normalization is incorrectly applied to a
* non-UTF-8 password.
*
* Error handling
* --------------
*
* Don't reveal user information to an unauthenticated client. We don't
* want an attacker to be able to probe whether a particular username is
@@ -37,6 +67,7 @@
* to the encoding being used, whatever that is. We cannot avoid that in
* general, after logging in, but let's do what we can here.
*
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
@@ -52,6 +83,7 @@
#include "catalog/pg_authid.h"
#include "catalog/pg_control.h"
#include "common/base64.h"
#include "common/saslprep.h"
#include "common/scram-common.h"
#include "common/sha2.h"
#include "libpq/auth.h"
@@ -344,6 +376,17 @@ scram_build_verifier(const char *username, const char *password,
char salt[SCRAM_SALT_LEN];
char *encoded_salt;
int encoded_len;
char *prep_password = NULL;
pg_saslprep_rc rc;
/*
* Normalize the password with SASLprep. If that doesn't work, because
* the password isn't valid UTF-8 or contains prohibited characters, just
* proceed with the original password. (See comments at top of file.)
*/
rc = pg_saslprep(password, &prep_password);
if (rc == SASLPREP_SUCCESS)
password = (const char *) prep_password;
if (iterations <= 0)
iterations = SCRAM_ITERATIONS_DEFAULT;
@@ -373,6 +416,9 @@ scram_build_verifier(const char *username, const char *password,
(void) hex_encode((const char *) keybuf, SCRAM_KEY_LEN, serverkey_hex);
serverkey_hex[SCRAM_KEY_LEN * 2] = '\0';
if (prep_password)
pfree(prep_password);
return psprintf("scram-sha-256:%s:%d:%s:%s", encoded_salt, iterations, storedkey_hex, serverkey_hex);
}
@@ -392,13 +438,14 @@ scram_verify_plain_password(const char *username, const char *password,
uint8 stored_key[SCRAM_KEY_LEN];
uint8 server_key[SCRAM_KEY_LEN];
uint8 computed_key[SCRAM_KEY_LEN];
char *prep_password = NULL;
pg_saslprep_rc rc;
if (!parse_scram_verifier(verifier, &encoded_salt, &iterations,
stored_key, server_key))
{
/*
* The password looked like a SCRAM verifier, but could not be
* parsed.
* The password looked like a SCRAM verifier, but could not be parsed.
*/
elog(LOG, "invalid SCRAM verifier for user \"%s\"", username);
return false;
@@ -412,10 +459,18 @@ scram_verify_plain_password(const char *username, const char *password,
return false;
}
/* Normalize the password */
rc = pg_saslprep(password, &prep_password);
if (rc == SASLPREP_SUCCESS)
password = prep_password;
/* Compute Server key based on the user-supplied plaintext password */
scram_ClientOrServerKey(password, salt, saltlen, iterations,
SCRAM_SERVER_KEY_NAME, computed_key);
if (prep_password)
pfree(prep_password);
/*
* Compare the verifier's Server Key with the one computed from the
* user-supplied password.


@@ -42,7 +42,8 @@ override CPPFLAGS += -DVAL_LIBS="\"$(LIBS)\""
OBJS_COMMON = base64.o config_info.o controldata_utils.o exec.o ip.o \
keywords.o md5.o pg_lzcompress.o pgfnames.o psprintf.o relpath.o \
rmtree.o scram-common.o string.o username.o wait_error.o
rmtree.o saslprep.o scram-common.o string.o unicode_norm.o \
username.o wait_error.o
ifeq ($(with_openssl),yes)
OBJS_COMMON += sha2_openssl.o

src/common/saslprep.c (new file, 1279 lines)

File diff suppressed because it is too large.


@@ -147,29 +147,10 @@ scram_H(const uint8 *input, int len, uint8 *result)
pg_sha256_final(&ctx, result);
}
/*
* Encrypt password for SCRAM authentication. This basically applies the
* normalization of the password and a hash calculation using the salt
* value given by caller.
*/
static void
scram_SaltedPassword(const char *password, const char *salt, int saltlen, int iterations,
uint8 *result)
{
/*
* XXX: Here SASLprep should be applied on password. However, per RFC5802,
* it is required that the password is encoded in UTF-8, something that is
* not guaranteed in this protocol. We may want to revisit this
* normalization function once encoding functions are available as well in
* the frontend in order to be able to encode properly this string, and
* then apply SASLprep on it.
*/
scram_Hi(password, salt, saltlen, iterations, result);
}
/*
* Calculate ClientKey or ServerKey.
*
* The password should already be normalized by SASLprep.
*/
void
scram_ClientOrServerKey(const char *password,
@@ -179,7 +160,7 @@ scram_ClientOrServerKey(const char *password,
uint8 keybuf[SCRAM_KEY_LEN];
scram_HMAC_ctx ctx;
scram_SaltedPassword(password, salt, saltlen, iterations, keybuf);
scram_Hi(password, salt, saltlen, iterations, keybuf);
scram_HMAC_init(&ctx, keybuf, SCRAM_KEY_LEN);
scram_HMAC_update(&ctx, keystr, strlen(keystr));
scram_HMAC_final(result, &ctx);

src/common/unicode/.gitignore (new file, 7 lines)

@@ -0,0 +1,7 @@
/norm_test
/norm_test_table.h
# Files downloaded from the Unicode Character Database
/CompositionExclusions.txt
/NormalizationTest.txt
/UnicodeData.txt


@@ -0,0 +1,53 @@
#-------------------------------------------------------------------------
#
# Makefile
# Makefile for src/common/unicode
#
# IDENTIFICATION
# src/common/unicode/Makefile
#
#-------------------------------------------------------------------------
subdir = src/common/unicode
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
override CPPFLAGS := -DFRONTEND $(CPPFLAGS)
LIBS += $(PTHREAD_LIBS)
# By default, do nothing.
all:
DOWNLOAD = wget -O $@ --no-use-server-timestamps
# These files are part of the Unicode Character Database. Download
# them on demand.
UnicodeData.txt CompositionExclusions.txt NormalizationTest.txt:
$(DOWNLOAD) http://unicode.org/Public/UNIDATA/$(@F)
# Generation of conversion tables used for string normalization with
# UTF-8 strings.
unicode_norm_table.h: generate-unicode_norm_table.pl UnicodeData.txt CompositionExclusions.txt
$(PERL) generate-unicode_norm_table.pl
# Test suite
normalization-check: norm_test
./norm_test
norm_test: norm_test.o ../unicode_norm.o
norm_test.o: norm_test_table.h
norm_test_table.h: generate-norm_test_table.pl NormalizationTest.txt
perl generate-norm_test_table.pl NormalizationTest.txt $@
.PHONY: normalization-check
clean:
rm -f $(OBJS) norm_test norm_test.o
distclean: clean
rm -f UnicodeData.txt CompositionExclusions.txt NormalizationTest.txt norm_test_table.h unicode_norm_table.h
maintainer-clean: distclean

src/common/unicode/README (new file, 35 lines)

@ -0,0 +1,35 @@
This directory contains tools to generate the tables in
src/include/common/unicode_norm.h, used for Unicode normalization. The
generated .h file is included in the source tree, so these are normally not
needed to build PostgreSQL, only if you need to re-generate the .h file
from the Unicode data files for some reason, e.g. to update to a new version
of Unicode.
Generating unicode_norm_table.h
-------------------------------
1. Download the Unicode data file, UnicodeData.txt, from the Unicode
consortium and place it in the current directory. Then run the perl script
"generate-unicode_norm_table.pl" to process it and generate the
"unicode_norm_table.h" file. The Makefile contains a rule to download the
data files if they don't exist.
make unicode_norm_table.h
2. Inspect the resulting header file. Once you're happy with it, copy it to
the right location.
cp unicode_norm_table.h ../../../src/include/common/
Tests
-----
The Unicode consortium publishes a comprehensive test suite for the
normalization algorithm, in a file called NormalizationTest.txt. This
directory also contains a perl script and some C code to run our
normalization code with all the test strings in NormalizationTest.txt.
To download NormalizationTest.txt and run the tests:
make normalization-check


@@ -0,0 +1,102 @@
#!/usr/bin/perl
#
# Read the Unicode consortium's normalization test suite, NormalizationTest.txt,
# and generate a C array from it, for norm_test.c.
#
# NormalizationTest.txt is part of the Unicode Character Database.
#
# Copyright (c) 2000-2017, PostgreSQL Global Development Group
use strict;
use warnings;
use File::Basename;
die "Usage: $0 INPUT_FILE OUTPUT_FILE\n" if @ARGV != 2;
my $input_file = $ARGV[0];
my $output_file = $ARGV[1];
my $output_base = basename($output_file);
# Open the input and output files
open my $INPUT, $input_file
or die "Could not open input file $input_file: $!";
open my $OUTPUT, "> $output_file"
or die "Could not open output file $output_file: $!\n";
# Print header of output file.
print $OUTPUT <<HEADER;
/*-------------------------------------------------------------------------
*
* norm_test_table.h
* Test strings for Unicode normalization.
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/common/unicode/norm_test_table.h
*
*-------------------------------------------------------------------------
*/
/*
* File auto-generated by src/common/unicode/generate-norm_test_table.pl, do
* not edit. There is deliberately not an #ifndef PG_NORM_TEST_TABLE_H
* here.
*/
typedef struct
{
int linenum;
pg_wchar input[50];
pg_wchar output[50];
} pg_unicode_test;
/* test table */
HEADER
print $OUTPUT
"static const pg_unicode_test UnicodeNormalizationTests[] =\n{\n";
# Helper routine to convert a space-separated list of Unicode characters to
# hexadecimal list format, suitable for outputting in a C array.
sub codepoint_string_to_hex
{
my $codepoint_string = shift;
my $result;
foreach (split(' ', $codepoint_string))
{
my $cp = $_;
my $utf8 = "0x$cp, ";
$result .= $utf8;
}
$result .= '0'; # null-terminate the array
return $result;
}
# Process the input file line by line
my $linenum = 0;
while (my $line = <$INPUT>)
{
$linenum = $linenum + 1;
if ($line =~ /^\s*#/) { next; } # ignore comments
if ($line =~ /^@/) { next; } # ignore @Part0 like headers
# Split the line and get the fields needed:
#
# source; NFC; NFD; NFKC; NFKD
my ($source, $nfc, $nfd, $nfkc, $nfkd) = split(';', $line);
my $source_utf8 = codepoint_string_to_hex($source);
my $nfkc_utf8 = codepoint_string_to_hex($nfkc);
print $OUTPUT "\t{ $linenum, { $source_utf8 }, { $nfkc_utf8 } },\n";
}
# Output terminator entry
print $OUTPUT "\t{ 0, { 0 }, { 0 } }";
print $OUTPUT "\n};\n";
close $OUTPUT;
close $INPUT;


@@ -0,0 +1,226 @@
#!/usr/bin/perl
#
# Generate a composition table, using Unicode data files as input
#
# Input: UnicodeData.txt and CompositionExclusions.txt
# Output: unicode_norm_table.h
#
# Copyright (c) 2000-2017, PostgreSQL Global Development Group
use strict;
use warnings;
my $output_file = "unicode_norm_table.h";
my $FH;
# Read list of codes that should be excluded from re-composition.
my @composition_exclusion_codes = ();
open($FH, "CompositionExclusions.txt")
or die "Could not open CompositionExclusions.txt: $!.";
while (my $line = <$FH>)
{
if ($line =~ /^([[:xdigit:]]+)/)
{
push @composition_exclusion_codes, $1;
}
}
close $FH;
# Read entries from UnicodeData.txt into a list, and a hash table. We need
# three fields from each row: the codepoint, canonical combining class,
# and character decomposition mapping
my @characters = ();
my %character_hash = ();
open($FH, "UnicodeData.txt") or die "Could not open UnicodeData.txt: $!.";
while (my $line = <$FH>)
{
# Split the line and get the fields needed:
# - Unicode code value
# - Canonical Combining Class
# - Character Decomposition Mapping
my @elts = split(';', $line);
my $code = $elts[0];
my $class = $elts[3];
my $decomp = $elts[5];
# Skip codepoints above U+10FFFF. They cannot be represented in 4 bytes
# in UTF-8, and PostgreSQL doesn't support UTF-8 characters longer than
# 4 bytes. (This is just pro forma, as there aren't any such entries in
# the data file, currently.)
next if hex($code) > 0x10FFFF;
# Skip characters with no decompositions and a class of 0, to reduce the
# table size.
next if $class eq '0' && $decomp eq '';
my %char_entry = (code => $code, class => $class, decomp => $decomp);
push(@characters, \%char_entry);
$character_hash{$code} = \%char_entry;
}
close $FH;
my $num_characters = scalar @characters;
# Start writing out the output file
open my $OUTPUT, "> $output_file"
or die "Could not open output file $output_file: $!\n";
print $OUTPUT <<HEADER;
/*-------------------------------------------------------------------------
*
* unicode_norm_table.h
* Composition table used for Unicode normalization
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/common/unicode_norm_table.h
*
*-------------------------------------------------------------------------
*/
/*
* File auto-generated by src/common/unicode/generate-unicode_norm_table.pl,
* do not edit. There is deliberately not an #ifndef PG_UNICODE_NORM_TABLE_H
* here.
*/
typedef struct
{
uint32 codepoint; /* Unicode codepoint */
uint8 class; /* combining class of character */
uint8 dec_size_flags; /* size and flags of decomposition code list */
uint16 dec_index; /* index into UnicodeDecomp_codepoints, or the
* decomposition itself if DECOMP_INLINE */
} pg_unicode_decomposition;
#define DECOMP_NO_COMPOSE 0x80 /* don't use for re-composition */
#define DECOMP_INLINE 0x40 /* decomposition is stored inline in dec_index */
#define DECOMPOSITION_SIZE(x) ((x)->dec_size_flags & 0x3F)
#define DECOMPOSITION_NO_COMPOSE(x) (((x)->dec_size_flags & DECOMP_NO_COMPOSE) != 0)
#define DECOMPOSITION_IS_INLINE(x) (((x)->dec_size_flags & DECOMP_INLINE) != 0)
/* Table of Unicode codepoints and their decompositions */
static const pg_unicode_decomposition UnicodeDecompMain[$num_characters] =
{
HEADER
my $decomp_index = 0;
my $decomp_string = "";
my $last_code = $characters[-1]->{code};
foreach my $char (@characters)
{
my $code = $char->{code};
my $class = $char->{class};
my $decomp = $char->{decomp};
# The character decomposition mapping field in UnicodeData.txt is a list
# of unicode codepoints, separated by space. But it can be prefixed with
# a so-called compatibility formatting tag, like "<compat>" or "<font>".
# Entries with compatibility formatting tags should not be used for
# re-composing characters during normalization, so flag them in the table.
# (The tag itself doesn't matter, only whether there is a tag or not.)
my $compat = 0;
if ($decomp =~ /\<.*\>/)
{
$compat = 1;
$decomp =~ s/\<[^][]*\>//g;
}
my @decomp_elts = split(" ", $decomp);
# Decomposition size
my $decomp_size = scalar(@decomp_elts);
my $first_decomp = shift @decomp_elts;
my $flags = "";
my $comment = "";
if ($decomp_size == 2)
{
# Should this be used for recomposition?
if ($compat)
{
$flags .= " | DECOMP_NO_COMPOSE";
$comment = "compatibility mapping";
}
elsif ($character_hash{$first_decomp}
&& $character_hash{$first_decomp}->{class} != 0)
{
$flags .= " | DECOMP_NO_COMPOSE";
$comment = "non-starter decomposition";
}
else
{
foreach my $lcode (@composition_exclusion_codes)
{
if ($lcode eq $char->{code})
{
$flags .= " | DECOMP_NO_COMPOSE";
$comment = "in exclusion list";
last;
}
}
}
}
if ($decomp_size == 0)
{
print $OUTPUT "\t{0x$code, $class, 0$flags, 0}";
}
elsif ($decomp_size == 1 && length($first_decomp) <= 4)
{
# The decomposition consists of a single codepoint, and it fits
# in a uint16, so we can store it "inline" in the main table.
$flags .= " | DECOMP_INLINE";
print $OUTPUT "\t{0x$code, $class, 1$flags, 0x$first_decomp}";
}
else
{
print $OUTPUT
"\t{0x$code, $class, $decomp_size$flags, $decomp_index}";
# Now save the decompositions into a dedicated area that will
# be written afterwards. First build the entry dedicated to
# a sub-table with the code and decomposition.
$decomp_string .= ",\n" if ($decomp_string ne "");
$decomp_string .= "\t /* $decomp_index */ 0x$first_decomp";
foreach (@decomp_elts)
{
$decomp_string .= ", 0x$_";
}
$decomp_index = $decomp_index + $decomp_size;
}
# Print a comma after all items except the last one.
print $OUTPUT "," unless ($code eq $last_code);
if ($comment ne "")
{
# If the line is wide already, indent the comment with one tab,
# otherwise with two. This is to make the output match the way
# pgindent would mangle it. (This is quite hacky. To do this
# properly, we should actually track how long the line is so far,
# but this works for now.)
print $OUTPUT "\t" if ($decomp_index < 10);
print $OUTPUT "\t/* $comment */" if ($comment ne "");
}
print $OUTPUT "\n";
}
print $OUTPUT "\n};\n\n";
# Print the array of decomposed codes.
print $OUTPUT <<HEADER;
/* codepoints array */
static const uint32 UnicodeDecomp_codepoints[$decomp_index] =
{
$decomp_string
};
HEADER
close $OUTPUT;


@@ -0,0 +1,80 @@
/*-------------------------------------------------------------------------
* norm_test.c
* Program to test Unicode normalization functions.
*
* Portions Copyright (c) 2017, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/common/unicode/norm_test.c
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common/unicode_norm.h"
#include "norm_test_table.h"
static char *
print_wchar_str(const pg_wchar *s)
{
#define BUF_DIGITS 50
static char buf[BUF_DIGITS * 4 + 1];
int i;
i = 0;
while (*s && i < BUF_DIGITS)
{
snprintf(&buf[i * 4], 5, "%04X", *s);	/* 4 hex digits per codepoint */
i++;
s++;
}
buf[i * 4] = '\0';
return buf;
}
static int
pg_wcscmp(const pg_wchar *s1, const pg_wchar *s2)
{
for (;;)
{
if (*s1 < *s2)
return -1;
if (*s1 > *s2)
return 1;
if (*s1 == 0)
return 0;
s1++;
s2++;
}
}
int
main(int argc, char **argv)
{
const pg_unicode_test *test;
for (test = UnicodeNormalizationTests; test->input[0] != 0; test++)
{
pg_wchar *result;
result = unicode_normalize_kc(test->input);
if (pg_wcscmp(test->output, result) != 0)
{
printf("FAILURE (Normalizationdata.txt line %d):\n", test->linenum);
printf("input:\t%s\n", print_wchar_str(test->input));
printf("expected:\t%s\n", print_wchar_str(test->output));
printf("got\t%s\n", print_wchar_str(result));
printf("\n");
exit(1);
}
}
printf("All tests successful!\n");
exit(0);
}

src/common/unicode_norm.c (new file, 437 lines)

@@ -0,0 +1,437 @@
/*-------------------------------------------------------------------------
* unicode_norm.c
* Normalize a Unicode string to NFKC form
*
* This implements Unicode normalization, per the documentation at
* http://www.unicode.org/reports/tr15/.
*
* Portions Copyright (c) 2017, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/common/unicode_norm.c
*
*-------------------------------------------------------------------------
*/
#ifndef FRONTEND
#include "postgres.h"
#else
#include "postgres_fe.h"
#endif
#include "common/unicode_norm.h"
#include "common/unicode_norm_table.h"
#ifndef FRONTEND
#define ALLOC(size) palloc(size)
#define FREE(size) pfree(size)
#else
#define ALLOC(size) malloc(size)
#define FREE(size) free(size)
#endif
/* Constants for calculations with Hangul characters */
#define SBASE 0xAC00 /* U+AC00 */
#define LBASE 0x1100 /* U+1100 */
#define VBASE 0x1161 /* U+1161 */
#define TBASE 0x11A7 /* U+11A7 */
#define LCOUNT 19
#define VCOUNT 21
#define TCOUNT 28
#define NCOUNT VCOUNT * TCOUNT
#define SCOUNT LCOUNT * NCOUNT
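/*
 * Illustrative worked example of the algorithmic Hangul decomposition
 * (UAX #15, annex 10): for U+D4DB, sindex = 0xD4DB - SBASE = 10459, so
 * L = LBASE + sindex / NCOUNT = U+1111,
 * V = VBASE + (sindex % NCOUNT) / TCOUNT = U+1171, and
 * T = TBASE + sindex % TCOUNT = U+11B6.
 */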
/* comparison routine for bsearch() of decomposition lookup table. */
static int
conv_compare(const void *p1, const void *p2)
{
uint32 v1,
v2;
v1 = *(const uint32 *) p1;
v2 = ((const pg_unicode_decomposition *) p2)->codepoint;
return (v1 > v2) ? 1 : ((v1 == v2) ? 0 : -1);
}
/*
* Get the entry corresponding to code in the decomposition lookup table.
*/
static pg_unicode_decomposition *
get_code_entry(pg_wchar code)
{
return bsearch(&(code),
(void *) UnicodeDecompMain,
lengthof(UnicodeDecompMain),
sizeof(pg_unicode_decomposition),
conv_compare);
}
/*
* Given a decomposition entry looked up earlier, get the decomposed
* characters.
*
* Note: the returned pointer can point to a statically allocated buffer, and
* is only valid until the next call to this function!
*/
static const pg_wchar *
get_code_decomposition(pg_unicode_decomposition * entry, int *dec_size)
{
static pg_wchar x;
if (DECOMPOSITION_IS_INLINE(entry))
{
Assert(DECOMPOSITION_SIZE(entry) == 1);
x = (pg_wchar) entry->dec_index;
*dec_size = 1;
return &x;
}
else
{
*dec_size = DECOMPOSITION_SIZE(entry);
return &UnicodeDecomp_codepoints[entry->dec_index];
}
}
/*
* Calculate how many characters a given character will decompose to.
*
* This needs to recurse, if the character decomposes into characters that
* are, in turn, decomposable.
*/
static int
get_decomposed_size(pg_wchar code)
{
pg_unicode_decomposition *entry;
int size = 0;
int i;
const uint32 *decomp;
int dec_size;
/*
* Fast path for Hangul characters not stored in tables to save memory as
* decomposition is algorithmic. See
* http://unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
* the matter.
*/
if (code >= SBASE && code < SBASE + SCOUNT)
{
uint32 tindex,
sindex;
sindex = code - SBASE;
tindex = sindex % TCOUNT;
if (tindex != 0)
return 3;
return 2;
}
entry = get_code_entry(code);
/*
* Just count current code if no other decompositions. A NULL entry is
* equivalent to a character with class 0 and no decompositions.
*/
if (entry == NULL || DECOMPOSITION_SIZE(entry) == 0)
return 1;
/*
* If this entry has other decomposition codes look at them as well. First
* get its decomposition in the list of tables available.
*/
decomp = get_code_decomposition(entry, &dec_size);
for (i = 0; i < dec_size; i++)
{
uint32 lcode = decomp[i];
size += get_decomposed_size(lcode);
}
return size;
}
/*
* Recompose a set of characters. For hangul characters, the calculation
* is algorithmic. For others, an inverse lookup at the decomposition
* table is necessary. Returns true if a recomposition can be done, and
* false otherwise.
*/
static bool
recompose_code(uint32 start, uint32 code, uint32 *result)
{
/*
* Handle Hangul characters algorithmically, per the Unicode spec.
*
* Check if two current characters are L and V.
*/
if (start >= LBASE && start < LBASE + LCOUNT &&
code >= VBASE && code < VBASE + VCOUNT)
{
/* make syllable of form LV */
uint32 lindex = start - LBASE;
uint32 vindex = code - VBASE;
*result = SBASE + (lindex * VCOUNT + vindex) * TCOUNT;
return true;
}
/* Check if two current characters are LV and T */
else if (start >= SBASE && start < (SBASE + SCOUNT) &&
((start - SBASE) % TCOUNT) == 0 &&
code >= TBASE && code < (TBASE + TCOUNT))
{
/* make syllable of from LVT */
uint32 tindex = code - TBASE;
*result = start + tindex;
return true;
}
else
{
int i;
/*
* Do an inverse lookup of the decomposition tables to see if anything
* matches. The comparison just needs to be a perfect match on the
* sub-table of size two, because the start character has already been
* recomposed partially.
*/
for (i = 0; i < lengthof(UnicodeDecompMain); i++)
{
const pg_unicode_decomposition *entry = &UnicodeDecompMain[i];
if (DECOMPOSITION_SIZE(entry) != 2)
continue;
if (DECOMPOSITION_NO_COMPOSE(entry))
continue;
if (start == UnicodeDecomp_codepoints[entry->dec_index] &&
code == UnicodeDecomp_codepoints[entry->dec_index + 1])
{
*result = entry->codepoint;
return true;
}
}
}
return false;
}
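/*
 * Worked example of the Hangul branches above: recomposing L U+1111 with
 * V U+1171 yields SBASE + (17 * VCOUNT + 16) * TCOUNT = U+D4CC, and
 * recomposing that LV syllable with T U+11B6 yields U+D4CC + 15 = U+D4DB.
 */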
/*
* Decompose the given code into the array given by caller. The
* decomposition begins at the position given by caller, saving one
* lookup on the decomposition table. The current position needs to be
* updated here to let the caller know from where to continue filling
* in the array result.
*/
static void
decompose_code(pg_wchar code, pg_wchar **result, int *current)
{
pg_unicode_decomposition *entry;
int i;
const uint32 *decomp;
int dec_size;
/*
* Fast path for Hangul characters not stored in tables to save memory as
* decomposition is algorithmic. See
* http://unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
* the matter.
*/
if (code >= SBASE && code < SBASE + SCOUNT)
{
uint32 l,
v,
tindex,
sindex;
pg_wchar *res = *result;
sindex = code - SBASE;
l = LBASE + sindex / (VCOUNT * TCOUNT);
v = VBASE + (sindex % (VCOUNT * TCOUNT)) / TCOUNT;
tindex = sindex % TCOUNT;
res[*current] = l;
(*current)++;
res[*current] = v;
(*current)++;
if (tindex != 0)
{
res[*current] = TBASE + tindex;
(*current)++;
}
return;
}
entry = get_code_entry(code);
/*
* Just fill in the current code if there are no decomposition codes to
* recurse to. A NULL entry is equivalent to a character with class 0 and
* no decompositions, so it is handled the same way.
*/
if (entry == NULL || DECOMPOSITION_SIZE(entry) == 0)
{
pg_wchar *res = *result;
res[*current] = code;
(*current)++;
return;
}
/*
* If this entry has other decomposition codes look at them as well.
*/
decomp = get_code_decomposition(entry, &dec_size);
for (i = 0; i < dec_size; i++)
{
pg_wchar lcode = (pg_wchar) decomp[i];
/* Recurse, in case the decomposed code decomposes further */
decompose_code(lcode, result, current);
}
}
/*
* unicode_normalize_kc - Normalize a Unicode string to NFKC form.
*
* The input is a 0-terminated array of codepoints.
*
* In the frontend, returns a 0-terminated array of codepoints, allocated
* with malloc, or NULL if we run out of memory. In the backend, the
* returned string is palloc'd instead, and OOM is reported with ereport().
*/
pg_wchar *
unicode_normalize_kc(const pg_wchar *input)
{
pg_wchar *decomp_chars;
pg_wchar *recomp_chars;
int decomp_size,
current_size;
int count;
const pg_wchar *p;
/* variables for recomposition */
int last_class;
int starter_pos;
int target_pos;
uint32 starter_ch;
/* First, do character decomposition */
/*
* Calculate how many characters long the decomposed version will be.
*/
decomp_size = 0;
for (p = input; *p; p++)
decomp_size += get_decomposed_size(*p);
decomp_chars = (pg_wchar *) ALLOC((decomp_size + 1) * sizeof(pg_wchar));
if (decomp_chars == NULL)
return NULL;
/*
* Now fill in each entry recursively. This needs a second pass on the
* decomposition table.
*/
current_size = 0;
for (p = input; *p; p++)
decompose_code(*p, &decomp_chars, &current_size);
decomp_chars[decomp_size] = '\0';
Assert(decomp_size == current_size);
/*
* Now apply canonical ordering.
*/
for (count = 1; count < decomp_size; count++)
{
pg_wchar prev = decomp_chars[count - 1];
pg_wchar next = decomp_chars[count];
pg_wchar tmp;
pg_unicode_decomposition *prevEntry = get_code_entry(prev);
pg_unicode_decomposition *nextEntry = get_code_entry(next);
/*
* If no entries are found, the character used is either a Hangul
* character or a character with a class of 0 and no decompositions,
* so move on to the next pair.
*/
if (prevEntry == NULL || nextEntry == NULL)
continue;
/*
* Per Unicode (http://unicode.org/reports/tr15/tr15-18.html) annex 4,
* a sequence of two adjacent characters in a string is an
* exchangeable pair if the combining class (from the Unicode
* Character Database) for the first character is greater than the
* combining class for the second, and the second is not a starter. A
* character is a starter if its combining class is 0.
*/
if (nextEntry->class == 0x0 || prevEntry->class == 0x0)
continue;
if (prevEntry->class <= nextEntry->class)
continue;
/* exchange can happen */
tmp = decomp_chars[count - 1];
decomp_chars[count - 1] = decomp_chars[count];
decomp_chars[count] = tmp;
/* backtrack to check again */
if (count > 1)
count -= 2;
}
/*
* The last phase of NFKC is the recomposition of the reordered Unicode
* string using combining classes. The recomposed string cannot be longer
* than the decomposed one, so make the allocation of the output string
* based on that assumption.
*/
recomp_chars = (pg_wchar *) ALLOC((decomp_size + 1) * sizeof(pg_wchar));
if (!recomp_chars)
{
FREE(decomp_chars);
return NULL;
}
last_class = -1; /* this eliminates a special check */
starter_pos = 0;
target_pos = 1;
starter_ch = recomp_chars[0] = decomp_chars[0];
for (count = 1; count < decomp_size; count++)
{
pg_wchar ch = decomp_chars[count];
pg_unicode_decomposition *ch_entry = get_code_entry(ch);
int ch_class = (ch_entry == NULL) ? 0 : ch_entry->class;
pg_wchar composite;
if (last_class < ch_class &&
recompose_code(starter_ch, ch, &composite))
{
recomp_chars[starter_pos] = composite;
starter_ch = composite;
}
else if (ch_class == 0)
{
starter_pos = target_pos;
starter_ch = ch;
last_class = -1;
recomp_chars[target_pos++] = ch;
}
else
{
last_class = ch_class;
recomp_chars[target_pos++] = ch;
}
}
recomp_chars[target_pos] = (pg_wchar) '\0';
FREE(decomp_chars);
return recomp_chars;
}


@@ -0,0 +1,30 @@
/*-------------------------------------------------------------------------
*
* saslprep.h
* SASLprep normalization, for SCRAM authentication
*
* These definitions are used by both frontend and backend code.
*
* Copyright (c) 2017, PostgreSQL Global Development Group
*
* src/include/common/saslprep.h
*
*-------------------------------------------------------------------------
*/
#ifndef SASLPREP_H
#define SASLPREP_H
/*
* Return codes for pg_saslprep() function.
*/
typedef enum
{
SASLPREP_SUCCESS = 0,
SASLPREP_OOM = -1, /* out of memory (only in frontend) */
SASLPREP_INVALID_UTF8 = -2, /* input is not a valid UTF-8 string */
SASLPREP_PROHIBITED = -3 /* output would contain prohibited characters */
} pg_saslprep_rc;
extern pg_saslprep_rc pg_saslprep(const char *input, char **output);
#endif /* SASLPREP_H */
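A hypothetical caller might dispatch on the return code like this (a sketch,
not part of this patch; pg_fe_scram_init() below shows the real libpq usage):

    char   *prep_password = NULL;

    switch (pg_saslprep(password, &prep_password))
    {
        case SASLPREP_SUCCESS:
            /* use prep_password; free it when done */
            break;
        case SASLPREP_OOM:
            /* out of memory (can only happen in the frontend) */
            break;
        case SASLPREP_INVALID_UTF8:
        case SASLPREP_PROHIBITED:
            /* fall back to using the raw password bytes as-is */
            break;
    }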


@@ -0,0 +1,21 @@
/*-------------------------------------------------------------------------
*
* unicode_norm.h
* Routines for normalizing Unicode strings
*
* These definitions are used by both frontend and backend code.
*
* Copyright (c) 2017, PostgreSQL Global Development Group
*
* src/include/common/unicode_norm.h
*
*-------------------------------------------------------------------------
*/
#ifndef UNICODE_NORM_H
#define UNICODE_NORM_H
#include "mb/pg_wchar.h"
extern pg_wchar *unicode_normalize_kc(const pg_wchar *input);
#endif /* UNICODE_NORM_H */

File diff suppressed because it is too large.


@@ -11,6 +11,7 @@
/pg_strong_random.c
/pgstrcasecmp.c
/pqsignal.c
/saslprep.c
/scram-common.c
/sha2.c
/sha2_openssl.c
@@ -19,6 +20,7 @@
/strlcpy.c
/system.c
/thread.c
/unicode_norm.c
/win32error.c
/win32setlocale.c
/pgsleep.c


@@ -49,7 +49,7 @@ endif
# src/backend/utils/mb
OBJS += encnames.o wchar.o
# src/common
OBJS += base64.o ip.o md5.o scram-common.o
OBJS += base64.o ip.o md5.o scram-common.o saslprep.o unicode_norm.o
ifeq ($(with_openssl),yes)
OBJS += fe-secure-openssl.o sha2_openssl.o
@@ -106,7 +106,7 @@ backend_src = $(top_srcdir)/src/backend
chklocale.c crypt.c erand48.c getaddrinfo.c getpeereid.c inet_aton.c inet_net_ntop.c noblock.c open.c system.c pgsleep.c pg_strong_random.c pgstrcasecmp.c pqsignal.c snprintf.c strerror.c strlcpy.c thread.c win32error.c win32setlocale.c: % : $(top_srcdir)/src/port/%
rm -f $@ && $(LN_S) $< .
ip.c md5.c base64.c scram-common.c sha2.c sha2_openssl.c: % : $(top_srcdir)/src/common/%
ip.c md5.c base64.c scram-common.c sha2.c sha2_openssl.c saslprep.c unicode_norm.c: % : $(top_srcdir)/src/common/%
rm -f $@ && $(LN_S) $< .
encnames.c wchar.c: % : $(backend_src)/utils/mb/%


@@ -15,6 +15,7 @@
#include "postgres_fe.h"
#include "common/base64.h"
#include "common/saslprep.h"
#include "common/scram-common.h"
#include "fe-auth.h"
@@ -42,7 +43,7 @@ typedef struct
/* These are supplied by the user */
const char *username;
const char *password;
char *password;
/* We construct these */
char *client_nonce;
@@ -82,6 +83,8 @@ void *
pg_fe_scram_init(const char *username, const char *password)
{
fe_scram_state *state;
char *prep_password;
pg_saslprep_rc rc;
state = (fe_scram_state *) malloc(sizeof(fe_scram_state));
if (!state)
@@ -89,7 +92,24 @@ pg_fe_scram_init(const char *username, const char *password)
memset(state, 0, sizeof(fe_scram_state));
state->state = FE_SCRAM_INIT;
state->username = username;
state->password = password;
/* Normalize the password with SASLprep, if possible */
rc = pg_saslprep(password, &prep_password);
if (rc == SASLPREP_OOM)
{
free(state);
return NULL;
}
if (rc != SASLPREP_SUCCESS)
{
prep_password = strdup(password);
if (!prep_password)
{
free(state);
return NULL;
}
}
state->password = prep_password;
return state;
}
@@ -102,6 +122,9 @@ pg_fe_scram_free(void *opaq)
{
fe_scram_state *state = (fe_scram_state *) opaq;
if (state->password)
free(state->password);
/* client messages */
if (state->client_nonce)
free(state->client_nonce);


@@ -0,0 +1,98 @@
# Test password normalization in SCRAM.
#
# This test cannot run on Windows, as Postgres cannot be set up with Unix
# sockets there and would need to go through SSPI.
use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 12;
# Delete pg_hba.conf from the given node, add a new entry to it
# and then execute a reload to refresh it.
sub reset_pg_hba
{
my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
$node->append_conf('pg_hba.conf', "local all all $hba_method");
$node->reload;
}
# Test access for a single role, useful to wrap all tests into one.
sub test_login
{
my $node = shift;
my $role = shift;
my $password = shift;
my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
$ENV{"PGPASSWORD"} = $password;
my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
is($res, $expected_res,
"authentication $status_string for role $role with password $password");
}
SKIP:
{
skip "authentication tests cannot run on Windows", 12 if ($windows_os);
# Initialize master node
my $node = get_new_node('master');
$node->init;
$node->start;
# These tests are based on the example strings from RFC4013.txt,
# Section "3. Examples":
#
# # Input Output Comments
# - ----- ------ --------
# 1 I<U+00AD>X IX SOFT HYPHEN mapped to nothing
# 2 user user no transformation
# 3 USER USER case preserved, will not match #2
# 4 <U+00AA> a output is NFKC, input in ISO 8859-1
# 5 <U+2168> IX output is NFKC, will match #1
# 6 <U+0007> Error - prohibited character
# 7 <U+0627><U+0031> Error - bidirectional check
# Create test roles.
$node->safe_psql('postgres',
"SET password_encryption='scram';
SET client_encoding='utf8';
CREATE ROLE saslpreptest1_role LOGIN PASSWORD 'IX';
CREATE ROLE saslpreptest4a_role LOGIN PASSWORD 'a';
CREATE ROLE saslpreptest4b_role LOGIN PASSWORD E'\\xc2\\xaa';
CREATE ROLE saslpreptest6_role LOGIN PASSWORD E'foo\\x07bar';
CREATE ROLE saslpreptest7_role LOGIN PASSWORD E'foo\\u0627\\u0031bar';
");
# Require password from now on.
reset_pg_hba($node, 'scram');
# Check that #1 and #5 are treated the same as just 'IX'
test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
test_login($node, 'saslpreptest1_role', "\xe2\x85\xa8", 0);
# but different from lower case 'ix'
test_login($node, 'saslpreptest1_role', "ix", 2);
# Check #4
test_login($node, 'saslpreptest4a_role', "a", 0);
test_login($node, 'saslpreptest4a_role', "\xc2\xaa", 0);
test_login($node, 'saslpreptest4b_role', "a", 0);
test_login($node, 'saslpreptest4b_role', "\xc2\xaa", 0);
# Check #6 and #7 - In PostgreSQL, contrary to the spec, if the password
# contains prohibited characters, we use it as is, without normalization.
test_login($node, 'saslpreptest6_role', "foo\x07bar", 0);
test_login($node, 'saslpreptest6_role', "foobar", 2);
test_login($node, 'saslpreptest7_role', "foo\xd8\xa71bar", 0);
test_login($node, 'saslpreptest7_role', "foo1\xd8\xa7bar", 2);
test_login($node, 'saslpreptest7_role', "foobar", 2);
}


@@ -112,7 +112,8 @@ sub mkvcbuild
our @pgcommonallfiles = qw(
base64.c config_info.c controldata_utils.c exec.c ip.c keywords.c
md5.c pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c
scram-common.c string.c username.c wait_error.c);
saslprep.c scram-common.c string.c unicode_norm.c username.c
wait_error.c);
if ($solution->{options}->{openssl})
{