mirror of
https://git.postgresql.org/git/postgresql.git
synced 2024-09-28 01:01:49 +02:00
Invent open_auth_file() in hba.c to refactor authentication file opening
This adds a check on the recursion depth when including authentication configuration files, something that has never been done when processing '@' files for database and user name lists in pg_hba.conf. On HEAD, this was leading to a rather confusing error, as of: FATAL: exceeded maxAllocatedDescs (NN) while trying to open file "/path/blah.conf"

This refactors the code so that the error reported is now the following, which is the same as for GUCs: FATAL: could not open file "/path/blah.conf": maximum nesting depth exceeded

This slightly reduces the verbosity of the error message used for files included in user and database lists, reporting only the file name of what's failing to load, without mentioning the relative or absolute path specified after '@' in a HBA file. The absolute path is built upon what '@' defines anyway, so there is no actual loss of information. This makes the future inclusion logic much simpler. A follow-up patch will add an error context to be able to track on which line of which file the inclusion is failing, to close the loop, providing all the information needed to know the full chain of events.

This logic has been extracted from a larger patch written by Julien, rewritten by me to have a unique code path calling AllocateFile() on authentication files, and is useful on its own. This new interface will be used later for authentication files included with @include[_dir,_if_exists], in a follow-up patch.

Author: Michael Paquier, Julien Rouhaud
Discussion: https://www.postgresql.org/message-id/Y2xUBJ+S+Z0zbxRW@paquier.xyz
This commit is contained in:
parent
45d5ecab49
commit
783e8c69cb
@ -117,7 +117,8 @@ static const char *const UserAuthName[] =
|
||||
|
||||
|
||||
static List *tokenize_inc_file(List *tokens, const char *outer_filename,
|
||||
const char *inc_filename, int elevel, char **err_msg);
|
||||
const char *inc_filename, int elevel,
|
||||
int depth, char **err_msg);
|
||||
static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
|
||||
int elevel, char **err_msg);
|
||||
static int regcomp_auth_token(AuthToken *token, char *filename, int line_num,
|
||||
@ -414,7 +415,7 @@ regexec_auth_token(const char *match, AuthToken *token, size_t nmatch,
|
||||
*/
|
||||
static List *
|
||||
next_field_expand(const char *filename, char **lineptr,
|
||||
int elevel, char **err_msg)
|
||||
int elevel, int depth, char **err_msg)
|
||||
{
|
||||
char buf[MAX_TOKEN];
|
||||
bool trailing_comma;
|
||||
@ -431,7 +432,7 @@ next_field_expand(const char *filename, char **lineptr,
|
||||
/* Is this referencing a file? */
|
||||
if (!initial_quote && buf[0] == '@' && buf[1] != '\0')
|
||||
tokens = tokenize_inc_file(tokens, filename, buf + 1,
|
||||
elevel, err_msg);
|
||||
elevel, depth + 1, err_msg);
|
||||
else
|
||||
tokens = lappend(tokens, make_auth_token(buf, initial_quote));
|
||||
} while (trailing_comma && (*err_msg == NULL));
|
||||
@ -459,6 +460,7 @@ tokenize_inc_file(List *tokens,
|
||||
const char *outer_filename,
|
||||
const char *inc_filename,
|
||||
int elevel,
|
||||
int depth,
|
||||
char **err_msg)
|
||||
{
|
||||
char *inc_fullname;
|
||||
@ -468,24 +470,18 @@ tokenize_inc_file(List *tokens,
|
||||
MemoryContext linecxt;
|
||||
|
||||
inc_fullname = AbsoluteConfigLocation(inc_filename, outer_filename);
|
||||
inc_file = open_auth_file(inc_fullname, elevel, depth, err_msg);
|
||||
|
||||
inc_file = AllocateFile(inc_fullname, "r");
|
||||
if (inc_file == NULL)
|
||||
{
|
||||
int save_errno = errno;
|
||||
|
||||
ereport(elevel,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not open secondary authentication file \"@%s\" as \"%s\": %m",
|
||||
inc_filename, inc_fullname)));
|
||||
*err_msg = psprintf("could not open secondary authentication file \"@%s\" as \"%s\": %s",
|
||||
inc_filename, inc_fullname, strerror(save_errno));
|
||||
/* error already logged */
|
||||
pfree(inc_fullname);
|
||||
return tokens;
|
||||
}
|
||||
|
||||
/* There is possible recursion here if the file contains @ */
|
||||
linecxt = tokenize_auth_file(inc_fullname, inc_file, &inc_lines, elevel);
|
||||
linecxt = tokenize_auth_file(inc_fullname, inc_file, &inc_lines, elevel,
|
||||
depth);
|
||||
|
||||
FreeFile(inc_file);
|
||||
pfree(inc_fullname);
|
||||
@ -521,6 +517,59 @@ tokenize_inc_file(List *tokens,
|
||||
return tokens;
|
||||
}
|
||||
|
||||
/*
 * open_auth_file
 *		Open the given authentication configuration file.
 *
 * filename: the absolute path to the target file
 * elevel: message logging level
 * depth: recursion level when opening the file
 * err_msg: details about the error
 *
 * Return value is the opened file.  On error, returns NULL with details
 * about the error stored in "err_msg" (when the caller provided one).
 */
FILE *
open_auth_file(const char *filename, int elevel, int depth,
			   char **err_msg)
{
	FILE	   *fh;

	/*
	 * Guard against runaway include recursion.  This is only a safety check
	 * to avoid dumping core from stack overflow if an included file loops
	 * back to itself, directly or indirectly; the limit of 10 is arbitrary.
	 */
	if (depth > 10)
	{
		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": maximum nesting depth exceeded",
						filename)));
		if (err_msg)
			*err_msg = psprintf("could not open file \"%s\": maximum nesting depth exceeded",
								filename);
		return NULL;
	}

	fh = AllocateFile(filename, "r");
	if (fh != NULL)
		return fh;

	/* Open failed: report it, preserving errno across the ereport call */
	{
		int			save_errno = errno;

		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m",
						filename)));
		if (err_msg)
			*err_msg = psprintf("could not open file \"%s\": %s",
								filename, strerror(save_errno));
	}

	return NULL;
}
|
||||
|
||||
/*
|
||||
* tokenize_auth_file
|
||||
* Tokenize the given file.
|
||||
@ -532,6 +581,7 @@ tokenize_inc_file(List *tokens,
|
||||
* file: the already-opened target file
|
||||
* tok_lines: receives output list
|
||||
* elevel: message logging level
|
||||
* depth: level of recursion when tokenizing the target file
|
||||
*
|
||||
* Errors are reported by logging messages at ereport level elevel and by
|
||||
* adding TokenizedAuthLine structs containing non-null err_msg fields to the
|
||||
@ -542,7 +592,7 @@ tokenize_inc_file(List *tokens,
|
||||
*/
|
||||
MemoryContext
|
||||
tokenize_auth_file(const char *filename, FILE *file, List **tok_lines,
|
||||
int elevel)
|
||||
int elevel, int depth)
|
||||
{
|
||||
int line_number = 1;
|
||||
StringInfoData buf;
|
||||
@ -613,7 +663,7 @@ tokenize_auth_file(const char *filename, FILE *file, List **tok_lines,
|
||||
List *current_field;
|
||||
|
||||
current_field = next_field_expand(filename, &lineptr,
|
||||
elevel, &err_msg);
|
||||
elevel, depth, &err_msg);
|
||||
/* add field to line, unless we are at EOL or comment start */
|
||||
if (current_field != NIL)
|
||||
current_line = lappend(current_line, current_field);
|
||||
@ -2332,17 +2382,14 @@ load_hba(void)
|
||||
MemoryContext oldcxt;
|
||||
MemoryContext hbacxt;
|
||||
|
||||
file = AllocateFile(HbaFileName, "r");
|
||||
file = open_auth_file(HbaFileName, LOG, 0, NULL);
|
||||
if (file == NULL)
|
||||
{
|
||||
ereport(LOG,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not open configuration file \"%s\": %m",
|
||||
HbaFileName)));
|
||||
/* error already logged */
|
||||
return false;
|
||||
}
|
||||
|
||||
linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, LOG);
|
||||
linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, LOG, 0);
|
||||
FreeFile(file);
|
||||
|
||||
/* Now parse all the lines */
|
||||
@ -2703,18 +2750,15 @@ load_ident(void)
|
||||
MemoryContext ident_context;
|
||||
IdentLine *newline;
|
||||
|
||||
file = AllocateFile(IdentFileName, "r");
|
||||
/* not FATAL ... we just won't do any special ident maps */
|
||||
file = open_auth_file(IdentFileName, LOG, 0, NULL);
|
||||
if (file == NULL)
|
||||
{
|
||||
/* not fatal ... we just won't do any special ident maps */
|
||||
ereport(LOG,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not open usermap file \"%s\": %m",
|
||||
IdentFileName)));
|
||||
/* error already logged */
|
||||
return false;
|
||||
}
|
||||
|
||||
linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, LOG);
|
||||
linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, LOG, 0);
|
||||
FreeFile(file);
|
||||
|
||||
/* Now parse all the lines */
|
||||
|
@ -380,14 +380,9 @@ fill_hba_view(Tuplestorestate *tuple_store, TupleDesc tupdesc)
|
||||
* (Most other error conditions should result in a message in a view
|
||||
* entry.)
|
||||
*/
|
||||
file = AllocateFile(HbaFileName, "r");
|
||||
if (file == NULL)
|
||||
ereport(ERROR,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not open configuration file \"%s\": %m",
|
||||
HbaFileName)));
|
||||
file = open_auth_file(HbaFileName, ERROR, 0, NULL);
|
||||
|
||||
linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, DEBUG3);
|
||||
linecxt = tokenize_auth_file(HbaFileName, file, &hba_lines, DEBUG3, 0);
|
||||
FreeFile(file);
|
||||
|
||||
/* Now parse all the lines */
|
||||
@ -529,14 +524,9 @@ fill_ident_view(Tuplestorestate *tuple_store, TupleDesc tupdesc)
|
||||
* (Most other error conditions should result in a message in a view
|
||||
* entry.)
|
||||
*/
|
||||
file = AllocateFile(IdentFileName, "r");
|
||||
if (file == NULL)
|
||||
ereport(ERROR,
|
||||
(errcode_for_file_access(),
|
||||
errmsg("could not open usermap file \"%s\": %m",
|
||||
IdentFileName)));
|
||||
file = open_auth_file(IdentFileName, ERROR, 0, NULL);
|
||||
|
||||
linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, DEBUG3);
|
||||
linecxt = tokenize_auth_file(IdentFileName, file, &ident_lines, DEBUG3, 0);
|
||||
FreeFile(file);
|
||||
|
||||
/* Now parse all the lines */
|
||||
|
@ -177,7 +177,9 @@ extern int check_usermap(const char *usermap_name,
|
||||
extern HbaLine *parse_hba_line(TokenizedAuthLine *tok_line, int elevel);
|
||||
extern IdentLine *parse_ident_line(TokenizedAuthLine *tok_line, int elevel);
|
||||
extern bool pg_isblank(const char c);
|
||||
extern FILE *open_auth_file(const char *filename, int elevel, int depth,
|
||||
char **err_msg);
|
||||
extern MemoryContext tokenize_auth_file(const char *filename, FILE *file,
|
||||
List **tok_lines, int elevel);
|
||||
List **tok_lines, int elevel, int depth);
|
||||
|
||||
#endif /* HBA_H */
|
||||
|
Loading…
Reference in New Issue
Block a user