/*
* regc_locale.c --
*
* This file contains locale-specific regexp routines.
* This file is #included by regcomp.c.
*
* Copyright (c) 1998 by Scriptics Corporation.
*
* This software is copyrighted by the Regents of the University of
* California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState
* Corporation and other parties. The following terms apply to all files
* associated with the software unless explicitly disclaimed in
* individual files.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*
* IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
* FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
* DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
* IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
* NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS.
*
* GOVERNMENT USE: If you are acquiring this software on behalf of the
* U.S. government, the Government shall have only "Restricted Rights"
* in the software and related documentation as defined in the Federal
* Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
* are acquiring the software on behalf of the Department of Defense, the
* software shall be classified as "Commercial Computer Software" and the
* Government shall have only "Restricted Rights" as defined in Clause
* 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
* authors grant the U.S. Government and others acting in its behalf
* permission to use and distribute the software in accordance with the
* terms specified in this license.
*
* src/backend/regex/regc_locale.c
*/
/* ASCII character-name table */
static const struct cname
{
	const char *name;
	const char	code;
} cnames[] =
{
	{"NUL", '\0'},
	{"SOH", '\001'},
	{"STX", '\002'},
	{"ETX", '\003'},
	{"EOT", '\004'},
	{"ENQ", '\005'},
	{"ACK", '\006'},
	{"BEL", '\007'},
	{"alert", '\007'},
	{"BS", '\010'},
	{"backspace", '\b'},
	{"HT", '\011'},
	{"tab", '\t'},
	{"LF", '\012'},
	{"newline", '\n'},
	{"VT", '\013'},
	{"vertical-tab", '\v'},
	{"FF", '\014'},
	{"form-feed", '\f'},
	{"CR", '\015'},
	{"carriage-return", '\r'},
	{"SO", '\016'},
	{"SI", '\017'},
	{"DLE", '\020'},
	{"DC1", '\021'},
	{"DC2", '\022'},
	{"DC3", '\023'},
	{"DC4", '\024'},
	{"NAK", '\025'},
	{"SYN", '\026'},
	{"ETB", '\027'},
	{"CAN", '\030'},
	{"EM", '\031'},
	{"SUB", '\032'},
	{"ESC", '\033'},
	{"IS4", '\034'},
	{"FS", '\034'},
	{"IS3", '\035'},
	{"GS", '\035'},
	{"IS2", '\036'},
	{"RS", '\036'},
	{"IS1", '\037'},
	{"US", '\037'},
	{"space", ' '},
	{"exclamation-mark", '!'},
	{"quotation-mark", '"'},
	{"number-sign", '#'},
	{"dollar-sign", '$'},
	{"percent-sign", '%'},
	{"ampersand", '&'},
	{"apostrophe", '\''},
	{"left-parenthesis", '('},
	{"right-parenthesis", ')'},
	{"asterisk", '*'},
	{"plus-sign", '+'},
	{"comma", ','},
	{"hyphen", '-'},
	{"hyphen-minus", '-'},
	{"period", '.'},
	{"full-stop", '.'},
	{"slash", '/'},
	{"solidus", '/'},
	{"zero", '0'},
	{"one", '1'},
	{"two", '2'},
	{"three", '3'},
	{"four", '4'},
	{"five", '5'},
	{"six", '6'},
	{"seven", '7'},
	{"eight", '8'},
	{"nine", '9'},
	{"colon", ':'},
	{"semicolon", ';'},
	{"less-than-sign", '<'},
	{"equals-sign", '='},
	{"greater-than-sign", '>'},
	{"question-mark", '?'},
	{"commercial-at", '@'},
	{"left-square-bracket", '['},
	{"backslash", '\\'},
	{"reverse-solidus", '\\'},
	{"right-square-bracket", ']'},
	{"circumflex", '^'},
	{"circumflex-accent", '^'},
	{"underscore", '_'},
	{"low-line", '_'},
	{"grave-accent", '`'},
	{"left-brace", '{'},
	{"left-curly-bracket", '{'},
	{"vertical-line", '|'},
	{"right-brace", '}'},
	{"right-curly-bracket", '}'},
	{"tilde", '~'},
	{"DEL", '\177'},
	{NULL, 0}
};

/*
* The following arrays define the valid character class names.
*/
static const char *const classNames[NUM_CCLASSES + 1] = {
	"alnum", "alpha", "ascii", "blank", "cntrl", "digit", "graph",
	"lower", "print", "punct", "space", "upper", "xdigit", NULL
};

enum classes
{
	CC_ALNUM, CC_ALPHA, CC_ASCII, CC_BLANK, CC_CNTRL, CC_DIGIT, CC_GRAPH,
	CC_LOWER, CC_PRINT, CC_PUNCT, CC_SPACE, CC_UPPER, CC_XDIGIT
};

/*
* We do not use the hard-wired Unicode classification tables that Tcl does.
* This is because (a) we need to deal with other encodings besides Unicode,
* and (b) we want to track the behavior of the libc locale routines as
* closely as possible. For example, it wouldn't be unreasonable for a
* locale to not consider every Unicode letter as a letter. So we build
* character classification cvecs by asking libc, even for Unicode.
*/
/*
* element - map collating-element name to chr
*/
static chr
element(struct vars * v,		/* context */
		const chr *startp,		/* points to start of name */
		const chr *endp)		/* points just past end of name */
{
	const struct cname *cn;
	size_t		len;

	/* generic: one-chr names stand for themselves */
	assert(startp < endp);
	len = endp - startp;
	if (len == 1)
		return *startp;

	NOTE(REG_ULOCALE);

	/* search table */
	for (cn = cnames; cn->name != NULL; cn++)
	{
		if (strlen(cn->name) == len &&
			pg_char_and_wchar_strncmp(cn->name, startp, len) == 0)
		{
			break;				/* NOTE BREAK OUT */
		}
	}
	if (cn->name != NULL)
		return CHR(cn->code);

	/* couldn't find it */
	ERR(REG_ECOLLATE);
	return 0;
}
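
/*
 * Example (illustrative, assuming the standard bracket-expression syntax
 * handled by regcomp.c): a collating-element name such as "comma", written
 * between "[." and ".]", resolves through the table above to the chr ',',
 * while a one-character name such as "x" simply stands for itself.
 */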
/*
* range - supply cvec for a range, including legality check
*/
static struct cvec *
range(struct vars * v,			/* context */
	  chr a,					/* range start */
	  chr b,					/* range end, might equal a */
	  int cases)				/* case-independent? */
{
	int			nchrs;
	struct cvec *cv;
	chr			c,
				cc;

	if (a != b && !before(a, b))
	{
		ERR(REG_ERANGE);
		return NULL;
	}

	if (!cases)
	{							/* easy version */
		cv = getcvec(v, 0, 1);
		NOERRN();
		addrange(cv, a, b);
		return cv;
	}

	/*
	 * When case-independent, it's hard to decide when cvec ranges are
	 * usable, so for now at least, we won't try.  We use a range for the
	 * originally specified chrs and then add on any case-equivalents that
	 * are outside that range as individual chrs.
	 *
	 * To ensure sane behavior if someone specifies a very large range, limit
	 * the allocation size to 100000 chrs (arbitrary) and check for overrun
	 * inside the loop below.
	 */
	nchrs = b - a + 1;
	if (nchrs <= 0 || nchrs > 100000)
		nchrs = 100000;

	cv = getcvec(v, nchrs, 1);
	NOERRN();
	addrange(cv, a, b);

	for (c = a; c <= b; c++)
	{
		cc = pg_wc_tolower(c);
		if (cc != c &&
			(before(cc, a) || before(b, cc)))
		{
			if (cv->nchrs >= cv->chrspace)
			{
				ERR(REG_ETOOBIG);
				return NULL;
			}
			addchr(cv, cc);
		}
		cc = pg_wc_toupper(c);
		if (cc != c &&
			(before(cc, a) || before(b, cc)))
		{
			if (cv->nchrs >= cv->chrspace)
			{
				ERR(REG_ETOOBIG);
				return NULL;
			}
			addchr(cv, cc);
		}
		if (CANCEL_REQUESTED(v->re))
		{
			ERR(REG_CANCEL);
			return NULL;
		}
	}

	return cv;
}
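
/*
 * Example (illustrative, assuming a locale with conventional ASCII case
 * mapping): with cases = 1, range(v, 'a', 'f', 1) yields a cvec containing
 * the range a-f plus the individual chrs 'A' through 'F', since each
 * upper-case counterpart falls outside the originally specified range.
 */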
/*
* before - is chr x before chr y, for purposes of range legality?
*/
static int						/* predicate */
before(chr x, chr y)
{
	if (x < y)
		return 1;
	return 0;
}
/*
* eclass - supply cvec for an equivalence class
* Must include case counterparts on request.
*/
static struct cvec *
eclass(struct vars * v,			/* context */
	   chr c,					/* Collating element representing the
								 * equivalence class. */
	   int cases)				/* all cases? */
{
	struct cvec *cv;

	/* crude fake equivalence class for testing */
	if ((v->cflags & REG_FAKE) && c == 'x')
	{
		cv = getcvec(v, 4, 0);
		addchr(cv, CHR('x'));
		addchr(cv, CHR('y'));
		if (cases)
		{
			addchr(cv, CHR('X'));
			addchr(cv, CHR('Y'));
		}
		return cv;
	}

	/* otherwise, none */
	if (cases)
		return allcases(v, c);
	cv = getcvec(v, 1, 0);
	assert(cv != NULL);
	addchr(cv, c);
	return cv;
}
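
/*
 * Example (illustrative): outside the REG_FAKE testing mode, an equivalence
 * class degenerates to just its representative chr, plus that chr's case
 * counterparts (via allcases() below) when "cases" is requested.
 */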
/*
* cclass - supply cvec for a character class
*
* Must include case counterparts if "cases" is true.
*
* The returned cvec might be either a transient cvec gotten from getcvec(),
* or a permanently cached one from pg_ctype_get_cache(). This is okay
* because callers are not supposed to explicitly free the result either way.
*/
static struct cvec *
cclass(struct vars * v,			/* context */
	   const chr *startp,		/* where the name starts */
	   const chr *endp,			/* just past the end of the name */
	   int cases)				/* case-independent? */
{
	size_t		len;
	struct cvec *cv = NULL;
	const char *const * namePtr;
	int			i,
				index;

	/*
	 * Map the name to the corresponding enumerated value.
	 */
	len = endp - startp;
	index = -1;
	for (namePtr = classNames, i = 0; *namePtr != NULL; namePtr++, i++)
	{
		if (strlen(*namePtr) == len &&
			pg_char_and_wchar_strncmp(*namePtr, startp, len) == 0)
		{
			index = i;
			break;
		}
	}
	if (index == -1)
	{
		ERR(REG_ECTYPE);
		return NULL;
	}

	/*
	 * Remap lower and upper to alpha if the match is case insensitive.
	 */
	if (cases &&
		((enum classes) index == CC_LOWER ||
		 (enum classes) index == CC_UPPER))
		index = (int) CC_ALPHA;

	/*
	 * Now compute the character class contents.  For classes that are based
	 * on the behavior of a <wctype.h> or <ctype.h> function, we use
	 * pg_ctype_get_cache so that we can cache the results.  Other classes
	 * have definitions that are hard-wired here, and for those we just
	 * construct a transient cvec on the fly.
	 *
	 * NB: keep this code in sync with cclass_column_index(), below.
	 */
	switch ((enum classes) index)
	{
		case CC_PRINT:
			cv = pg_ctype_get_cache(pg_wc_isprint, index);
			break;
		case CC_ALNUM:
			cv = pg_ctype_get_cache(pg_wc_isalnum, index);
			break;
		case CC_ALPHA:
			cv = pg_ctype_get_cache(pg_wc_isalpha, index);
			break;
		case CC_ASCII:
			/* hard-wired meaning */
			cv = getcvec(v, 0, 1);
			if (cv)
				addrange(cv, 0, 0x7f);
			break;
		case CC_BLANK:
			/* hard-wired meaning */
			cv = getcvec(v, 2, 0);
			addchr(cv, '\t');
			addchr(cv, ' ');
			break;
		case CC_CNTRL:
			/* hard-wired meaning */
			cv = getcvec(v, 0, 2);
			addrange(cv, 0x0, 0x1f);
			addrange(cv, 0x7f, 0x9f);
			break;
		case CC_DIGIT:
			cv = pg_ctype_get_cache(pg_wc_isdigit, index);
			break;
		case CC_PUNCT:
			cv = pg_ctype_get_cache(pg_wc_ispunct, index);
			break;
		case CC_XDIGIT:

			/*
			 * It's not clear how to define this in non-western locales, and
			 * even less clear that there's any particular use in trying. So
			 * just hard-wire the meaning.
			 */
			cv = getcvec(v, 0, 3);
			if (cv)
			{
				addrange(cv, '0', '9');
				addrange(cv, 'a', 'f');
				addrange(cv, 'A', 'F');
			}
			break;
		case CC_SPACE:
			cv = pg_ctype_get_cache(pg_wc_isspace, index);
			break;
		case CC_LOWER:
			cv = pg_ctype_get_cache(pg_wc_islower, index);
			break;
		case CC_UPPER:
			cv = pg_ctype_get_cache(pg_wc_isupper, index);
			break;
		case CC_GRAPH:
			cv = pg_ctype_get_cache(pg_wc_isgraph, index);
			break;
	}

	/* If cv is NULL now, the reason must be "out of memory" */
	if (cv == NULL)
		ERR(REG_ESPACE);

	return cv;
}
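
/*
 * Example (illustrative): [[:digit:]] is resolved through
 * pg_ctype_get_cache() with pg_wc_isdigit and so follows the prevailing
 * locale, whereas [[:xdigit:]] and [[:ascii:]] always yield the hard-wired
 * ranges constructed above.
 */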
/*
* cclass_column_index - get appropriate high colormap column index for chr
*/
static int
cclass_column_index(struct colormap * cm, chr c)
{
	int			colnum = 0;

	/* Shouldn't go through all these pushups for simple chrs */
	assert(c > MAX_SIMPLE_CHR);

	/*
	 * Note: we should not see requests to consider cclasses that are not
	 * treated as locale-specific by cclass(), above.
	 */
	if (cm->classbits[CC_PRINT] && pg_wc_isprint(c))
		colnum |= cm->classbits[CC_PRINT];
	if (cm->classbits[CC_ALNUM] && pg_wc_isalnum(c))
		colnum |= cm->classbits[CC_ALNUM];
	if (cm->classbits[CC_ALPHA] && pg_wc_isalpha(c))
		colnum |= cm->classbits[CC_ALPHA];
	assert(cm->classbits[CC_ASCII] == 0);
	assert(cm->classbits[CC_BLANK] == 0);
	assert(cm->classbits[CC_CNTRL] == 0);
	if (cm->classbits[CC_DIGIT] && pg_wc_isdigit(c))
		colnum |= cm->classbits[CC_DIGIT];
	if (cm->classbits[CC_PUNCT] && pg_wc_ispunct(c))
		colnum |= cm->classbits[CC_PUNCT];
	assert(cm->classbits[CC_XDIGIT] == 0);
	if (cm->classbits[CC_SPACE] && pg_wc_isspace(c))
		colnum |= cm->classbits[CC_SPACE];
	if (cm->classbits[CC_LOWER] && pg_wc_islower(c))
		colnum |= cm->classbits[CC_LOWER];
	if (cm->classbits[CC_UPPER] && pg_wc_isupper(c))
		colnum |= cm->classbits[CC_UPPER];
	if (cm->classbits[CC_GRAPH] && pg_wc_isgraph(c))
		colnum |= cm->classbits[CC_GRAPH];

	return colnum;
}
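
/*
 * Example (illustrative): for a chr above MAX_SIMPLE_CHR that is a
 * lower-case letter, the result ORs together whichever of the CC_PRINT,
 * CC_GRAPH, CC_ALNUM, CC_ALPHA, and CC_LOWER bits the pattern actually set
 * in cm->classbits; classes not used by the pattern contribute nothing.
 */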
/*
* allcases - supply cvec for all case counterparts of a chr (including itself)
*
* This is a shortcut, preferably an efficient one, for simple characters;
* messy cases are done via range().
*/
static struct cvec *
allcases(struct vars * v,		/* context */
		 chr c)					/* character to get case equivs of */
{
	struct cvec *cv;
	chr			lc,
				uc;

	lc = pg_wc_tolower(c);
	uc = pg_wc_toupper(c);

	cv = getcvec(v, 2, 0);
	addchr(cv, lc);
	if (lc != uc)
		addchr(cv, uc);
	return cv;
}
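
/*
 * Example (illustrative, assuming conventional case mapping): allcases(v,
 * 'x') yields a cvec holding 'x' and 'X', while a caseless chr such as '7'
 * yields a cvec holding just that one chr.
 */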
/*
* cmp - chr-substring compare
*
* Backrefs need this. It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
*/
static int						/* 0 for equal, nonzero for unequal */
cmp(const chr *x, const chr *y, /* strings to compare */
	size_t len)					/* exact length of comparison */
{
	return memcmp(VS(x), VS(y), len * sizeof(chr));
}
/*
* casecmp - case-independent chr-substring compare
*
* REG_ICASE backrefs need this. It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
*/
static int						/* 0 for equal, nonzero for unequal */
casecmp(const chr *x, const chr *y,		/* strings to compare */
		size_t len)				/* exact length of comparison */
{
	for (; len > 0; len--, x++, y++)
	{
		if ((*x != *y) && (pg_wc_tolower(*x) != pg_wc_tolower(*y)))
			return 1;
	}
	return 0;
}