postgresql/src/backend/regex/regexec.c

/*
* re_*exec and friends - match REs
*
* Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
* thanks all of them.
*
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
*
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* HENRY SPENCER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* src/backend/regex/regexec.c
*
*/
#include "regex/regguts.h"
/* lazy-DFA representation */
struct arcp
{ /* "pointer" to an outarc */
struct sset *ss;
color co;
};
struct sset
{ /* state set */
unsigned *states; /* pointer to bitvector */
unsigned hash; /* hash of bitvector */
#define HASH(bv, nw) (((nw) == 1) ? *(bv) : hash(bv, nw))
#define HIT(h,bv,ss,nw) ((ss)->hash == (h) && ((nw) == 1 || \
memcmp(VS(bv), VS((ss)->states), (nw)*sizeof(unsigned)) == 0))
int flags;
#define STARTER 01 /* the initial state set */
#define POSTSTATE 02 /* includes the goal state */
#define LOCKED 04 /* locked in cache */
#define NOPROGRESS 010 /* zero-progress state set */
struct arcp ins; /* chain of inarcs pointing here */
chr *lastseen; /* last entered on arrival here */
struct sset **outs; /* outarc vector indexed by color */
struct arcp *inchain; /* chain-pointer vector for outarcs */
};
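/*
 * These macros support state-set cache lookup: HASH() hashes a state-set
 * bitvector (a one-word bitvector is its own hash), and HIT() tests a
 * candidate cache entry by comparing the stored hash first and falling back
 * to a full memcmp of the bitvectors only for multi-word sets.
 */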
struct dfa
{
int nssets; /* size of cache */
int nssused; /* how many entries occupied yet */
int nstates; /* number of states */
int ncolors; /* length of outarc and inchain vectors */
int wordsper; /* length of state-set bitvectors */
struct sset *ssets; /* state-set cache */
unsigned *statesarea; /* bitvector storage */
unsigned *work; /* pointer to work area within statesarea */
struct sset **outsarea; /* outarc-vector storage */
struct arcp *incarea; /* inchain storage */
struct cnfa *cnfa;
struct colormap *cm;
chr *lastpost; /* location of last cache-flushed success */
chr *lastnopr; /* location of last cache-flushed NOPROGRESS */
struct sset *search; /* replacement-search-pointer memory */
int cptsmalloced; /* were the areas individually malloced? */
char *mallocarea; /* self, or master malloced area, or NULL */
};
#define WORK 1 /* number of work bitvectors needed */
/* setup for non-malloc allocation for small cases */
#define FEWSTATES 20 /* must be less than UBITS */
#define FEWCOLORS 15
struct smalldfa
{
struct dfa dfa;
struct sset ssets[FEWSTATES * 2];
unsigned statesarea[FEWSTATES * 2 + WORK];
struct sset *outsarea[FEWSTATES * 2 * FEWCOLORS];
struct arcp incarea[FEWSTATES * 2 * FEWCOLORS];
};
#define DOMALLOC ((struct smalldfa *)NULL) /* force malloc */
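/*
 * A struct smalldfa bundles a dfa header with preallocated sset, bitvector,
 * outarc, and inchain storage sized by FEWSTATES/FEWCOLORS, so a small
 * enough state machine can use a caller-provided area (e.g. v->dfa1/v->dfa2
 * below) instead of malloc; passing DOMALLOC (a NULL smalldfa pointer)
 * forces the malloc path.
 */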
/* internal variables, bundled for easy passing around */
struct vars
{
regex_t *re;
struct guts *g;
int eflags; /* copies of arguments */
size_t nmatch;
regmatch_t *pmatch;
rm_detail_t *details;
chr *start; /* start of string */
chr *search_start; /* search start of string */
chr *stop; /* just past end of string */
int err; /* error code if any (0 none) */
struct dfa **subdfas; /* per-tree-subre DFAs */
struct dfa **ladfas; /* per-lacon-subre DFAs */
struct sset **lblastcss; /* per-lacon-subre lookbehind restart data */
chr **lblastcp; /* per-lacon-subre lookbehind restart data */
struct smalldfa dfa1;
struct smalldfa dfa2;
};
#define VISERR(vv) ((vv)->err != 0) /* have we seen an error yet? */
#define ISERR() VISERR(v)
#define VERR(vv,e) ((vv)->err = ((vv)->err ? (vv)->err : (e)))
#define ERR(e) VERR(v, e) /* record an error */
#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return it */
#define OFF(p) ((p) - v->start)
#define LOFF(p) ((long)OFF(p))
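/*
 * OFF() converts a chr pointer within the subject string into an offset from
 * v->start, as stored into regmatch_t rm_so/rm_eo; LOFF() is the same value
 * cast to long for MDEBUG output.
 */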
/*
* forward declarations
*/
/* === regexec.c === */
static struct dfa *getsubdfa(struct vars *, struct subre *);
static struct dfa *getladfa(struct vars *, int);
static int find(struct vars *, struct cnfa *, struct colormap *);
static int cfind(struct vars *, struct cnfa *, struct colormap *);
static int cfindloop(struct vars *, struct cnfa *, struct colormap *, struct dfa *, struct dfa *, chr **);
static void zapallsubs(regmatch_t *, size_t);
static void zaptreesubs(struct vars *, struct subre *);
static void subset(struct vars *, struct subre *, chr *, chr *);
static int cdissect(struct vars *, struct subre *, chr *, chr *);
static int ccondissect(struct vars *, struct subre *, chr *, chr *);
static int crevcondissect(struct vars *, struct subre *, chr *, chr *);
static int cbrdissect(struct vars *, struct subre *, chr *, chr *);
static int caltdissect(struct vars *, struct subre *, chr *, chr *);
static int citerdissect(struct vars *, struct subre *, chr *, chr *);
static int creviterdissect(struct vars *, struct subre *, chr *, chr *);
/* === rege_dfa.c === */
static chr *longest(struct vars *, struct dfa *, chr *, chr *, int *);
static chr *shortest(struct vars *, struct dfa *, chr *, chr *, chr *, chr **, int *);
static int matchuntil(struct vars *, struct dfa *, chr *, struct sset **, chr **);
static chr *lastcold(struct vars *, struct dfa *);
static struct dfa *newdfa(struct vars *, struct cnfa *, struct colormap *, struct smalldfa *);
static void freedfa(struct dfa *);
static unsigned hash(unsigned *, int);
static struct sset *initialize(struct vars *, struct dfa *, chr *);
static struct sset *miss(struct vars *, struct dfa *, struct sset *, color, chr *, chr *);
static int lacon(struct vars *, struct cnfa *, chr *, color);
static struct sset *getvacant(struct vars *, struct dfa *, chr *, chr *);
static struct sset *pickss(struct vars *, struct dfa *, chr *, chr *);
/*
* pg_regexec - match regular expression
*/
int
pg_regexec(regex_t *re,
const chr *string,
size_t len,
size_t search_start,
rm_detail_t *details,
size_t nmatch,
regmatch_t pmatch[],
int flags)
{
struct vars var;
register struct vars *v = &var;
int st;
size_t n;
size_t i;
int backref;
#define LOCALMAT 20
regmatch_t mat[LOCALMAT];
#define LOCALDFAS 40
struct dfa *subdfas[LOCALDFAS];
/* sanity checks */
if (re == NULL || string == NULL || re->re_magic != REMAGIC)
return REG_INVARG;
if (re->re_csize != sizeof(chr))
return REG_MIXED;
/* Initialize locale-dependent support */
pg_set_regex_collation(re->re_collation);
/* setup */
v->re = re;
v->g = (struct guts *) re->re_guts;
if ((v->g->cflags & REG_EXPECT) && details == NULL)
return REG_INVARG;
if (v->g->info & REG_UIMPOSSIBLE)
return REG_NOMATCH;
backref = (v->g->info & REG_UBACKREF) ? 1 : 0;
v->eflags = flags;
if (v->g->cflags & REG_NOSUB)
nmatch = 0; /* override client */
v->nmatch = nmatch;
if (backref)
{
/* need work area */
if (v->g->nsub + 1 <= LOCALMAT)
v->pmatch = mat;
else
v->pmatch = (regmatch_t *) MALLOC((v->g->nsub + 1) *
sizeof(regmatch_t));
if (v->pmatch == NULL)
return REG_ESPACE;
v->nmatch = v->g->nsub + 1;
}
else
v->pmatch = pmatch;
v->details = details;
v->start = (chr *) string;
v->search_start = (chr *) string + search_start;
v->stop = (chr *) string + len;
v->err = 0;
v->subdfas = NULL;
v->ladfas = NULL;
v->lblastcss = NULL;
v->lblastcp = NULL;
/* below this point, "goto cleanup" will behave sanely */
assert(v->g->ntree >= 0);
n = (size_t) v->g->ntree;
if (n <= LOCALDFAS)
v->subdfas = subdfas;
else
{
v->subdfas = (struct dfa **) MALLOC(n * sizeof(struct dfa *));
if (v->subdfas == NULL)
{
st = REG_ESPACE;
goto cleanup;
}
}
for (i = 0; i < n; i++)
v->subdfas[i] = NULL;
assert(v->g->nlacons >= 0);
n = (size_t) v->g->nlacons;
if (n > 0)
{
v->ladfas = (struct dfa **) MALLOC(n * sizeof(struct dfa *));
if (v->ladfas == NULL)
{
st = REG_ESPACE;
goto cleanup;
}
for (i = 0; i < n; i++)
v->ladfas[i] = NULL;
v->lblastcss = (struct sset **) MALLOC(n * sizeof(struct sset *));
v->lblastcp = (chr **) MALLOC(n * sizeof(chr *));
if (v->lblastcss == NULL || v->lblastcp == NULL)
{
st = REG_ESPACE;
goto cleanup;
}
for (i = 0; i < n; i++)
{
v->lblastcss[i] = NULL;
v->lblastcp[i] = NULL;
}
}
/* do it */
assert(v->g->tree != NULL);
if (backref)
st = cfind(v, &v->g->tree->cnfa, &v->g->cmap);
else
st = find(v, &v->g->tree->cnfa, &v->g->cmap);
/* copy (portion of) match vector over if necessary */
if (st == REG_OKAY && v->pmatch != pmatch && nmatch > 0)
{
zapallsubs(pmatch, nmatch);
n = (nmatch < v->nmatch) ? nmatch : v->nmatch;
memcpy(VS(pmatch), VS(v->pmatch), n * sizeof(regmatch_t));
}
/* clean up */
cleanup:
if (v->pmatch != pmatch && v->pmatch != mat)
FREE(v->pmatch);
if (v->subdfas != NULL)
{
n = (size_t) v->g->ntree;
for (i = 0; i < n; i++)
{
if (v->subdfas[i] != NULL)
freedfa(v->subdfas[i]);
}
if (v->subdfas != subdfas)
FREE(v->subdfas);
}
if (v->ladfas != NULL)
{
n = (size_t) v->g->nlacons;
for (i = 0; i < n; i++)
{
if (v->ladfas[i] != NULL)
freedfa(v->ladfas[i]);
}
FREE(v->ladfas);
}
if (v->lblastcss != NULL)
FREE(v->lblastcss);
if (v->lblastcp != NULL)
FREE(v->lblastcp);
return st;
}
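/*
 * Illustrative (hypothetical) caller sketch, not taken from this file: the
 * regex is first built with pg_regcomp(), the subject is passed as an array
 * of chr with an explicit length, and offsets come back in a caller-supplied
 * regmatch_t vector.
 *
 *		regmatch_t	pmatch[3];
 *		int			st;
 *
 *		st = pg_regexec(&re, data, datalen, 0, NULL, 3, pmatch, 0);
 *		if (st == REG_OKAY)
 *		{
 *			// pmatch[0].rm_so/rm_eo bound the whole match;
 *			// pmatch[1..] hold capturing-paren submatches
 *		}
 *		else if (st != REG_NOMATCH)
 *		{
 *			// some other failure; see pg_regerror() for the message
 *		}
 */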
/*
* getsubdfa - create or re-fetch the DFA for a tree subre node
*
* We only need to create the DFA once per overall regex execution.
* The DFA will be freed by the cleanup step in pg_regexec().
*/
static struct dfa *
getsubdfa(struct vars *v,
struct subre *t)
{
if (v->subdfas[t->id] == NULL)
{
v->subdfas[t->id] = newdfa(v, &t->cnfa, &v->g->cmap, DOMALLOC);
if (ISERR())
return NULL;
}
return v->subdfas[t->id];
}
/*
* getladfa - create or re-fetch the DFA for a LACON subre node
*
* Same as above, but for LACONs.
*/
static struct dfa *
getladfa(struct vars *v,
int n)
{
assert(n > 0 && n < v->g->nlacons && v->g->lacons != NULL);
if (v->ladfas[n] == NULL)
{
struct subre *sub = &v->g->lacons[n];
v->ladfas[n] = newdfa(v, &sub->cnfa, &v->g->cmap, DOMALLOC);
if (ISERR())
return NULL;
}
return v->ladfas[n];
}
/*
* find - find a match for the main NFA (no-complications case)
*/
static int
find(struct vars *v,
struct cnfa *cnfa,
struct colormap *cm)
{
struct dfa *s;
struct dfa *d;
chr *begin;
chr *end = NULL;
chr *cold;
chr *open; /* open and close of range of possible starts */
chr *close;
int hitend;
int shorter = (v->g->tree->flags & SHORTER) ? 1 : 0;
/* first, a shot with the search RE */
s = newdfa(v, &v->g->search, cm, &v->dfa1);
assert(!(ISERR() && s != NULL));
NOERR();
MDEBUG(("\nsearch at %ld\n", LOFF(v->start)));
cold = NULL;
close = shortest(v, s, v->search_start, v->search_start, v->stop,
&cold, (int *) NULL);
freedfa(s);
NOERR();
if (v->g->cflags & REG_EXPECT)
{
assert(v->details != NULL);
if (cold != NULL)
v->details->rm_extend.rm_so = OFF(cold);
else
v->details->rm_extend.rm_so = OFF(v->stop);
v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
}
if (close == NULL) /* not found */
return REG_NOMATCH;
if (v->nmatch == 0) /* found, don't need exact location */
return REG_OKAY;
/* find starting point and match */
assert(cold != NULL);
open = cold;
cold = NULL;
MDEBUG(("between %ld and %ld\n", LOFF(open), LOFF(close)));
d = newdfa(v, cnfa, cm, &v->dfa1);
assert(!(ISERR() && d != NULL));
NOERR();
for (begin = open; begin <= close; begin++)
{
MDEBUG(("\nfind trying at %ld\n", LOFF(begin)));
if (shorter)
end = shortest(v, d, begin, begin, v->stop,
(chr **) NULL, &hitend);
else
end = longest(v, d, begin, v->stop, &hitend);
if (ISERR())
{
freedfa(d);
return v->err;
}
if (hitend && cold == NULL)
cold = begin;
if (end != NULL)
break; /* NOTE BREAK OUT */
}
assert(end != NULL); /* search RE succeeded so loop should */
freedfa(d);
/* and pin down details */
assert(v->nmatch > 0);
v->pmatch[0].rm_so = OFF(begin);
v->pmatch[0].rm_eo = OFF(end);
if (v->g->cflags & REG_EXPECT)
{
if (cold != NULL)
v->details->rm_extend.rm_so = OFF(cold);
else
v->details->rm_extend.rm_so = OFF(v->stop);
v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
}
if (v->nmatch == 1) /* no need for submatches */
return REG_OKAY;
/* find submatches */
zapallsubs(v->pmatch, v->nmatch);
return cdissect(v, v->g->tree, begin, end);
}
/*
* cfind - find a match for the main NFA (with complications)
*/
static int
cfind(struct vars *v,
struct cnfa *cnfa,
struct colormap *cm)
{
struct dfa *s;
struct dfa *d;
chr *cold;
int ret;
s = newdfa(v, &v->g->search, cm, &v->dfa1);
NOERR();
d = newdfa(v, cnfa, cm, &v->dfa2);
if (ISERR())
{
assert(d == NULL);
freedfa(s);
return v->err;
}
ret = cfindloop(v, cnfa, cm, d, s, &cold);
freedfa(d);
freedfa(s);
NOERR();
if (v->g->cflags & REG_EXPECT)
{
assert(v->details != NULL);
if (cold != NULL)
v->details->rm_extend.rm_so = OFF(cold);
else
v->details->rm_extend.rm_so = OFF(v->stop);
v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
}
return ret;
}
/*
* cfindloop - the heart of cfind
*/
static int
cfindloop(struct vars *v,
struct cnfa *cnfa,
struct colormap *cm,
struct dfa *d,
struct dfa *s,
chr **coldp) /* where to put coldstart pointer */
{
chr *begin;
chr *end;
chr *cold;
chr *open; /* open and close of range of possible starts */
chr *close;
chr *estart;
chr *estop;
int er;
int shorter = v->g->tree->flags & SHORTER;
int hitend;
assert(d != NULL && s != NULL);
cold = NULL;
close = v->search_start;
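/*
 * Overall loop structure: the outer do-loop advances "close" with the search
 * RE (via shortest()) to find each candidate match range; the middle loop
 * tries every possible start "begin" in [open, close]; the innermost loop
 * re-runs the top-level RE with adjusted estart/estop bounds when a
 * DFA-level match cannot be dissected into a valid submatch assignment,
 * which can happen only in the presence of backrefs.
 */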
do
{
/* Search with the search RE for match range at/beyond "close" */
MDEBUG(("\ncsearch at %ld\n", LOFF(close)));
close = shortest(v, s, close, close, v->stop, &cold, (int *) NULL);
if (ISERR())
{
*coldp = cold;
return v->err;
}
if (close == NULL)
break; /* no more possible match anywhere */
assert(cold != NULL);
open = cold;
cold = NULL;
/* Search for matches starting between "open" and "close" inclusive */
MDEBUG(("cbetween %ld and %ld\n", LOFF(open), LOFF(close)));
for (begin = open; begin <= close; begin++)
{
MDEBUG(("\ncfind trying at %ld\n", LOFF(begin)));
estart = begin;
estop = v->stop;
for (;;)
{
/* Here we use the top node's detailed RE */
if (shorter)
end = shortest(v, d, begin, estart,
estop, (chr **) NULL, &hitend);
else
end = longest(v, d, begin, estop,
&hitend);
if (ISERR())
{
*coldp = cold;
return v->err;
}
if (hitend && cold == NULL)
cold = begin;
if (end == NULL)
break; /* no match with this begin point, try next */
MDEBUG(("tentative end %ld\n", LOFF(end)));
/* Dissect the potential match to see if it really matches */
zapallsubs(v->pmatch, v->nmatch);
er = cdissect(v, v->g->tree, begin, end);
if (er == REG_OKAY)
{
if (v->nmatch > 0)
{
v->pmatch[0].rm_so = OFF(begin);
v->pmatch[0].rm_eo = OFF(end);
}
*coldp = cold;
return REG_OKAY;
}
if (er != REG_NOMATCH)
{
ERR(er);
*coldp = cold;
return er;
}
/* Try next longer/shorter match with same begin point */
if (shorter)
{
if (end == estop)
break; /* no more, so try next begin point */
estart = end + 1;
}
else
{
if (end == begin)
break; /* no more, so try next begin point */
estop = end - 1;
}
} /* end loop over endpoint positions */
} /* end loop over beginning positions */
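		/*
		 * Hypothetical illustration of the scan order above: with open = 0
		 * and close = 2, and a greedy (non-shorter) top node, we try
		 * begin = 0 with successively shorter tentative ends (estop
		 * shrinking toward begin), then begin = 1, then begin = 2, before
		 * falling through to advance "close" below.
		 */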
/*
* If we get here, there is no possible match starting at or before
* "close", so consider matches beyond that. We'll do a fresh search
* with the search RE to find a new promising match range.
*/
close++;
} while (close < v->stop);
*coldp = cold;
return REG_NOMATCH;
}
/*
* zapallsubs - initialize all subexpression matches to "no match"
*/
static void
zapallsubs(regmatch_t *p,
size_t n)
{
size_t i;
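	/*
	 * Note that slot 0 (the overall match) is deliberately left alone here;
	 * cfindloop fills in pmatch[0] itself when a match succeeds.
	 */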
for (i = n - 1; i > 0; i--)
{
p[i].rm_so = -1;
p[i].rm_eo = -1;
}
}
/*
* zaptreesubs - initialize subexpressions within subtree to "no match"
*/
static void
zaptreesubs(struct vars *v,
struct subre *t)
{
if (t->op == '(')
{
int n = t->subno;
assert(n > 0);
if ((size_t) n < v->nmatch)
{
v->pmatch[n].rm_so = -1;
v->pmatch[n].rm_eo = -1;
}
}
if (t->left != NULL)
zaptreesubs(v, t->left);
if (t->right != NULL)
zaptreesubs(v, t->right);
}
/*
* subset - set subexpression match data for a successful subre
*/
static void
subset(struct vars *v,
struct subre *sub,
chr *begin,
chr *end)
{
int n = sub->subno;
assert(n > 0);
if ((size_t) n >= v->nmatch)
return;
MDEBUG(("setting %d\n", n));
v->pmatch[n].rm_so = OFF(begin);
v->pmatch[n].rm_eo = OFF(end);
}
/*
* cdissect - check backrefs and determine subexpression matches
*
* cdissect recursively processes a subre tree to check matching of backrefs
* and/or identify submatch boundaries for capture nodes. The proposed match
* runs from "begin" to "end" (not including "end"), and we are basically
* "dissecting" it to see where the submatches are.
*
* Before calling any level of cdissect, the caller must have run the node's
* DFA and found that the proposed substring satisfies the DFA. (We make
* the caller do that because in concatenation and iteration nodes, it's
* much faster to check all the substrings against the child DFAs before we
* recurse.) Also, caller must have cleared subexpression match data via
* zaptreesubs (or zapallsubs at the top level).
*/
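/*
 * As a rough, hypothetical illustration: for the RE "(a+)(b+)" and the
 * proposed match "aab", the concatenation node's dissection looks for a
 * midpoint such that "aa" satisfies the left child's DFA and "b" satisfies
 * the right child's, then recurses into each capture node, which records
 * pmatch[1] = "aa" and pmatch[2] = "b" via subset().
 */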
static int /* regexec return code */
cdissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
int er;
assert(t != NULL);
MDEBUG(("cdissect %ld-%ld %c\n", LOFF(begin), LOFF(end), t->op));
/* handy place to check for operation cancel */
if (CANCEL_REQUESTED(v->re))
return REG_CANCEL;
/* ... and stack overrun */
if (STACK_TOO_DEEP(v->re))
return REG_ETOOBIG;
switch (t->op)
{
case '=': /* terminal node */
assert(t->left == NULL && t->right == NULL);
er = REG_OKAY; /* no action, parent did the work */
break;
case 'b': /* back reference */
assert(t->left == NULL && t->right == NULL);
er = cbrdissect(v, t, begin, end);
break;
case '.': /* concatenation */
assert(t->left != NULL && t->right != NULL);
if (t->left->flags & SHORTER) /* reverse scan */
er = crevcondissect(v, t, begin, end);
else
er = ccondissect(v, t, begin, end);
break;
case '|': /* alternation */
assert(t->left != NULL);
er = caltdissect(v, t, begin, end);
break;
case '*': /* iteration */
assert(t->left != NULL);
if (t->left->flags & SHORTER) /* reverse scan */
er = creviterdissect(v, t, begin, end);
else
er = citerdissect(v, t, begin, end);
break;
case '(': /* capturing */
assert(t->left != NULL && t->right == NULL);
assert(t->subno > 0);
er = cdissect(v, t->left, begin, end);
if (er == REG_OKAY)
subset(v, t, begin, end);
break;
default:
er = REG_ASSERT;
break;
}
/*
* We should never have a match failure unless backrefs lurk below;
* otherwise, either caller failed to check the DFA, or there's some
* inconsistency between the DFA and the node's innards.
*/
assert(er != REG_NOMATCH || (t->flags & BACKR));
return er;
}
/*
* ccondissect - dissect match for concatenation node
*/
static int /* regexec return code */
ccondissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
struct dfa *d;
struct dfa *d2;
chr *mid;
int er;
assert(t->op == '.');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
assert(t->right != NULL && t->right->cnfa.nstates > 0);
assert(!(t->left->flags & SHORTER));
d = getsubdfa(v, t->left);
NOERR();
d2 = getsubdfa(v, t->right);
NOERR();
MDEBUG(("cconcat %d\n", t->id));
/* pick a tentative midpoint */
mid = longest(v, d, begin, end, (int *) NULL);
NOERR();
if (mid == NULL)
return REG_NOMATCH;
MDEBUG(("tentative midpoint %ld\n", LOFF(mid)));
/* iterate until satisfaction or failure */
for (;;)
{
/* try this midpoint on for size */
if (longest(v, d2, mid, end, (int *) NULL) == end)
{
er = cdissect(v, t->left, begin, mid);
if (er == REG_OKAY)
{
er = cdissect(v, t->right, mid, end);
if (er == REG_OKAY)
{
/* satisfaction */
MDEBUG(("successful\n"));
return REG_OKAY;
}
}
if (er != REG_NOMATCH)
return er;
}
NOERR();
/* that midpoint didn't work, find a new one */
if (mid == begin)
{
/* all possibilities exhausted */
MDEBUG(("%d no midpoint\n", t->id));
return REG_NOMATCH;
}
mid = longest(v, d, begin, mid - 1, (int *) NULL);
NOERR();
if (mid == NULL)
{
/* failed to find a new one */
MDEBUG(("%d failed midpoint\n", t->id));
return REG_NOMATCH;
}
MDEBUG(("%d: new midpoint %ld\n", t->id, LOFF(mid)));
zaptreesubs(v, t->left);
zaptreesubs(v, t->right);
}
/* can't get here */
return REG_ASSERT;
}
/*
* crevcondissect - dissect match for concatenation node, shortest-first
*/
static int /* regexec return code */
crevcondissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
struct dfa *d;
struct dfa *d2;
chr *mid;
int er;
assert(t->op == '.');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
assert(t->right != NULL && t->right->cnfa.nstates > 0);
assert(t->left->flags & SHORTER);
d = getsubdfa(v, t->left);
NOERR();
d2 = getsubdfa(v, t->right);
NOERR();
MDEBUG(("crevcon %d\n", t->id));
/* pick a tentative midpoint */
mid = shortest(v, d, begin, begin, end, (chr **) NULL, (int *) NULL);
NOERR();
if (mid == NULL)
return REG_NOMATCH;
MDEBUG(("tentative midpoint %ld\n", LOFF(mid)));
/* iterate until satisfaction or failure */
for (;;)
{
/* try this midpoint on for size */
if (longest(v, d2, mid, end, (int *) NULL) == end)
{
er = cdissect(v, t->left, begin, mid);
if (er == REG_OKAY)
{
er = cdissect(v, t->right, mid, end);
if (er == REG_OKAY)
{
/* satisfaction */
MDEBUG(("successful\n"));
return REG_OKAY;
}
}
if (er != REG_NOMATCH)
return er;
}
NOERR();
/* that midpoint didn't work, find a new one */
if (mid == end)
{
/* all possibilities exhausted */
MDEBUG(("%d no midpoint\n", t->id));
return REG_NOMATCH;
}
mid = shortest(v, d, begin, mid + 1, end, (chr **) NULL, (int *) NULL);
NOERR();
if (mid == NULL)
{
/* failed to find a new one */
MDEBUG(("%d failed midpoint\n", t->id));
return REG_NOMATCH;
}
MDEBUG(("%d: new midpoint %ld\n", t->id, LOFF(mid)));
zaptreesubs(v, t->left);
zaptreesubs(v, t->right);
}
/* can't get here */
return REG_ASSERT;
}
/*
* cbrdissect - dissect match for backref node
*/
static int /* regexec return code */
cbrdissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
int n = t->subno;
size_t numreps;
size_t tlen;
size_t brlen;
chr *brstring;
chr *p;
int min = t->min;
int max = t->max;
assert(t != NULL);
assert(t->op == 'b');
assert(n >= 0);
assert((size_t) n < v->nmatch);
MDEBUG(("cbackref n%d %d{%d-%d}\n", t->id, n, min, max));
/* get the backreferenced string */
if (v->pmatch[n].rm_so == -1)
return REG_NOMATCH;
brstring = v->start + v->pmatch[n].rm_so;
brlen = v->pmatch[n].rm_eo - v->pmatch[n].rm_so;
/* special cases for zero-length strings */
if (brlen == 0)
{
/*
* matches only if target is zero length, but any number of
* repetitions can be considered to be present
*/
if (begin == end && min <= max)
{
MDEBUG(("cbackref matched trivially\n"));
return REG_OKAY;
}
return REG_NOMATCH;
}
if (begin == end)
{
/* matches only if zero repetitions are okay */
if (min == 0)
{
MDEBUG(("cbackref matched trivially\n"));
return REG_OKAY;
}
return REG_NOMATCH;
}
/*
* check target length to see if it could possibly be an allowed number of
* repetitions of brstring
*/
assert(end > begin);
tlen = end - begin;
if (tlen % brlen != 0)
return REG_NOMATCH;
numreps = tlen / brlen;
if (numreps < min || (numreps > max && max != DUPINF))
return REG_NOMATCH;
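/*
 * For example, if the backreference captured "ab" (brlen = 2) and the
 * target substring is "ababab" (tlen = 6), numreps is 3.  A target whose
 * length is not a multiple of brlen, or whose implied repetition count
 * falls outside [min, max], is rejected here before any characters are
 * compared.
 */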
/* okay, compare the actual string contents */
p = begin;
while (numreps-- > 0)
{
if ((*v->g->compare) (brstring, p, brlen) != 0)
return REG_NOMATCH;
p += brlen;
}
MDEBUG(("cbackref matched\n"));
return REG_OKAY;
}
/*
* caltdissect - dissect match for alternation node
*/
static int /* regexec return code */
caltdissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
struct dfa *d;
int er;
/* We loop, rather than tail-recurse, to handle a chain of alternatives */
while (t != NULL)
{
assert(t->op == '|');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
MDEBUG(("calt n%d\n", t->id));
d = getsubdfa(v, t->left);
NOERR();
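/*
 * Note the two-step check: longest() is a comparatively cheap DFA test
 * that this branch could span the whole substring; only then do we pay
 * for cdissect(), which also validates captures and backrefs.  A
 * REG_NOMATCH from cdissect() just sends us on to the next branch.
 */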
if (longest(v, d, begin, end, (int *) NULL) == end)
{
MDEBUG(("calt matched\n"));
er = cdissect(v, t->left, begin, end);
if (er != REG_NOMATCH)
return er;
}
NOERR();
t = t->right;
}
return REG_NOMATCH;
}
/*
* citerdissect - dissect match for iteration node
*/
static int /* regexec return code */
citerdissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
struct dfa *d;
chr **endpts;
chr *limit;
int min_matches;
size_t max_matches;
int nverified;
int k;
int i;
int er;
assert(t->op == '*');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
assert(!(t->left->flags & SHORTER));
assert(begin <= end);
/*
* For the moment, assume the minimum number of matches is 1. If zero
* matches are allowed, and the target string is empty, we are allowed to
* match regardless of the contents of the iter node --- but we would
* prefer to match once, so that capturing parens get set. (An example of
* the concern here is a pattern like "()*\1", which historically this
* code has allowed to succeed.) Therefore, we deal with the zero-matches
* case at the bottom, after failing to find any other way to match.
*/
min_matches = t->min;
if (min_matches <= 0)
min_matches = 1;
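/*
 * For instance, with t->min == 0 and a nonempty target, forcing
 * min_matches to 1 costs nothing, since zero iterations could only cover
 * an empty range anyway.  With an empty target, the zero-repetition
 * escape at the bottom of this function still applies if no split into
 * one or more sub-matches works out.
 */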
/*
* We need workspace to track the endpoints of each sub-match. Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
*
* For convenience, endpts[0] contains the "begin" pointer and we store
* sub-match endpoints in endpts[1..max_matches].
*/
max_matches = end - begin;
if (max_matches > t->max && t->max != DUPINF)
max_matches = t->max;
if (max_matches < min_matches)
max_matches = min_matches;
endpts = (chr **) MALLOC((max_matches + 1) * sizeof(chr *));
if (endpts == NULL)
return REG_ESPACE;
endpts[0] = begin;
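/*
 * With this layout, the k'th sub-match occupies endpts[k - 1] .. endpts[k],
 * which is what the cdissect() calls below verify.  For example, a target
 * of length 5 with t->max == DUPINF yields max_matches == 5, the most
 * nonzero-length pieces the range could possibly be cut into.
 */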
d = getsubdfa(v, t->left);
if (ISERR())
{
FREE(endpts);
return v->err;
}
MDEBUG(("citer %d\n", t->id));
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
* each sub-match to confirm validity. If any validity check fails,
* backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
*/
/* initialize to consider first sub-match */
nverified = 0;
k = 1;
limit = end;
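/*
 * Loosely, the loop below maintains: endpts[1..k-1] are tentative
 * endpoints accepted by the child DFA, endpts[1..nverified] have
 * additionally been confirmed by cdissect(), and "limit" caps how far
 * the k'th sub-match may extend on the current attempt.
 */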
/* iterate until satisfaction or failure */
while (k > 0)
{
/* try to find an endpoint for the k'th sub-match */
endpts[k] = longest(v, d, endpts[k - 1], limit, (int *) NULL);
if (ISERR())
{
FREE(endpts);
return v->err;
}
if (endpts[k] == NULL)
{
/* no match possible, so see if we can shorten previous one */
k--;
goto backtrack;
}
MDEBUG(("%d: working endpoint %d: %ld\n",
t->id, k, LOFF(endpts[k])));
/* k'th sub-match can no longer be considered verified */
if (nverified >= k)
nverified = k - 1;
if (endpts[k] != end)
{
/* haven't reached end yet, try another iteration if allowed */
if (k >= max_matches)
{
/* must try to shorten some previous match */
k--;
goto backtrack;
}
/* reject zero-length match unless necessary to achieve min */
if (endpts[k] == endpts[k - 1] &&
(k >= min_matches || min_matches - k < end - endpts[k]))
goto backtrack;
k++;
limit = end;
continue;
}
/*
* We've identified a way to divide the string into k sub-matches that
* works so far as the child DFA can tell. If k is an allowed number
* of matches, start the slow part: recurse to verify each sub-match.
* We always have k <= max_matches, needn't check that.
*/
if (k < min_matches)
goto backtrack;
MDEBUG(("%d: verifying %d..%d\n", t->id, nverified + 1, k));
for (i = nverified + 1; i <= k; i++)
{
zaptreesubs(v, t->left);
er = cdissect(v, t->left, endpts[i - 1], endpts[i]);
if (er == REG_OKAY)
{
nverified = i;
continue;
}
if (er == REG_NOMATCH)
break;
/* oops, something failed */
FREE(endpts);
return er;
}
if (i > k)
{
/* satisfaction */
MDEBUG(("%d successful\n", t->id));
FREE(endpts);
return REG_OKAY;
}
/* match failed to verify, so backtrack */
backtrack:
/*
* Must consider shorter versions of the current sub-match. However,
* we'll only ask for a zero-length match if necessary.
*/
while (k > 0)
{
chr *prev_end = endpts[k - 1];
if (endpts[k] > prev_end)
{
limit = endpts[k] - 1;
if (limit > prev_end ||
(k < min_matches && min_matches - k >= end - prev_end))
{
/* break out of backtrack loop, continue the outer one */
break;
}
}
/* can't shorten k'th sub-match any more, consider previous one */
k--;
}
}
/* all possibilities exhausted */
FREE(endpts);
/*
* Now consider the possibility that we can match to a zero-length string
* by using zero repetitions.
*/
if (t->min == 0 && begin == end)
{
MDEBUG(("%d allowing zero matches\n", t->id));
return REG_OKAY;
}
MDEBUG(("%d failed\n", t->id));
return REG_NOMATCH;
}
/*
* creviterdissect - dissect match for iteration node, shortest-first
*/
static int /* regexec return code */
creviterdissect(struct vars *v,
struct subre *t,
chr *begin, /* beginning of relevant substring */
chr *end) /* end of same */
{
struct dfa *d;
chr **endpts;
chr *limit;
int min_matches;
size_t max_matches;
int nverified;
int k;
int i;
int er;
assert(t->op == '*');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
assert(t->left->flags & SHORTER);
assert(begin <= end);
/*
* If zero matches are allowed, and target string is empty, just declare
* victory. OTOH, if target string isn't empty, zero matches can't work
* so we pretend the min is 1.
*/
min_matches = t->min;
if (min_matches <= 0)
{
if (begin == end)
return REG_OKAY;
min_matches = 1;
}
/*
* We need workspace to track the endpoints of each sub-match. Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
*
* For convenience, endpts[0] contains the "begin" pointer and we store
* sub-match endpoints in endpts[1..max_matches].
*/
max_matches = end - begin;
if (max_matches > t->max && t->max != DUPINF)
max_matches = t->max;
if (max_matches < min_matches)
max_matches = min_matches;
endpts = (chr **) MALLOC((max_matches + 1) * sizeof(chr *));
if (endpts == NULL)
return REG_ESPACE;
endpts[0] = begin;
d = getsubdfa(v, t->left);
if (ISERR())
{
FREE(endpts);
return v->err;
}
MDEBUG(("creviter %d\n", t->id));
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
* each sub-match to confirm validity. If any validity check fails,
* backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
*/
/* initialize to consider first sub-match */
nverified = 0;
k = 1;
limit = begin;
/* iterate until satisfaction or failure */
while (k > 0)
{
/* disallow zero-length match unless necessary to achieve min */
if (limit == endpts[k - 1] &&
limit != end &&
(k >= min_matches || min_matches - k < end - limit))
limit++;
/* if this is the last allowed sub-match, it must reach to the end */
if (k >= max_matches)
limit = end;
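/*
 * Here "limit" is the earliest endpoint we will accept for the k'th
 * sub-match (shortest-first search), the mirror image of citerdissect()
 * where it is the latest.  The shortest() call below starts the scan at
 * endpts[k - 1], with limit and end bounding where the sub-match may
 * stop.
 */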
/* try to find an endpoint for the k'th sub-match */
endpts[k] = shortest(v, d, endpts[k - 1], limit, end,
(chr **) NULL, (int *) NULL);
if (ISERR())
{
FREE(endpts);
return v->err;
}
if (endpts[k] == NULL)
{
/* no match possible, so see if we can lengthen previous one */
k--;
goto backtrack;
}
MDEBUG(("%d: working endpoint %d: %ld\n",
t->id, k, LOFF(endpts[k])));
/* k'th sub-match can no longer be considered verified */
if (nverified >= k)
nverified = k - 1;
if (endpts[k] != end)
{
/* haven't reached end yet, try another iteration if allowed */
if (k >= max_matches)
{
/* must try to lengthen some previous match */
k--;
goto backtrack;
}
k++;
limit = endpts[k - 1];
continue;
}
/*
* We've identified a way to divide the string into k sub-matches that
* works so far as the child DFA can tell. If k is an allowed number
* of matches, start the slow part: recurse to verify each sub-match.
* We always have k <= max_matches, needn't check that.
*/
if (k < min_matches)
goto backtrack;
MDEBUG(("%d: verifying %d..%d\n", t->id, nverified + 1, k));
for (i = nverified + 1; i <= k; i++)
{
zaptreesubs(v, t->left);
er = cdissect(v, t->left, endpts[i - 1], endpts[i]);
if (er == REG_OKAY)
{
nverified = i;
continue;
}
if (er == REG_NOMATCH)
break;
/* oops, something failed */
FREE(endpts);
return er;
}
if (i > k)
{
/* satisfaction */
MDEBUG(("%d successful\n", t->id));
FREE(endpts);
return REG_OKAY;
}
/* match failed to verify, so backtrack */
backtrack:
/*
* Must consider longer versions of the current sub-match.
*/
while (k > 0)
{
if (endpts[k] < end)
{
limit = endpts[k] + 1;
/* break out of backtrack loop, continue the outer one */
break;
}
/* can't lengthen k'th sub-match any more, consider previous one */
k--;
}
}
/* all possibilities exhausted */
MDEBUG(("%d failed\n", t->id));
FREE(endpts);
return REG_NOMATCH;
}
#include "rege_dfa.c"