postgresql/src/backend/optimizer/util/orclauses.c

/*-------------------------------------------------------------------------
*
* orclauses.c
* Routines to extract restriction OR clauses from join OR clauses
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/optimizer/util/orclauses.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"

#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/orclauses.h"
#include "optimizer/restrictinfo.h"


static bool is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel);
static Expr *extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel);
static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
                                   Expr *orclause, RestrictInfo *join_or_rinfo);

/*
* extract_restriction_or_clauses
* Examine join OR-of-AND clauses to see if any useful restriction OR
* clauses can be extracted. If so, add them to the query.
*
* Although a join clause must reference multiple relations overall,
* an OR of ANDs clause might contain sub-clauses that reference just one
* relation and can be used to build a restriction clause for that rel.
* For example consider
* WHERE ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45));
* We can transform this into
* WHERE ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45))
* AND (a.x = 42 OR a.x = 44)
* AND (b.y = 43 OR b.z = 45);
* which allows the latter clauses to be applied during the scans of a and b,
* perhaps as index qualifications, and in any case reducing the number of
* rows arriving at the join. In essence this is a partial transformation to
* CNF (AND of ORs format). It is not complete, however, because we do not
* unravel the original OR --- doing so would usually bloat the qualification
* expression to little gain.
*
* The added quals are partially redundant with the original OR, and therefore
* would cause the size of the joinrel to be underestimated when it is finally
* formed. (This would be true of a full transformation to CNF as well; the
* fault is not really in the transformation, but in clauselist_selectivity's
* inability to recognize redundant conditions.) We can compensate for this
* redundancy by changing the cached selectivity of the original OR clause,
* canceling out the (valid) reduction in the estimated sizes of the base
* relations so that the estimated joinrel size remains the same. This is
* a MAJOR HACK: it depends on the fact that clause selectivities are cached
* and on the fact that the same RestrictInfo node will appear in every
* joininfo list that might be used when the joinrel is formed.
* And it doesn't work in cases where the size estimation is nonlinear
* (i.e., outer and IN joins). But it beats not doing anything.
*
* We examine each base relation to see if join clauses associated with it
* contain extractable restriction conditions. If so, add those conditions
* to the rel's baserestrictinfo and update the cached selectivities of the
* join clauses. Note that the same join clause will be examined afresh
* from the point of view of each baserel that participates in it, so its
* cached selectivity may get updated multiple times.
*/
void
extract_restriction_or_clauses(PlannerInfo *root)
{
Index rti;
/* Examine each baserel for potential join OR clauses */
for (rti = 1; rti < root->simple_rel_array_size; rti++)
{
RelOptInfo *rel = root->simple_rel_array[rti];
ListCell *lc;
/* there may be empty slots corresponding to non-baserel RTEs */
if (rel == NULL)
continue;
Assert(rel->relid == rti); /* sanity check on array */
/* ignore RTEs that are "other rels" */
if (rel->reloptkind != RELOPT_BASEREL)
continue;
/*
* Find potentially interesting OR joinclauses. We can use any
* joinclause that is considered safe to move to this rel by the
* parameterized-path machinery, even though what we are going to do
* with it is not exactly a parameterized path.
*
* However, it seems best to ignore clauses that have been marked
* redundant (by setting norm_selec > 1). That likely can't happen
* for OR clauses, but let's be safe.
*/
foreach(lc, rel->joininfo)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
if (restriction_is_or_clause(rinfo) &&
join_clause_is_movable_to(rinfo, rel) &&
rinfo->norm_selec <= 1)
{
/* Try to extract a qual for this rel only */
Expr *orclause = extract_or_clause(rinfo, rel);
/*
* If successful, decide whether we want to use the clause,
* and insert it into the rel's restrictinfo list if so.
*/
if (orclause)
consider_new_or_clause(root, rel, orclause, rinfo);
}
}
}
}

/*
* Is the given primitive (non-OR) RestrictInfo safe to move to the rel?
*/
static bool
is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel)
{
/*
* We want clauses that mention the rel, and only the rel. So in
* particular pseudoconstant clauses can be rejected quickly. Then check
* the clause's Var membership.
*/
if (rinfo->pseudoconstant)
return false;
if (!bms_equal(rinfo->clause_relids, rel->relids))
return false;
/* We don't want extra evaluations of any volatile functions */
if (contain_volatile_functions((Node *) rinfo->clause))
return false;
return true;
}

/*
* Try to extract a restriction clause mentioning only "rel" from the given
* join OR-clause.
*
* We must be able to extract at least one qual for this rel from each of
* the arms of the OR, else we can't use it.
*
* Returns an OR clause (not a RestrictInfo!) pertaining to rel, or NULL
* if no OR clause could be extracted.
*/
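/*
 * A concrete illustration, reusing the example from the header comment of
 * this file: given the join clause
 * ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45)) and rel "a", each OR
 * arm yields exactly one clause mentioning only "a", so the extracted result
 * is (a.x = 42 OR a.x = 44).  Had either arm contained no clause on "a" at
 * all, nothing could be extracted for "a" and NULL would be returned.
 */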
static Expr *
extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel)
{
List *clauselist = NIL;
ListCell *lc;
/*
* Scan each arm of the input OR clause. Notice we descend into
* or_rinfo->orclause, which has RestrictInfo nodes embedded below the
* toplevel OR/AND structure. This is useful because we can use the info
* in those nodes to make is_safe_restriction_clause_for()'s checks
* cheaper. We'll strip those nodes from the returned tree, though,
* meaning that fresh ones will be built if the clause is accepted as a
* restriction clause. This might seem wasteful --- couldn't we re-use
* the existing RestrictInfos? But that'd require assuming that
* selectivity and other cached data is computed exactly the same way for
* a restriction clause as for a join clause, which seems undesirable.
*/
Assert(or_clause((Node *) or_rinfo->orclause));
foreach(lc, ((BoolExpr *) or_rinfo->orclause)->args)
{
Node *orarg = (Node *) lfirst(lc);
List *subclauses = NIL;
Node *subclause;
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
{
List *andargs = ((BoolExpr *) orarg)->args;
ListCell *lc2;
foreach(lc2, andargs)
{
RestrictInfo *rinfo = castNode(RestrictInfo, lfirst(lc2));
if (restriction_is_or_clause(rinfo))
{
/*
* Recurse to deal with nested OR. Note we *must* recurse
* here, this isn't just overly-tense optimization: we
* have to descend far enough to find and strip all
* RestrictInfos in the expression.
*/
Expr *suborclause;
suborclause = extract_or_clause(rinfo, rel);
if (suborclause)
subclauses = lappend(subclauses, suborclause);
}
else if (is_safe_restriction_clause_for(rinfo, rel))
subclauses = lappend(subclauses, rinfo->clause);
}
}
else
{
RestrictInfo *rinfo = castNode(RestrictInfo, orarg);
Assert(!restriction_is_or_clause(rinfo));
if (is_safe_restriction_clause_for(rinfo, rel))
subclauses = lappend(subclauses, rinfo->clause);
}
/*
* If nothing could be extracted from this arm, we can't do anything
* with this OR clause.
*/
if (subclauses == NIL)
return NULL;
/*
* OK, add subclause(s) to the result OR. If we found more than one,
* we need an AND node. But if we found only one, and it is itself an
* OR node, add its subclauses to the result instead; this is needed
* to preserve AND/OR flatness (ie, no OR directly underneath OR).
*/
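/*
 * For instance, if the only subclause extracted from an arm is itself a
 * nested OR such as (a.x = 1 OR a.x = 2) (made-up values, purely for
 * illustration), splicing its arguments keeps the final result flat,
 * e.g. (a.x = 1 OR a.x = 2 OR ...), instead of nesting that OR directly
 * inside the result OR.
 */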
subclause = (Node *) make_ands_explicit(subclauses);
if (or_clause(subclause))
clauselist = list_concat(clauselist,
list_copy(((BoolExpr *) subclause)->args));
else
clauselist = lappend(clauselist, subclause);
}
/*
* If we got a restriction clause from every arm, wrap them up in an OR
* node. (In theory the OR node might be unnecessary, if there was only
* one arm --- but then the input OR node was also redundant.)
*/
if (clauselist != NIL)
return make_orclause(clauselist);
return NULL;
}

/*
* Consider whether a successfully-extracted restriction OR clause is
* actually worth using. If so, add it to the planner's data structures,
* and adjust the original join clause (join_or_rinfo) to compensate.
*/
static void
consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
Expr *orclause, RestrictInfo *join_or_rinfo)
{
RestrictInfo *or_rinfo;
Selectivity or_selec,
orig_selec;
/*
* Build a RestrictInfo from the new OR clause. We can assume it's valid
* as a base restriction clause.
*/
or_rinfo = make_restrictinfo(orclause,
true,
false,
false,
join_or_rinfo->security_level,
NULL,
NULL,
NULL);
/*
* Estimate its selectivity. (We could have done this earlier, but doing
* it on the RestrictInfo representation allows the result to get cached,
* saving work later.)
*/
or_selec = clause_selectivity(root, (Node *) or_rinfo,
0, JOIN_INNER, NULL);
/*
* The clause is only worth adding to the query if it rejects a useful
* fraction of the base relation's rows; otherwise, it's just going to
* cause duplicate computation (since we will still have to check the
* original OR clause when the join is formed). Somewhat arbitrarily, we
* set the selectivity threshold at 0.9.
*/
if (or_selec > 0.9)
return; /* forget it */
/*
* OK, add it to the rel's restriction-clause list.
*/
rel->baserestrictinfo = lappend(rel->baserestrictinfo, or_rinfo);
rel->baserestrict_min_security = Min(rel->baserestrict_min_security,
or_rinfo->security_level);
/*
* Adjust the original join OR clause's cached selectivity to compensate
* for the selectivity of the added (but redundant) lower-level qual. This
* should result in the join rel getting approximately the same rows
* estimate as it would have gotten without all these shenanigans.
*
* XXX major hack alert: this depends on the assumption that the
* selectivity will stay cached.
*
* XXX another major hack: we adjust only norm_selec, the cached
* selectivity for JOIN_INNER semantics, even though the join clause
* might've been an outer-join clause. This is partly because we can't
* easily identify the relevant SpecialJoinInfo here, and partly because
* the linearity assumption we're making would fail anyway. (If it is an
* outer-join clause, "rel" must be on the nullable side, else we'd not
* have gotten here. So the computation of the join size is going to be
* quite nonlinear with respect to the size of "rel", so it's not clear
* how we ought to adjust outer_selec even if we could compute its
* original value correctly.)
*/
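/*
 * Worked illustration with made-up numbers: suppose "rel" has 1000 rows,
 * the extracted OR has or_selec = 0.1, and the original join OR clause has
 * orig_selec = 0.05.  Without the new qual, this side would contribute a
 * factor of 1000 * 0.05 = 50 to the joinrel size estimate.  With the qual,
 * the scan of "rel" is estimated at 1000 * 0.1 = 100 rows, so setting
 * norm_selec = orig_selec / or_selec = 0.5 gives 100 * 0.5 = 50 again,
 * leaving the joinrel size estimate unchanged (under the linear inner-join
 * size formula assumed above).
 */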
if (or_selec > 0)
{
SpecialJoinInfo sjinfo;
/*
* Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare
* approx_tuple_count() in costsize.c.)
*/
sjinfo.type = T_SpecialJoinInfo;
sjinfo.min_lefthand = bms_difference(join_or_rinfo->clause_relids,
rel->relids);
sjinfo.min_righthand = rel->relids;
sjinfo.syn_lefthand = sjinfo.min_lefthand;
sjinfo.syn_righthand = sjinfo.min_righthand;
sjinfo.jointype = JOIN_INNER;
/* we don't bother trying to make the remaining fields valid */
sjinfo.lhs_strict = false;
sjinfo.delay_upper_joins = false;
sjinfo.semi_can_btree = false;
sjinfo.semi_can_hash = false;
sjinfo.semi_operators = NIL;
sjinfo.semi_rhs_exprs = NIL;
/* Compute inner-join size */
orig_selec = clause_selectivity(root, (Node *) join_or_rinfo,
0, JOIN_INNER, &sjinfo);
/* And hack cached selectivity so join size remains the same */
join_or_rinfo->norm_selec = orig_selec / or_selec;
/* ensure result stays in sane range, in particular not "redundant" */
if (join_or_rinfo->norm_selec > 1)
join_or_rinfo->norm_selec = 1;
/* as explained above, we don't touch outer_selec */
}
}