-- postgresql/src/test/regress/sql/inherit.sql

--
-- Test inheritance features
--
CREATE TABLE a (aa TEXT);
CREATE TABLE b (bb TEXT) INHERITS (a);
CREATE TABLE c (cc TEXT) INHERITS (a);
CREATE TABLE d (dd TEXT) INHERITS (b,c,a);
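-- Added cross-check (not in the original test): pg_inherits records the
-- parentage created above; d should list b, c, a as parents, in that order.
SELECT inhrelid::regclass AS child, inhparent::regclass AS parent, inhseqno
FROM pg_inherits WHERE inhrelid = 'd'::regclass ORDER BY inhseqno;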
INSERT INTO a(aa) VALUES('aaa');
INSERT INTO a(aa) VALUES('aaaa');
INSERT INTO a(aa) VALUES('aaaaa');
INSERT INTO a(aa) VALUES('aaaaaa');
INSERT INTO a(aa) VALUES('aaaaaaa');
INSERT INTO a(aa) VALUES('aaaaaaaa');
INSERT INTO b(aa) VALUES('bbb');
INSERT INTO b(aa) VALUES('bbbb');
INSERT INTO b(aa) VALUES('bbbbb');
INSERT INTO b(aa) VALUES('bbbbbb');
INSERT INTO b(aa) VALUES('bbbbbbb');
INSERT INTO b(aa) VALUES('bbbbbbbb');
INSERT INTO c(aa) VALUES('ccc');
INSERT INTO c(aa) VALUES('cccc');
INSERT INTO c(aa) VALUES('ccccc');
INSERT INTO c(aa) VALUES('cccccc');
INSERT INTO c(aa) VALUES('ccccccc');
INSERT INTO c(aa) VALUES('cccccccc');
INSERT INTO d(aa) VALUES('ddd');
INSERT INTO d(aa) VALUES('dddd');
INSERT INTO d(aa) VALUES('ddddd');
INSERT INTO d(aa) VALUES('dddddd');
INSERT INTO d(aa) VALUES('ddddddd');
INSERT INTO d(aa) VALUES('dddddddd');
SELECT relname, a.* FROM a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM d, pg_class WHERE d.tableoid = pg_class.oid;
SELECT relname, a.* FROM ONLY a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM ONLY b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM ONLY c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM ONLY d, pg_class WHERE d.tableoid = pg_class.oid;
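-- Added for illustration: an equivalent, join-free spelling of the queries
-- above, casting tableoid to regclass directly (as later tests in this
-- file do).
SELECT a.tableoid::regclass AS relname, a.* FROM a;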
UPDATE a SET aa='zzzz' WHERE aa='aaaa';
UPDATE ONLY a SET aa='zzzzz' WHERE aa='aaaaa';
UPDATE b SET aa='zzz' WHERE aa='aaa';
UPDATE ONLY b SET aa='zzz' WHERE aa='aaa';
UPDATE a SET aa='zzzzzz' WHERE aa LIKE 'aaa%';
SELECT relname, a.* FROM a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM d, pg_class WHERE d.tableoid = pg_class.oid;
SELECT relname, a.* FROM ONLY a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM ONLY b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM ONLY c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM ONLY d, pg_class WHERE d.tableoid = pg_class.oid;
UPDATE b SET aa='new';
SELECT relname, a.* FROM a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM d, pg_class WHERE d.tableoid = pg_class.oid;
SELECT relname, a.* FROM ONLY a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM ONLY b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM ONLY c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM ONLY d, pg_class WHERE d.tableoid = pg_class.oid;
UPDATE a SET aa='new';
DELETE FROM ONLY c WHERE aa='new';
SELECT relname, a.* FROM a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM d, pg_class WHERE d.tableoid = pg_class.oid;
SELECT relname, a.* FROM ONLY a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM ONLY b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM ONLY c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM ONLY d, pg_class WHERE d.tableoid = pg_class.oid;
DELETE FROM a;
SELECT relname, a.* FROM a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM d, pg_class WHERE d.tableoid = pg_class.oid;
SELECT relname, a.* FROM ONLY a, pg_class WHERE a.tableoid = pg_class.oid;
SELECT relname, b.* FROM ONLY b, pg_class WHERE b.tableoid = pg_class.oid;
SELECT relname, c.* FROM ONLY c, pg_class WHERE c.tableoid = pg_class.oid;
SELECT relname, d.* FROM ONLY d, pg_class WHERE d.tableoid = pg_class.oid;
-- Confirm PRIMARY KEY adds NOT NULL constraint to child table
CREATE TEMP TABLE z (b TEXT, PRIMARY KEY(aa, b)) INHERITS (a);
INSERT INTO z VALUES (NULL, 'text'); -- should fail
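-- Added for contrast (not in the original): a fully non-null row satisfies
-- both the inherited aa column and the primary key's NOT NULL.
INSERT INTO z VALUES ('aaa', 'text'); -- should succeed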
-- Check inherited UPDATE with first child excluded (here that "child" is
-- the parent table itself, pruned because its NO INHERIT check constraint
-- contradicts the WHERE clause)
create table some_tab (f1 int, f2 int, f3 int, check (f1 < 10) no inherit);
create table some_tab_child () inherits(some_tab);
insert into some_tab_child select i, i+1, 0 from generate_series(1,1000) i;
create index on some_tab_child(f1, f2);
-- while at it, also check that statement-level triggers fire
create function some_tab_stmt_trig_func() returns trigger as
$$begin raise notice 'updating some_tab'; return NULL; end;$$
language plpgsql;
create trigger some_tab_stmt_trig
before update on some_tab execute function some_tab_stmt_trig_func();
explain (costs off)
update some_tab set f3 = 11 where f1 = 12 and f2 = 13;
update some_tab set f3 = 11 where f1 = 12 and f2 = 13;
drop table some_tab cascade;
drop function some_tab_stmt_trig_func();
-- Check inherited UPDATE with all children excluded
create table some_tab (a int, b int);
create table some_tab_child () inherits (some_tab);
insert into some_tab_child values(1,2);
explain (verbose, costs off)
update some_tab set a = a + 1 where false;
update some_tab set a = a + 1 where false;
explain (verbose, costs off)
update some_tab set a = a + 1 where false returning b, a;
update some_tab set a = a + 1 where false returning b, a;
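-- ("table some_tab" below is PostgreSQL shorthand for SELECT * FROM some_tab.)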
table some_tab;
drop table some_tab cascade;
-- Check UPDATE with inherited target and an inherited source table
create temp table foo(f1 int, f2 int);
create temp table foo2(f3 int) inherits (foo);
create temp table bar(f1 int, f2 int);
create temp table bar2(f3 int) inherits (bar);
insert into foo values(1,1);
insert into foo values(3,3);
insert into foo2 values(2,2,2);
insert into foo2 values(3,3,3);
insert into bar values(1,1);
insert into bar values(2,2);
insert into bar values(3,3);
insert into bar values(4,4);
insert into bar2 values(1,1,1);
insert into bar2 values(2,2,2);
insert into bar2 values(3,3,3);
insert into bar2 values(4,4,4);
update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
select tableoid::regclass::text as relname, bar.* from bar order by 1,2;
-- Check UPDATE with inherited target and an appendrel subquery
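-- (explain added for illustration, mirroring the explain-then-run pattern
-- used elsewhere in this file; not part of the original test)
explain (costs off)
update bar set f2 = f2 + 100
from
  ( select f1 from foo union all select f1+3 from foo ) ss
where bar.f1 = ss.f1;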
update bar set f2 = f2 + 100
from
( select f1 from foo union all select f1+3 from foo ) ss
where bar.f1 = ss.f1;
select tableoid::regclass::text as relname, bar.* from bar order by 1,2;
-- Check UPDATE with *partitioned* inherited target and an appendrel subquery
create table some_tab (a int);
insert into some_tab values (0);
create table some_tab_child () inherits (some_tab);
insert into some_tab_child values (1);
create table parted_tab (a int, b char) partition by list (a);
create table parted_tab_part1 partition of parted_tab for values in (1);
create table parted_tab_part2 partition of parted_tab for values in (2);
create table parted_tab_part3 partition of parted_tab for values in (3);
insert into parted_tab values (1, 'a'), (2, 'a'), (3, 'a');
update parted_tab set b = 'b'
from
(select a from some_tab union all select a+1 from some_tab) ss (a)
where parted_tab.a = ss.a;
select tableoid::regclass::text as relname, parted_tab.* from parted_tab order by 1,2;
truncate parted_tab;
insert into parted_tab values (1, 'a'), (2, 'a'), (3, 'a');
update parted_tab set b = 'b'
from
(select 0 from parted_tab union all select 1 from parted_tab) ss (a)
where parted_tab.a = ss.a;
select tableoid::regclass::text as relname, parted_tab.* from parted_tab order by 1,2;
-- modifies partition key, but no rows will actually be updated
explain update parted_tab set a = 2 where false;
drop table parted_tab;
-- Check UPDATE with multi-level partitioned inherited target
create table mlparted_tab (a int, b char, c text) partition by list (a);
create table mlparted_tab_part1 partition of mlparted_tab for values in (1);
create table mlparted_tab_part2 partition of mlparted_tab for values in (2) partition by list (b);
create table mlparted_tab_part3 partition of mlparted_tab for values in (3);
create table mlparted_tab_part2a partition of mlparted_tab_part2 for values in ('a');
create table mlparted_tab_part2b partition of mlparted_tab_part2 for values in ('b');
insert into mlparted_tab values (1, 'a'), (2, 'a'), (2, 'b'), (3, 'a');
update mlparted_tab mlp set c = 'xxx'
from
(select a from some_tab union all select a+1 from some_tab) ss (a)
where (mlp.a = ss.a and mlp.b = 'b') or mlp.a = 3;
select tableoid::regclass::text as relname, mlparted_tab.* from mlparted_tab order by 1,2;
drop table mlparted_tab;
drop table some_tab cascade;
/* Test multiple inheritance of column defaults */
CREATE TABLE firstparent (tomorrow date default now()::date + 1);
CREATE TABLE secondparent (tomorrow date default now() :: date + 1);
CREATE TABLE jointchild () INHERITS (firstparent, secondparent); -- ok
CREATE TABLE thirdparent (tomorrow date default now()::date - 1);
CREATE TABLE otherchild () INHERITS (firstparent, thirdparent); -- not ok
CREATE TABLE otherchild (tomorrow date default now())
INHERITS (firstparent, thirdparent); -- ok, child resolves ambiguous default
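-- Added sanity check (not in the original): the default merged into
-- jointchild should evaluate to tomorrow's date.
INSERT INTO jointchild DEFAULT VALUES;
SELECT tomorrow = now()::date + 1 AS default_ok FROM jointchild;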
DROP TABLE firstparent, secondparent, jointchild, thirdparent, otherchild;
-- Test changing the type of inherited columns
insert into d values('test','one','two','three');
alter table a alter column aa type integer using bit_length(aa);
select * from d;
-- The above verified that we can change the type of a multiply-inherited
-- column; but we should reject that if any definition was inherited from
-- an unrelated parent.
create temp table parent1(f1 int, f2 int);
create temp table parent2(f1 int, f3 bigint);
create temp table childtab(f4 int) inherits(parent1, parent2);
alter table parent1 alter column f1 type bigint; -- fail, conflict w/parent2
alter table parent1 alter column f2 type bigint; -- ok
-- Test non-inheritable parent constraints
create table p1(ff1 int);
alter table p1 add constraint p1chk check (ff1 > 0) no inherit;
alter table p1 add constraint p2chk check (ff1 > 10);
-- connoinherit should be true for NO INHERIT constraint
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.connoinherit from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname = 'p1' order by 1,2;
-- Test that child does not inherit NO INHERIT constraints
create table c1 () inherits (p1);
\d p1
\d c1
-- Test that child does not override inheritable constraints of the parent
create table c2 (constraint p2chk check (ff1 > 10) no inherit) inherits (p1); -- fails
drop table p1 cascade;
-- Tests for casting between the rowtypes of parent and child
-- tables. See the pgsql-hackers thread beginning Dec 4, 2004
create table base (i integer);
create table derived () inherits (base);
create table more_derived (like derived, b int) inherits (derived);
insert into derived (i) values (0);
select derived::base from derived;
select NULL::derived::base;
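-- (Added check: pg_typeof confirms the whole-row cast really yields the
-- parent's rowtype.)
select pg_typeof(derived::base) from derived;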
-- remove redundant conversions.
explain (verbose on, costs off) select row(i, b)::more_derived::derived::base from more_derived;
explain (verbose on, costs off) select (1, 2)::more_derived::derived::base;
drop table more_derived;
drop table derived;
drop table base;
create table p1(ff1 int);
create table p2(f1 text);
create function p2text(p2) returns text as 'select $1.f1' language sql;
create table c1(f3 int) inherits(p1,p2);
insert into c1 values(123456789, 'hi', 42);
select p2text(c1.*) from c1;
drop function p2text(p2);
drop table c1;
drop table p2;
drop table p1;
CREATE TABLE ac (aa TEXT);
alter table ac add constraint ac_check check (aa is not null);
CREATE TABLE bc (bb TEXT) INHERITS (ac);
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
insert into ac (aa) values (NULL);
insert into bc (aa) values (NULL);
alter table bc drop constraint ac_check; -- fail, disallowed
alter table ac drop constraint ac_check;
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
-- try the unnamed-constraint case
alter table ac add check (aa is not null);
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
insert into ac (aa) values (NULL);
insert into bc (aa) values (NULL);
alter table bc drop constraint ac_aa_check; -- fail, disallowed
alter table ac drop constraint ac_aa_check;
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
alter table ac add constraint ac_check check (aa is not null);
alter table bc no inherit ac;
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
alter table bc drop constraint ac_check;
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
alter table ac drop constraint ac_check;
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
drop table bc;
drop table ac;
create table ac (a int constraint check_a check (a <> 0));
create table bc (a int constraint check_a check (a <> 0), b int constraint check_b check (b <> 0)) inherits (ac);
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2;
drop table bc;
drop table ac;
create table ac (a int constraint check_a check (a <> 0));
create table bc (b int constraint check_b check (b <> 0));
create table cc (c int constraint check_c check (c <> 0)) inherits (ac, bc);
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2;
alter table cc no inherit bc;
select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2;
drop table cc;
drop table bc;
drop table ac;
create table p1(f1 int);
create table p2(f2 int);
create table c1(f3 int) inherits(p1,p2);
insert into c1 values(1,-1,2);
alter table p2 add constraint cc check (f2>0); -- fail
alter table p2 add check (f2>0); -- check it without a name, too
delete from c1;
insert into c1 values(1,1,2);
alter table p2 add check (f2>0);
insert into c1 values(1,-1,2); -- fail
create table c2(f3 int) inherits(p1,p2);
\d c2
create table c3 (f4 int) inherits(c1,c2);
\d c3
drop table p1 cascade;
drop table p2 cascade;
create table pp1 (f1 int);
create table cc1 (f2 text, f3 int) inherits (pp1);
alter table pp1 add column a1 int check (a1 > 0);
\d cc1
create table cc2(f4 float) inherits(pp1,cc1);
\d cc2
alter table pp1 add column a2 int check (a2 > 0);
\d cc2
drop table pp1 cascade;
-- Test for renaming in simple multiple inheritance
CREATE TABLE inht1 (a int, b int);
CREATE TABLE inhs1 (b int, c int);
CREATE TABLE inhts (d int) INHERITS (inht1, inhs1);
ALTER TABLE inht1 RENAME a TO aa;
ALTER TABLE inht1 RENAME b TO bb; -- should fail
ALTER TABLE inhts RENAME aa TO aaa; -- should fail
ALTER TABLE inhts RENAME d TO dd;
\d+ inhts
DROP TABLE inhts;
-- Test for adding a column to a parent table with complex inheritance
CREATE TABLE inhta ();
CREATE TABLE inhtb () INHERITS (inhta);
CREATE TABLE inhtc () INHERITS (inhtb);
CREATE TABLE inhtd () INHERITS (inhta, inhtb, inhtc);
ALTER TABLE inhta ADD COLUMN i int;
\d+ inhta
DROP TABLE inhta, inhtb, inhtc, inhtd;
-- Test for renaming in diamond inheritance
CREATE TABLE inht2 (x int) INHERITS (inht1);
CREATE TABLE inht3 (y int) INHERITS (inht1);
CREATE TABLE inht4 (z int) INHERITS (inht2, inht3);
ALTER TABLE inht1 RENAME aa TO aaa;
\d+ inht4
CREATE TABLE inhts (d int) INHERITS (inht2, inhs1);
ALTER TABLE inht1 RENAME aaa TO aaaa;
ALTER TABLE inht1 RENAME b TO bb; -- should fail
\d+ inhts
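-- (Comment added for clarity: the recursive query below cross-checks each
-- non-local column's attinhcount against the number of parents that
-- pg_inherits records for its table, over the whole inht1 hierarchy.)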
WITH RECURSIVE r AS (
SELECT 'inht1'::regclass AS inhrelid
UNION ALL
SELECT c.inhrelid FROM pg_inherits c, r WHERE r.inhrelid = c.inhparent
)
SELECT a.attrelid::regclass, a.attname, a.attinhcount, e.expected
FROM (SELECT inhrelid, count(*) AS expected FROM pg_inherits
WHERE inhparent IN (SELECT inhrelid FROM r) GROUP BY inhrelid) e
JOIN pg_attribute a ON e.inhrelid = a.attrelid WHERE NOT attislocal
ORDER BY a.attrelid::regclass::name, a.attnum;
DROP TABLE inht1, inhs1 CASCADE;
-- Test non-inheritable index-based (UNIQUE, EXCLUDE) constraints
CREATE TABLE test_constraints (id int, val1 varchar, val2 int, UNIQUE(val1, val2));
CREATE TABLE test_constraints_inh () INHERITS (test_constraints);
\d+ test_constraints
ALTER TABLE ONLY test_constraints DROP CONSTRAINT test_constraints_val1_val2_key;
\d+ test_constraints
\d+ test_constraints_inh
DROP TABLE test_constraints_inh;
DROP TABLE test_constraints;
CREATE TABLE test_ex_constraints (
c circle,
EXCLUDE USING gist (c WITH &&)
);
CREATE TABLE test_ex_constraints_inh () INHERITS (test_ex_constraints);
\d+ test_ex_constraints
ALTER TABLE test_ex_constraints DROP CONSTRAINT test_ex_constraints_c_excl;
\d+ test_ex_constraints
\d+ test_ex_constraints_inh
DROP TABLE test_ex_constraints_inh;
DROP TABLE test_ex_constraints;
-- Test non-inheritable foreign key constraints
CREATE TABLE test_primary_constraints(id int PRIMARY KEY);
CREATE TABLE test_foreign_constraints(id1 int REFERENCES test_primary_constraints(id));
CREATE TABLE test_foreign_constraints_inh () INHERITS (test_foreign_constraints);
\d+ test_primary_constraints
\d+ test_foreign_constraints
ALTER TABLE test_foreign_constraints DROP CONSTRAINT test_foreign_constraints_id1_fkey;
\d+ test_foreign_constraints
\d+ test_foreign_constraints_inh
DROP TABLE test_foreign_constraints_inh;
DROP TABLE test_foreign_constraints;
DROP TABLE test_primary_constraints;
-- Test foreign key behavior
create table inh_fk_1 (a int primary key);
insert into inh_fk_1 values (1), (2), (3);
create table inh_fk_2 (x int primary key, y int references inh_fk_1 on delete cascade);
insert into inh_fk_2 values (11, 1), (22, 2), (33, 3);
create table inh_fk_2_child () inherits (inh_fk_2);
insert into inh_fk_2_child values (111, 1), (222, 2);
delete from inh_fk_1 where a = 1;
select * from inh_fk_1 order by 1;
select * from inh_fk_2 order by 1, 2;
drop table inh_fk_1, inh_fk_2, inh_fk_2_child;
-- Test that parent and child CHECK constraints can be created in either
-- order; either way, the merged child constraint ends up with
-- conislocal = true and coninhcount > 0
create table p1(f1 int);
create table p1_c1() inherits(p1);
alter table p1 add constraint inh_check_constraint1 check (f1 > 0);
alter table p1_c1 add constraint inh_check_constraint1 check (f1 > 0);
alter table p1_c1 add constraint inh_check_constraint2 check (f1 < 10);
alter table p1 add constraint inh_check_constraint2 check (f1 < 10);
select conrelid::regclass::text as relname, conname, conislocal, coninhcount
from pg_constraint where conname like 'inh\_check\_constraint%'
order by 1, 2;
drop table p1 cascade;
--
-- Test DROP behavior of multiply-defined CHECK constraints
--
create table p1(f1 int constraint f1_pos CHECK (f1 > 0));
create table p1_c1 (f1 int constraint f1_pos CHECK (f1 > 0)) inherits (p1);
alter table p1_c1 drop constraint f1_pos;
alter table p1 drop constraint f1_pos;
\d p1_c1
drop table p1 cascade;
create table p1(f1 int constraint f1_pos CHECK (f1 > 0));
create table p2(f1 int constraint f1_pos CHECK (f1 > 0));
create table p1p2_c1 (f1 int) inherits (p1, p2);
create table p1p2_c2 (f1 int constraint f1_pos CHECK (f1 > 0)) inherits (p1, p2);
alter table p2 drop constraint f1_pos;
alter table p1 drop constraint f1_pos;
\d p1p2_c*
drop table p1, p2 cascade;
create table p1(f1 int constraint f1_pos CHECK (f1 > 0));
create table p1_c1() inherits (p1);
create table p1_c2() inherits (p1);
create table p1_c1c2() inherits (p1_c1, p1_c2);
\d p1_c1c2
alter table p1 drop constraint f1_pos;
\d p1_c1c2
drop table p1 cascade;
create table p1(f1 int constraint f1_pos CHECK (f1 > 0));
create table p1_c1() inherits (p1);
create table p1_c2(constraint f1_pos CHECK (f1 > 0)) inherits (p1);
create table p1_c1c2() inherits (p1_c1, p1_c2, p1);
alter table p1_c2 drop constraint f1_pos;
alter table p1 drop constraint f1_pos;
alter table p1_c1c2 drop constraint f1_pos;
alter table p1_c2 drop constraint f1_pos;
\d p1_c1c2
drop table p1 cascade;
-- Test that a valid child constraint can have a not-valid parent
-- constraint, but not vice versa
create table invalid_check_con(f1 int);
create table invalid_check_con_child() inherits(invalid_check_con);
alter table invalid_check_con_child add constraint inh_check_constraint check(f1 > 0) not valid;
alter table invalid_check_con add constraint inh_check_constraint check(f1 > 0); -- fail
alter table invalid_check_con_child drop constraint inh_check_constraint;
insert into invalid_check_con values(0);
alter table invalid_check_con_child add constraint inh_check_constraint check(f1 > 0);
alter table invalid_check_con add constraint inh_check_constraint check(f1 > 0) not valid;
insert into invalid_check_con values(0); -- fail
insert into invalid_check_con_child values(0); -- fail
select conrelid::regclass::text as relname, conname,
convalidated, conislocal, coninhcount, connoinherit
from pg_constraint where conname like 'inh\_check\_constraint%'
order by 1, 2;
-- We don't drop the invalid_check_con* tables, to test dump/reload with
-- a still-not-valid constraint in place
--
-- Test parameterized append plans for inheritance trees
--
create temp table patest0 (id, x) as
select x, x from generate_series(0,1000) x;
create temp table patest1() inherits (patest0);
insert into patest1
select x, x from generate_series(0,1000) x;
create temp table patest2() inherits (patest0);
insert into patest2
select x, x from generate_series(0,1000) x;
create index patest0i on patest0(id);
create index patest1i on patest1(id);
create index patest2i on patest2(id);
analyze patest0;
analyze patest1;
analyze patest2;
explain (costs off)
select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1;
select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1;
drop index patest2i;
explain (costs off)
select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1;
select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1;
drop table patest0 cascade;
--
-- Test merge-append plans for inheritance trees
--
create table matest0 (id serial primary key, name text);
create table matest1 (id integer primary key) inherits (matest0);
create table matest2 (id integer primary key) inherits (matest0);
create table matest3 (id integer primary key) inherits (matest0);
create index matest0i on matest0 ((1-id));
create index matest1i on matest1 ((1-id));
-- create index matest2i on matest2 ((1-id)); -- intentionally missing
create index matest3i on matest3 ((1-id));
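-- (Note added: matest0i etc. are expression indexes on (1-id), which is
-- what lets "order by 1-id" below be satisfied by index scans under a
-- MergeAppend.)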
insert into matest1 (name) values ('Test 1');
insert into matest1 (name) values ('Test 2');
insert into matest2 (name) values ('Test 3');
insert into matest2 (name) values ('Test 4');
insert into matest3 (name) values ('Test 5');
insert into matest3 (name) values ('Test 6');
set enable_indexscan = off; -- force use of seqscan/sort, so no merge
explain (verbose, costs off) select * from matest0 order by 1-id;
select * from matest0 order by 1-id;
explain (verbose, costs off) select min(1-id) from matest0;
select min(1-id) from matest0;
reset enable_indexscan;
set enable_seqscan = off; -- plan with fewest seqscans should be merge
set enable_parallel_append = off; -- Don't let parallel-append interfere
explain (verbose, costs off) select * from matest0 order by 1-id;
select * from matest0 order by 1-id;
explain (verbose, costs off) select min(1-id) from matest0;
select min(1-id) from matest0;
reset enable_seqscan;
reset enable_parallel_append;
drop table matest0 cascade;
--
-- Check that use of an index with an extraneous column doesn't produce
-- a plan with extraneous sorting
--
create table matest0 (a int, b int, c int, d int);
create table matest1 () inherits(matest0);
create index matest0i on matest0 (b, c);
create index matest1i on matest1 (b, c);
set enable_nestloop = off; -- we want a plan with two MergeAppends
explain (costs off)
select t1.* from matest0 t1, matest0 t2
where t1.b = t2.b and t2.c = t2.d
order by t1.b limit 10;
reset enable_nestloop;
drop table matest0 cascade;
-- Test a MergeAppend plan where one child requires a sort
create table matest0(a int primary key);
create table matest1() inherits (matest0);
insert into matest0 select generate_series(1, 400);
insert into matest1 select generate_series(1, 200);
analyze matest0;
analyze matest1;
explain (costs off)
select * from matest0 where a < 100 order by a;
drop table matest0 cascade;
--
-- Test merge-append for UNION ALL append relations
--
set enable_seqscan = off;
set enable_indexscan = on;
set enable_bitmapscan = off;
-- Check handling of duplicated, constant, or volatile targetlist items
explain (costs off)
SELECT thousand, tenthous FROM tenk1
UNION ALL
SELECT thousand, thousand FROM tenk1
ORDER BY thousand, tenthous;
explain (costs off)
SELECT thousand, tenthous, thousand+tenthous AS x FROM tenk1
UNION ALL
SELECT 42, 42, hundred FROM tenk1
ORDER BY thousand, tenthous;
explain (costs off)
SELECT thousand, tenthous FROM tenk1
UNION ALL
SELECT thousand, random()::integer FROM tenk1
ORDER BY thousand, tenthous;
-- Check min/max aggregate optimization
explain (costs off)
SELECT min(x) FROM
(SELECT unique1 AS x FROM tenk1 a
UNION ALL
SELECT unique2 AS x FROM tenk1 b) s;
explain (costs off)
SELECT min(y) FROM
(SELECT unique1 AS x, unique1 AS y FROM tenk1 a
UNION ALL
SELECT unique2 AS x, unique2 AS y FROM tenk1 b) s;
-- XXX planner doesn't recognize that index on unique2 is sufficiently sorted
explain (costs off)
SELECT x, y FROM
(SELECT thousand AS x, tenthous AS y FROM tenk1 a
UNION ALL
SELECT unique2 AS x, unique2 AS y FROM tenk1 b) s
ORDER BY x, y;
-- exercise rescan code path via a repeatedly-evaluated subquery
explain (costs off)
SELECT
ARRAY(SELECT f.i FROM (
(SELECT d + g.i FROM generate_series(4, 30, 3) d ORDER BY 1)
UNION ALL
(SELECT d + g.i FROM generate_series(0, 30, 5) d ORDER BY 1)
) f(i)
ORDER BY f.i LIMIT 10)
FROM generate_series(1, 3) g(i);
SELECT
ARRAY(SELECT f.i FROM (
(SELECT d + g.i FROM generate_series(4, 30, 3) d ORDER BY 1)
UNION ALL
(SELECT d + g.i FROM generate_series(0, 30, 5) d ORDER BY 1)
) f(i)
ORDER BY f.i LIMIT 10)
FROM generate_series(1, 3) g(i);
reset enable_seqscan;
reset enable_indexscan;
reset enable_bitmapscan;
--
-- Check handling of MULTIEXPR SubPlans in inherited updates
--
create table inhpar(f1 int, f2 name);
create table inhcld(f2 name, f1 int);
alter table inhcld inherit inhpar;
insert into inhpar select x, x::text from generate_series(1,5) x;
insert into inhcld select x::text, x from generate_series(6,10) x;
explain (verbose, costs off)
update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
select * from inhpar;
drop table inhpar cascade;
--
-- And the same for partitioned cases
--
create table inhpar(f1 int primary key, f2 name) partition by range (f1);
create table inhcld1(f2 name, f1 int primary key);
create table inhcld2(f1 int primary key, f2 name);
alter table inhpar attach partition inhcld1 for values from (1) to (5);
alter table inhpar attach partition inhcld2 for values from (5) to (100);
insert into inhpar select x, x::text from generate_series(1,10) x;
explain (verbose, costs off)
update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1);
select * from inhpar;
-- Also check ON CONFLICT
insert into inhpar as i values (3), (7) on conflict (f1)
do update set (f1, f2) = (select i.f1, i.f2 || '+');
select * from inhpar order by f1; -- tuple order might be unstable here
drop table inhpar cascade;
--
-- Check handling of a constant-null CHECK constraint
--
create table cnullparent (f1 int);
create table cnullchild (check (f1 = 1 or f1 = null)) inherits(cnullparent);
insert into cnullchild values(1);
insert into cnullchild values(2);
insert into cnullchild values(null);
select * from cnullparent;
select * from cnullparent where f1 = 2;
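-- Added illustration (not in the original): "f1 = null" is never true, it
-- is just null, so the check above can never reject a row.
select 2 = null as never_true_just_null;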
drop table cnullparent cascade;
--
-- Test inheritance of NOT NULL constraints
--
create table pp1 (f1 int);
create table cc1 (f2 text, f3 int) inherits (pp1);
\d cc1
create table cc2(f4 float) inherits(pp1,cc1);
\d cc2
-- named NOT NULL constraint
alter table cc1 add column a2 int constraint nn not null;
\d+ cc1
\d+ cc2
alter table pp1 alter column f1 set not null;
\d+ pp1
\d+ cc1
\d+ cc2
-- cannot create table with inconsistent NO INHERIT constraint
create table cc3 (a2 int not null no inherit) inherits (cc1);
-- change NO INHERIT status of inherited constraint: no dice, it's inherited
alter table cc2 add not null a2 no inherit;
-- remove constraint from cc2: no dice, it's inherited
alter table cc2 alter column a2 drop not null;
-- remove constraint cc1, should succeed
alter table cc1 alter column a2 drop not null;
\d+ cc1
-- same for cc2
alter table cc2 alter column f1 drop not null;
\d+ cc2
-- remove from cc1, should fail again
alter table cc1 alter column f1 drop not null;
-- remove from pp1, should succeed
alter table pp1 alter column f1 drop not null;
\d+ pp1
alter table pp1 add primary key (f1);
-- Leave these tables around, for pg_upgrade testing
-- Test a not-null addition that must walk down the hierarchy
CREATE TABLE inh_parent ();
CREATE TABLE inh_child (i int) INHERITS (inh_parent);
CREATE TABLE inh_grandchild () INHERITS (inh_parent, inh_child);
ALTER TABLE inh_parent ADD COLUMN i int NOT NULL;
drop table inh_parent, inh_child, inh_grandchild;
-- Test the same constraint name for different columns in different parents
create table inh_parent1(a int constraint nn not null);
create table inh_parent2(b int constraint nn not null);
create table inh_child () inherits (inh_parent1, inh_parent2);
\d+ inh_child
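-- Illustrative check, not part of the original test: the clashing parent
-- constraint names don't block inheritance; the child should end up with
-- one inherited not-null constraint per column.
select conrelid::regclass, conname,
  (select attname from pg_attribute where attrelid = conrelid and attnum = conkey[1]),
  coninhcount, conislocal
from pg_constraint where contype = 'n' and
  conrelid::regclass::text in ('inh_parent1', 'inh_parent2', 'inh_child')
order by conrelid::regclass::text, conname;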
drop table inh_parent1, inh_parent2, inh_child;
-- Test multiple parents with overlapping primary keys
create table inh_parent1(a int, b int, c int, primary key (a, b));
create table inh_parent2(d int, e int, b int, primary key (d, b));
create table inh_child() inherits (inh_parent1, inh_parent2);
select conrelid::regclass, conname, contype, conkey,
coninhcount, conislocal, connoinherit
from pg_constraint where contype in ('n','p') and
conrelid::regclass::text in ('inh_child', 'inh_parent1', 'inh_parent2')
order by 1, 2;
\d+ inh_child
drop table inh_parent1, inh_parent2, inh_child;
-- NOT NULL NO INHERIT
create table inh_nn_parent(a int);
create table inh_nn_child() inherits (inh_nn_parent);
alter table inh_nn_parent add not null a no inherit;
create table inh_nn_child2() inherits (inh_nn_parent);
select conrelid::regclass, conname, contype, conkey,
(select attname from pg_attribute where attrelid = conrelid and attnum = conkey[1]),
coninhcount, conislocal, connoinherit
from pg_constraint where contype = 'n' and
conrelid::regclass::text like 'inh\_nn\_%'
order by 2, 1;
\d+ inh_nn*
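-- Illustrative sketch, not part of the original test: the NO INHERIT
-- constraint binds only the parent, so the children should still accept
-- NULLs while the parent rejects them.
insert into inh_nn_parent values (null);	-- error
insert into inh_nn_child values (null);	-- ok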
drop table inh_nn_parent, inh_nn_child, inh_nn_child2;
CREATE TABLE inh_nn_parent (a int, NOT NULL a NO INHERIT);
CREATE TABLE inh_nn_child() INHERITS (inh_nn_parent);
ALTER TABLE inh_nn_parent ADD CONSTRAINT nna NOT NULL a;
ALTER TABLE inh_nn_parent ALTER a SET NOT NULL;
DROP TABLE inh_nn_parent cascade;
-- Adding a PK at the top level of a hierarchy should cause all descendants
-- to be checked for nulls, even past a no-inherit constraint
CREATE TABLE inh_nn_lvl1 (a int);
CREATE TABLE inh_nn_lvl2 () INHERITS (inh_nn_lvl1);
CREATE TABLE inh_nn_lvl3 (CONSTRAINT foo NOT NULL a NO INHERIT) INHERITS (inh_nn_lvl2);
CREATE TABLE inh_nn_lvl4 () INHERITS (inh_nn_lvl3);
CREATE TABLE inh_nn_lvl5 () INHERITS (inh_nn_lvl4);
INSERT INTO inh_nn_lvl2 VALUES (NULL);
ALTER TABLE inh_nn_lvl1 ADD PRIMARY KEY (a); -- error: null in inh_nn_lvl2
DELETE FROM inh_nn_lvl2;
INSERT INTO inh_nn_lvl5 VALUES (NULL);
ALTER TABLE inh_nn_lvl1 ADD PRIMARY KEY (a); -- error: null in inh_nn_lvl5
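-- Illustrative check, not part of the original test: after the failed
-- attempts above, the only not-null constraint left in this hierarchy
-- should be the NO INHERIT one on inh_nn_lvl3.
select conrelid::regclass, conname, contype, coninhcount, conislocal, connoinherit
from pg_constraint where contype = 'n' and
  conrelid::regclass::text like 'inh\_nn\_lvl%'
order by conrelid::regclass::text, conname;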
DROP TABLE inh_nn_lvl1 CASCADE;
--
-- test inherit/deinherit
--
create table inh_parent(f1 int);
create table inh_child1(f1 int not null);
create table inh_child2(f1 int);
-- inh_child1 should have a not-null constraint
alter table inh_child1 inherit inh_parent;
-- should fail: inh_child2 is missing the required NOT NULL constraint
alter table inh_child2 inherit inh_child1;
alter table inh_child2 alter column f1 set not null;
alter table inh_child2 inherit inh_child1;
-- add NOT NULL constraint recursively
alter table inh_parent alter column f1 set not null;
\d+ inh_parent
\d+ inh_child1
\d+ inh_child2
select conrelid::regclass, conname, contype, coninhcount, conislocal
from pg_constraint where contype = 'n' and
conrelid in ('inh_parent'::regclass, 'inh_child1'::regclass, 'inh_child2'::regclass)
order by 2, 1;
--
-- test deinherit procedure
--
-- deinherit inh_child1
create table inh_child3 () inherits (inh_child1);
alter table inh_child1 no inherit inh_parent;
\d+ inh_parent
\d+ inh_child1
\d+ inh_child2
select conrelid::regclass, conname, contype, coninhcount, conislocal
from pg_constraint where contype = 'n' and
conrelid::regclass::text in ('inh_parent', 'inh_child1', 'inh_child2', 'inh_child3')
order by 2, 1;
drop table inh_parent, inh_child1, inh_child2, inh_child3;
-- a PK in the parent must find a not-null constraint in the child that it
-- can mark inherited
create table inh_parent (a int primary key);
create table inh_child (a int primary key);
alter table inh_child inherit inh_parent; -- nope
alter table inh_child alter a set not null;
alter table inh_child inherit inh_parent; -- now it works
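-- Illustrative check, not part of the original test: the child's not-null
-- constraint should now show conislocal = true and coninhcount = 1.
select conrelid::regclass, conname, contype, coninhcount, conislocal
from pg_constraint where contype = 'n' and
  conrelid::regclass::text in ('inh_parent', 'inh_child')
order by conrelid::regclass::text, conname;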
-- don't interfere with other types of constraints
alter table inh_parent add constraint inh_parent_excl exclude ((1) with =);
alter table inh_parent add constraint inh_parent_uq unique (a);
alter table inh_parent add constraint inh_parent_fk foreign key (a) references inh_parent (a);
create table inh_child2 () inherits (inh_parent);
create table inh_child3 (like inh_parent);
alter table inh_child3 inherit inh_parent;
select conrelid::regclass, conname, contype, coninhcount, conislocal
from pg_constraint
where conrelid::regclass::text in ('inh_parent', 'inh_child', 'inh_child2', 'inh_child3')
order by 2, 1;
drop table inh_parent, inh_child, inh_child2, inh_child3;
--
-- test multi inheritance tree
--
create table inh_parent(f1 int not null);
create table inh_child1() inherits(inh_parent);
create table inh_child2() inherits(inh_parent);
create table inh_child3() inherits(inh_child1, inh_child2);
-- show constraint info
select conrelid::regclass, conname, contype, coninhcount, conislocal
from pg_constraint where contype = 'n' and
conrelid in ('inh_parent'::regclass, 'inh_child1'::regclass, 'inh_child2'::regclass, 'inh_child3'::regclass)
order by 2, conrelid::regclass::text;
drop table inh_parent cascade;
-- test child table with inherited columns and
-- with explicitly specified not null constraints
create table inh_parent_1(f1 int);
create table inh_parent_2(f2 text);
create table inh_child(f1 int not null, f2 text not null) inherits(inh_parent_1, inh_parent_2);
-- show constraint info
select conrelid::regclass, conname, contype, coninhcount, conislocal
from pg_constraint where contype = 'n' and
conrelid in ('inh_parent_1'::regclass, 'inh_parent_2'::regclass, 'inh_child'::regclass)
order by 2, conrelid::regclass::text;
-- also drops inh_child table
drop table inh_parent_1 cascade;
drop table inh_parent_2;
-- test multi layer inheritance tree
create table inh_p1(f1 int not null);
create table inh_p2(f1 int not null);
create table inh_p3(f2 int);
create table inh_p4(f1 int not null, f3 text not null);
create table inh_multiparent() inherits(inh_p1, inh_p2, inh_p3, inh_p4);
-- constraint on f1 should have three parents
select conrelid::regclass, contype, conname,
(select attname from pg_attribute where attrelid = conrelid and attnum = conkey[1]),
coninhcount, conislocal
from pg_constraint where contype = 'n' and
conrelid::regclass in ('inh_p1', 'inh_p2', 'inh_p3', 'inh_p4',
'inh_multiparent')
order by conrelid::regclass::text, conname;
create table inh_multiparent2 (a int not null, f1 int) inherits(inh_p3, inh_multiparent);
select conrelid::regclass, contype, conname,
(select attname from pg_attribute where attrelid = conrelid and attnum = conkey[1]),
coninhcount, conislocal
from pg_constraint where contype = 'n' and
conrelid::regclass in ('inh_p3', 'inh_multiparent', 'inh_multiparent2')
order by conrelid::regclass::text, conname;
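-- Illustrative sketch, not part of the original test: removing one
-- inheritance link should merely decrement coninhcount on the child's
-- constraint (here from 3 to 2), not drop it.
alter table inh_multiparent no inherit inh_p2;
select conrelid::regclass, conname, coninhcount, conislocal
from pg_constraint where contype = 'n' and
  conrelid = 'inh_multiparent'::regclass;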
drop table inh_p1, inh_p2, inh_p3, inh_p4 cascade;
--
-- Mixed ownership inheritance tree
--
create role regress_alice;
create role regress_bob;
grant all on schema public to regress_alice, regress_bob;
grant regress_alice to regress_bob;
set session authorization regress_alice;
create table inh_parent (a int not null);
set session authorization regress_bob;
create table inh_child () inherits (inh_parent);
set session authorization regress_alice;
-- alice can't do this: she doesn't own inh_child
alter table inh_parent alter a drop not null;
set session authorization regress_bob;
-- bob can: he owns inh_child, and membership in regress_alice gives him
-- rights on inh_parent
alter table inh_parent alter a drop not null;
reset session authorization;
drop table inh_parent, inh_child;
revoke all on schema public from regress_alice, regress_bob;
drop role regress_alice, regress_bob;
--
-- Check use of temporary tables with inheritance trees
--
create table inh_perm_parent (a1 int);
create temp table inh_temp_parent (a1 int);
create temp table inh_temp_child () inherits (inh_perm_parent); -- ok
create table inh_perm_child () inherits (inh_temp_parent); -- error
create temp table inh_temp_child_2 () inherits (inh_temp_parent); -- ok
insert into inh_perm_parent values (1);
insert into inh_temp_parent values (2);
insert into inh_temp_child values (3);
insert into inh_temp_child_2 values (4);
select tableoid::regclass, a1 from inh_perm_parent;
select tableoid::regclass, a1 from inh_temp_parent;
drop table inh_perm_parent cascade;
drop table inh_temp_parent cascade;
--
-- Check that constraint exclusion works correctly with partitions using
-- implicit constraints generated from the partition bound information.
--
create table list_parted (
a varchar
) partition by list (a);
create table part_ab_cd partition of list_parted for values in ('ab', 'cd');
create table part_ef_gh partition of list_parted for values in ('ef', 'gh');
create table part_null_xy partition of list_parted for values in (null, 'xy');
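-- Illustrative aid, not part of the original test: show the partition
-- bounds that the plans below depend on.
\d+ list_parted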
explain (costs off) select * from list_parted;
explain (costs off) select * from list_parted where a is null;
explain (costs off) select * from list_parted where a is not null;
explain (costs off) select * from list_parted where a in ('ab', 'cd', 'ef');
explain (costs off) select * from list_parted where a = 'ab' or a in (null, 'cd');
explain (costs off) select * from list_parted where a = 'ab';
create table range_list_parted (
a int,
b char(2)
) partition by range (a);
create table part_1_10 partition of range_list_parted for values from (1) to (10) partition by list (b);
create table part_1_10_ab partition of part_1_10 for values in ('ab');
create table part_1_10_cd partition of part_1_10 for values in ('cd');
create table part_10_20 partition of range_list_parted for values from (10) to (20) partition by list (b);
create table part_10_20_ab partition of part_10_20 for values in ('ab');
create table part_10_20_cd partition of part_10_20 for values in ('cd');
create table part_21_30 partition of range_list_parted for values from (21) to (30) partition by list (b);
create table part_21_30_ab partition of part_21_30 for values in ('ab');
create table part_21_30_cd partition of part_21_30 for values in ('cd');
create table part_40_inf partition of range_list_parted for values from (40) to (maxvalue) partition by list (b);
Implement table partitioning. Table partitioning is like table inheritance and reuses much of the existing infrastructure, but there are some important differences. The parent is called a partitioned table and is always empty; it may not have indexes or non-inherited constraints, since those make no sense for a relation with no data of its own. The children are called partitions and contain all of the actual data. Each partition has an implicit partitioning constraint. Multiple inheritance is not allowed, and partitioning and inheritance can't be mixed. Partitions can't have extra columns and may not allow nulls unless the parent does. Tuples inserted into the parent are automatically routed to the correct partition, so tuple-routing ON INSERT triggers are not needed. Tuple routing isn't yet supported for partitions which are foreign tables, and it doesn't handle updates that cross partition boundaries. Currently, tables can be range-partitioned or list-partitioned. List partitioning is limited to a single column, but range partitioning can involve multiple columns. A partitioning "column" can be an expression. Because table partitioning is less general than table inheritance, it is hoped that it will be easier to reason about properties of partitions, and therefore that this will serve as a better foundation for a variety of possible optimizations, including query planner optimizations. The tuple routing based which this patch does based on the implicit partitioning constraints is an example of this, but it seems likely that many other useful optimizations are also possible. Amit Langote, reviewed and tested by Robert Haas, Ashutosh Bapat, Amit Kapila, Rajkumar Raghuwanshi, Corey Huinker, Jaime Casanova, Rushabh Lathia, Erik Rijkers, among others. Minor revisions by me.
2016-12-07 19:17:43 +01:00
create table part_40_inf_ab partition of part_40_inf for values in ('ab');
create table part_40_inf_cd partition of part_40_inf for values in ('cd');
create table part_40_inf_null partition of part_40_inf for values in (null);
explain (costs off) select * from range_list_parted;
explain (costs off) select * from range_list_parted where a = 5;
explain (costs off) select * from range_list_parted where b = 'ab';
explain (costs off) select * from range_list_parted where a between 3 and 23 and b in ('ab');
-- Should select no rows because range partition key cannot be null
explain (costs off) select * from range_list_parted where a is null;
-- Should only select rows from the null-accepting partition
explain (costs off) select * from range_list_parted where b is null;
explain (costs off) select * from range_list_parted where a is not null and a < 67;
explain (costs off) select * from range_list_parted where a >= 30;
drop table list_parted;
drop table range_list_parted;
-- check that constraint exclusion copes with the partition constraint
-- emitted for multi-column range-partitioned tables
create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c);
create table mcrparted_def partition of mcrparted default;
create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, 1, 1);
create table mcrparted1 partition of mcrparted for values from (1, 1, 1) to (10, 5, 10);
create table mcrparted2 partition of mcrparted for values from (10, 5, 10) to (10, 10, 10);
create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10);
create table mcrparted4 partition of mcrparted for values from (20, 10, 10) to (20, 20, 20);
create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue);
explain (costs off) select * from mcrparted where a = 0; -- scans mcrparted0, mcrparted_def
explain (costs off) select * from mcrparted where a = 10 and abs(b) < 5; -- scans mcrparted1, mcrparted_def
explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scans mcrparted1, mcrparted2, mcrparted_def
explain (costs off) select * from mcrparted where abs(b) = 5; -- scans all partitions
explain (costs off) select * from mcrparted where a > -1; -- scans all partitions
explain (costs off) select * from mcrparted where a = 20 and abs(b) = 10 and c > 10; -- scans mcrparted4
explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mcrparted3, mcrparted4, mcrparted5, mcrparted_def
-- check that partitioned table Appends cope with being referenced in
-- subplans
create table parted_minmax (a int, b varchar(16)) partition by range (a);
create table parted_minmax1 partition of parted_minmax for values from (1) to (10);
create index parted_minmax1i on parted_minmax1 (a, b);
insert into parted_minmax values (1,'12345');
explain (costs off) select min(a), max(a) from parted_minmax where b = '12345';
select min(a), max(a) from parted_minmax where b = '12345';
drop table parted_minmax;
-- Test code that uses Append nodes in place of MergeAppend when the
-- partition ordering matches the desired ordering.
create index mcrparted_a_abs_c_idx on mcrparted (a, abs(b), c);
-- MergeAppend must be used when a default partition exists
explain (costs off) select * from mcrparted order by a, abs(b), c;
drop table mcrparted_def;
-- Append is used for a RANGE partitioned table with no default
-- and no subpartitions
explain (costs off) select * from mcrparted order by a, abs(b), c;
-- Append is used with subpaths in reverse order with backwards index scans
explain (costs off) select * from mcrparted order by a desc, abs(b) desc, c desc;
-- check that an Append plan is used, with a MergeAppend for the
-- sub-partitioned table whose partitions are unordered.
drop table mcrparted5;
create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue) partition by list (a);
create table mcrparted5a partition of mcrparted5 for values in(20);
create table mcrparted5_def partition of mcrparted5 default;
explain (costs off) select * from mcrparted order by a, abs(b), c;
drop table mcrparted5_def;
-- check that an Append plan is used and the sub-partitions are flattened
-- into the main Append when the sub-partitioned table is nominally
-- unordered but contains just a single partition.
explain (costs off) select a, abs(b) from mcrparted order by a, abs(b), c;
-- check that Append is used when the sub-partitioned tables are pruned
-- during planning.
explain (costs off) select * from mcrparted where a < 20 order by a, abs(b), c;
set enable_bitmapscan to off;
set enable_sort to off;
create table mclparted (a int) partition by list(a);
create table mclparted1 partition of mclparted for values in(1);
create table mclparted2 partition of mclparted for values in(2);
create index on mclparted (a);
-- Ensure an Append is used for a list-partitioned table with an ORDER BY.
explain (costs off) select * from mclparted order by a;
-- Ensure a MergeAppend is used when a partition exists with interleaved
-- datums in the partition bound.
create table mclparted3_5 partition of mclparted for values in(3,5);
create table mclparted4 partition of mclparted for values in(4);
explain (costs off) select * from mclparted order by a;
explain (costs off) select * from mclparted where a in(3,4,5) order by a;
-- Introduce a NULL and DEFAULT partition so we can test more complex cases
create table mclparted_null partition of mclparted for values in(null);
create table mclparted_def partition of mclparted default;
-- Append can be used provided we don't scan the interleaved partition
explain (costs off) select * from mclparted where a in(1,2,4) order by a;
explain (costs off) select * from mclparted where a in(1,2,4) or a is null order by a;
-- Test a more complex case where the NULL partition allows some other value
drop table mclparted_null;
create table mclparted_0_null partition of mclparted for values in(0,null);
-- Ensure MergeAppend is used since 0 and NULLs are in the same partition.
explain (costs off) select * from mclparted where a in(1,2,4) or a is null order by a;
explain (costs off) select * from mclparted where a in(0,1,2,4) order by a;
-- Ensure Append is used when the null partition is pruned
explain (costs off) select * from mclparted where a in(1,2,4) order by a;
-- Ensure MergeAppend is used when the default partition is not pruned
explain (costs off) select * from mclparted where a in(1,2,4,100) order by a;
drop table mclparted;
reset enable_sort;
reset enable_bitmapscan;
-- Ensure subplans which don't have a path with the correct pathkeys get
-- sorted correctly.
drop index mcrparted_a_abs_c_idx;
create index on mcrparted1 (a, abs(b), c);
create index on mcrparted2 (a, abs(b), c);
create index on mcrparted3 (a, abs(b), c);
create index on mcrparted4 (a, abs(b), c);
explain (costs off) select * from mcrparted where a < 20 order by a, abs(b), c limit 1;
set enable_bitmapscan = 0;
-- Ensure an Append node can be used when the partitioned table is ordered
-- by some pathkeys which were deemed redundant.
explain (costs off) select * from mcrparted where a = 10 order by a, abs(b), c;
reset enable_bitmapscan;
drop table mcrparted;
-- Ensure LIST partitions allow an Append to be used instead of a MergeAppend
create table bool_lp (b bool) partition by list(b);
create table bool_lp_true partition of bool_lp for values in(true);
create table bool_lp_false partition of bool_lp for values in(false);
create index on bool_lp (b);
explain (costs off) select * from bool_lp order by b;
drop table bool_lp;
-- Ensure const bool quals can be properly detected as redundant
create table bool_rp (b bool, a int) partition by range(b,a);
create table bool_rp_false_1k partition of bool_rp for values from (false,0) to (false,1000);
create table bool_rp_true_1k partition of bool_rp for values from (true,0) to (true,1000);
create table bool_rp_false_2k partition of bool_rp for values from (false,1000) to (false,2000);
create table bool_rp_true_2k partition of bool_rp for values from (true,1000) to (true,2000);
create index on bool_rp (b,a);
explain (costs off) select * from bool_rp where b = true order by b,a;
explain (costs off) select * from bool_rp where b = false order by b,a;
explain (costs off) select * from bool_rp where b = true order by a;
explain (costs off) select * from bool_rp where b = false order by a;
drop table bool_rp;
-- Ensure an Append scan is chosen when the partition order is a subset of
-- the required order.
create table range_parted (a int, b int, c int) partition by range(a, b);
create table range_parted1 partition of range_parted for values from (0,0) to (10,10);
create table range_parted2 partition of range_parted for values from (10,10) to (20,20);
create index on range_parted (a,b,c);
explain (costs off) select * from range_parted order by a,b,c;
explain (costs off) select * from range_parted order by a desc,b desc,c desc;
drop table range_parted;
Allow access to child table statistics if user can read parent table.

The fix for CVE-2017-7484 disallowed use of pg_statistic data for planning purposes if the user would not be able to select the associated column and a non-leakproof function is to be applied to the statistics values. That turns out to disable use of pg_statistic data in some common cases involving inheritance/partitioning, where the user does have permission to select from the parent table that was actually named in the query, but not from a child table whose stats are needed. Since, in non-corner cases, the user *can* select the child table's data via the parent, this restriction is not actually useful from a security standpoint. Improve the logic so that we also check the permissions of the originally-named table, and allow access if select permission exists for that.

When checking access to stats for a simple child column, we can map the child column number back to the parent, and perform this test exactly (including not allowing access if the child column isn't exposed by the parent). For expression indexes, the current logic just insists on whole-table select access, and this patch allows access if the user can select the whole parent table. In principle, if the child table has extra columns, this might allow access to stats on columns the user can't read. In practice, it's unlikely that the planner is going to do any stats calculations involving expressions that are not visible to the query, so we'll ignore that fine point for now. Perhaps someday we'll improve that logic to detect exactly which columns are used by an expression index ... but today is not that day.

Back-patch to v11. The issue was created in 9.2 and up by the CVE-2017-7484 fix, but this patch depends on the append_rel_array[] planner data structure which only exists in v11 and up. In practice the issue is most urgent with partitioned tables, so fixing v11 and later should satisfy much of the practical need.

Dilip Kumar and Amit Langote, with some kibitzing by me

Discussion: https://postgr.es/m/3876.1531261875@sss.pgh.pa.us
2019-11-26 20:41:48 +01:00
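-- A minimal illustrative sketch (not part of the original test; names are
-- assumptions) of the reasoning above: a role granted SELECT on only the
-- parent can still read the child's rows through the parent, so refusing to
-- use the child's statistics gains nothing security-wise.
create table stats_sketch_parent (a int) partition by list (a);
create table stats_sketch_child partition of stats_sketch_parent for values in (1);
insert into stats_sketch_parent values (1);
create role regress_stats_sketch;
grant select on stats_sketch_parent to regress_stats_sketch;
set session authorization regress_stats_sketch;
select * from stats_sketch_parent;  -- allowed: reads the child's row via the parent
select * from stats_sketch_child;   -- fails: permission denied
reset session authorization;
drop table stats_sketch_parent;
drop role regress_stats_sketch;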
-- Check that we allow access to a child table's statistics when the user
-- has permissions only for the parent table.
create table permtest_parent (a int, b text, c text) partition by list (a);
create table permtest_child (b text, c text, a int) partition by list (b);
create table permtest_grandchild (c text, b text, a int);
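-- Note the deliberately different column orders above: child column numbers
-- do not line up with the parent's, so the planner must map child column
-- numbers back to the parent when checking statistics access.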
alter table permtest_child attach partition permtest_grandchild for values in ('a');
alter table permtest_parent attach partition permtest_child for values in (1);
create index on permtest_parent (left(c, 3));
insert into permtest_parent
select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i;
analyze permtest_parent;
create role regress_no_child_access;
revoke all on permtest_grandchild from regress_no_child_access;
grant select on permtest_parent to regress_no_child_access;
set session authorization regress_no_child_access;
-- without stats access, these queries would produce hash join plans:
explain (costs off)
select * from permtest_parent p1 inner join permtest_parent p2
on p1.a = p2.a and p1.c ~ 'a1$';
explain (costs off)
select * from permtest_parent p1 inner join permtest_parent p2
on p1.a = p2.a and left(p1.c, 3) ~ 'a1$';
reset session authorization;
revoke all on permtest_parent from regress_no_child_access;
grant select(a,c) on permtest_parent to regress_no_child_access;
set session authorization regress_no_child_access;
explain (costs off)
select p2.a, p1.c from permtest_parent p1 inner join permtest_parent p2
on p1.a = p2.a and p1.c ~ 'a1$';
-- we will not have access to the expression index's stats here:
explain (costs off)
select p2.a, p1.c from permtest_parent p1 inner join permtest_parent p2
on p1.a = p2.a and left(p1.c, 3) ~ 'a1$';
reset session authorization;
revoke all on permtest_parent from regress_no_child_access;
drop role regress_no_child_access;
drop table permtest_parent;
-- Verify that constraint errors across partition root / child are
-- handled correctly (Bug #16293)
CREATE TABLE errtst_parent (
partid int not null,
shdata int not null,
data int NOT NULL DEFAULT 0,
CONSTRAINT shdata_small CHECK(shdata < 3)
) PARTITION BY RANGE (partid);
-- fast defaults lead to attribute mapping being used in one
-- direction, but not the other
CREATE TABLE errtst_child_fastdef (
partid int not null,
shdata int not null,
CONSTRAINT shdata_small CHECK(shdata < 3)
);
-- no remapping in either direction necessary
CREATE TABLE errtst_child_plaindef (
partid int not null,
shdata int not null,
data int NOT NULL DEFAULT 0,
CONSTRAINT shdata_small CHECK(shdata < 3),
CHECK(data < 10)
);
-- remapping in both directions
CREATE TABLE errtst_child_reorder (
data int NOT NULL DEFAULT 0,
shdata int not null,
partid int not null,
CONSTRAINT shdata_small CHECK(shdata < 3),
CHECK(data < 10)
);
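-- Adding 'data' with a non-volatile DEFAULT after the table exists creates a
-- "fast default" (no table rewrite; the default is stored in the catalog),
-- which is what makes attribute mapping asymmetric for this child.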
ALTER TABLE errtst_child_fastdef ADD COLUMN data int NOT NULL DEFAULT 0;
ALTER TABLE errtst_child_fastdef ADD CONSTRAINT errtst_child_fastdef_data_check CHECK (data < 10);
ALTER TABLE errtst_parent ATTACH PARTITION errtst_child_fastdef FOR VALUES FROM (0) TO (10);
ALTER TABLE errtst_parent ATTACH PARTITION errtst_child_plaindef FOR VALUES FROM (10) TO (20);
ALTER TABLE errtst_parent ATTACH PARTITION errtst_child_reorder FOR VALUES FROM (20) TO (30);
-- insert without child check constraint error
INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '1', '5');
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '1', '5');
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '1', '5');
-- insert with child check constraint error
INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '1', '10');
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '1', '10');
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '1', '10');
-- insert with child not null constraint error
INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '1', NULL);
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '1', NULL);
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '1', NULL);
-- insert with shared check constraint error
INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '5', '5');
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '5', '5');
INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '5', '5');
-- within partition update without child check constraint violation
BEGIN;
UPDATE errtst_parent SET data = data + 1 WHERE partid = 0;
UPDATE errtst_parent SET data = data + 1 WHERE partid = 10;
UPDATE errtst_parent SET data = data + 1 WHERE partid = 20;
ROLLBACK;
-- within partition update with child check constraint violation
UPDATE errtst_parent SET data = data + 10 WHERE partid = 0;
UPDATE errtst_parent SET data = data + 10 WHERE partid = 10;
UPDATE errtst_parent SET data = data + 10 WHERE partid = 20;
-- direct leaf partition update, without partition id violation
BEGIN;
UPDATE errtst_child_fastdef SET partid = 1 WHERE partid = 0;
UPDATE errtst_child_plaindef SET partid = 11 WHERE partid = 10;
UPDATE errtst_child_reorder SET partid = 21 WHERE partid = 20;
ROLLBACK;
-- direct leaf partition update, with partition id violation
UPDATE errtst_child_fastdef SET partid = partid + 10 WHERE partid = 0;
UPDATE errtst_child_plaindef SET partid = partid + 10 WHERE partid = 10;
UPDATE errtst_child_reorder SET partid = partid + 10 WHERE partid = 20;
-- partition move, without child check constraint violation
BEGIN;
UPDATE errtst_parent SET partid = 10, data = data + 1 WHERE partid = 0;
UPDATE errtst_parent SET partid = 20, data = data + 1 WHERE partid = 10;
UPDATE errtst_parent SET partid = 0, data = data + 1 WHERE partid = 20;
ROLLBACK;
-- partition move, with child check constraint violation
UPDATE errtst_parent SET partid = 10, data = data + 10 WHERE partid = 0;
UPDATE errtst_parent SET partid = 20, data = data + 10 WHERE partid = 10;
UPDATE errtst_parent SET partid = 0, data = data + 10 WHERE partid = 20;
-- partition move, with no target partition accepting the new key
UPDATE errtst_parent SET partid = 30, data = data + 10 WHERE partid = 20;
DROP TABLE errtst_parent;