--
-- NUMERIC
--
CREATE TABLE num_data (id int4, val numeric(210,10));
CREATE TABLE num_exp_add (id1 int4, id2 int4, expected numeric(210,10));
CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(210,10));
CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(210,10));
CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(210,10));
CREATE TABLE num_exp_sqrt (id int4, expected numeric(210,10));
CREATE TABLE num_exp_ln (id int4, expected numeric(210,10));
CREATE TABLE num_exp_log10 (id int4, expected numeric(210,10));
CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(210,10));
CREATE TABLE num_result (id1 int4, id2 int4, result numeric(210,10));
-- ******************************
-- * The following EXPECTED results are computed by bc(1)
-- * with a scale of 200
-- ******************************
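-- For example, assuming a POSIX bc(1) is available, the (2,3) product in
-- num_exp_mul below can be re-derived with:
--   echo 'scale=200; -34338492.215397047 * 4.31' | bc
-- which prints -147998901.44836127257.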
BEGIN TRANSACTION;
INSERT INTO num_exp_add VALUES (0,0,'0');
INSERT INTO num_exp_sub VALUES (0,0,'0');
INSERT INTO num_exp_mul VALUES (0,0,'0');
INSERT INTO num_exp_div VALUES (0,0,'NaN');
INSERT INTO num_exp_add VALUES (0,1,'0');
INSERT INTO num_exp_sub VALUES (0,1,'0');
INSERT INTO num_exp_mul VALUES (0,1,'0');
INSERT INTO num_exp_div VALUES (0,1,'NaN');
INSERT INTO num_exp_add VALUES (0,2,'-34338492.215397047');
INSERT INTO num_exp_sub VALUES (0,2,'34338492.215397047');
INSERT INTO num_exp_mul VALUES (0,2,'0');
INSERT INTO num_exp_div VALUES (0,2,'0');
INSERT INTO num_exp_add VALUES (0,3,'4.31');
INSERT INTO num_exp_sub VALUES (0,3,'-4.31');
INSERT INTO num_exp_mul VALUES (0,3,'0');
INSERT INTO num_exp_div VALUES (0,3,'0');
INSERT INTO num_exp_add VALUES (0,4,'7799461.4119');
INSERT INTO num_exp_sub VALUES (0,4,'-7799461.4119');
INSERT INTO num_exp_mul VALUES (0,4,'0');
INSERT INTO num_exp_div VALUES (0,4,'0');
INSERT INTO num_exp_add VALUES (0,5,'16397.038491');
INSERT INTO num_exp_sub VALUES (0,5,'-16397.038491');
INSERT INTO num_exp_mul VALUES (0,5,'0');
INSERT INTO num_exp_div VALUES (0,5,'0');
INSERT INTO num_exp_add VALUES (0,6,'93901.57763026');
INSERT INTO num_exp_sub VALUES (0,6,'-93901.57763026');
INSERT INTO num_exp_mul VALUES (0,6,'0');
INSERT INTO num_exp_div VALUES (0,6,'0');
INSERT INTO num_exp_add VALUES (0,7,'-83028485');
INSERT INTO num_exp_sub VALUES (0,7,'83028485');
INSERT INTO num_exp_mul VALUES (0,7,'0');
INSERT INTO num_exp_div VALUES (0,7,'0');
INSERT INTO num_exp_add VALUES (0,8,'74881');
INSERT INTO num_exp_sub VALUES (0,8,'-74881');
INSERT INTO num_exp_mul VALUES (0,8,'0');
INSERT INTO num_exp_div VALUES (0,8,'0');
INSERT INTO num_exp_add VALUES (0,9,'-24926804.045047420');
INSERT INTO num_exp_sub VALUES (0,9,'24926804.045047420');
INSERT INTO num_exp_mul VALUES (0,9,'0');
INSERT INTO num_exp_div VALUES (0,9,'0');
INSERT INTO num_exp_add VALUES (1,0,'0');
INSERT INTO num_exp_sub VALUES (1,0,'0');
INSERT INTO num_exp_mul VALUES (1,0,'0');
INSERT INTO num_exp_div VALUES (1,0,'NaN');
INSERT INTO num_exp_add VALUES (1,1,'0');
INSERT INTO num_exp_sub VALUES (1,1,'0');
INSERT INTO num_exp_mul VALUES (1,1,'0');
INSERT INTO num_exp_div VALUES (1,1,'NaN');
INSERT INTO num_exp_add VALUES (1,2,'-34338492.215397047');
INSERT INTO num_exp_sub VALUES (1,2,'34338492.215397047');
INSERT INTO num_exp_mul VALUES (1,2,'0');
INSERT INTO num_exp_div VALUES (1,2,'0');
INSERT INTO num_exp_add VALUES (1,3,'4.31');
INSERT INTO num_exp_sub VALUES (1,3,'-4.31');
INSERT INTO num_exp_mul VALUES (1,3,'0');
INSERT INTO num_exp_div VALUES (1,3,'0');
INSERT INTO num_exp_add VALUES (1,4,'7799461.4119');
INSERT INTO num_exp_sub VALUES (1,4,'-7799461.4119');
INSERT INTO num_exp_mul VALUES (1,4,'0');
INSERT INTO num_exp_div VALUES (1,4,'0');
INSERT INTO num_exp_add VALUES (1,5,'16397.038491');
INSERT INTO num_exp_sub VALUES (1,5,'-16397.038491');
INSERT INTO num_exp_mul VALUES (1,5,'0');
INSERT INTO num_exp_div VALUES (1,5,'0');
INSERT INTO num_exp_add VALUES (1,6,'93901.57763026');
INSERT INTO num_exp_sub VALUES (1,6,'-93901.57763026');
INSERT INTO num_exp_mul VALUES (1,6,'0');
INSERT INTO num_exp_div VALUES (1,6,'0');
INSERT INTO num_exp_add VALUES (1,7,'-83028485');
INSERT INTO num_exp_sub VALUES (1,7,'83028485');
INSERT INTO num_exp_mul VALUES (1,7,'0');
INSERT INTO num_exp_div VALUES (1,7,'0');
INSERT INTO num_exp_add VALUES (1,8,'74881');
INSERT INTO num_exp_sub VALUES (1,8,'-74881');
INSERT INTO num_exp_mul VALUES (1,8,'0');
INSERT INTO num_exp_div VALUES (1,8,'0');
INSERT INTO num_exp_add VALUES (1,9,'-24926804.045047420');
INSERT INTO num_exp_sub VALUES (1,9,'24926804.045047420');
INSERT INTO num_exp_mul VALUES (1,9,'0');
INSERT INTO num_exp_div VALUES (1,9,'0');
INSERT INTO num_exp_add VALUES (2,0,'-34338492.215397047');
INSERT INTO num_exp_sub VALUES (2,0,'-34338492.215397047');
INSERT INTO num_exp_mul VALUES (2,0,'0');
INSERT INTO num_exp_div VALUES (2,0,'NaN');
INSERT INTO num_exp_add VALUES (2,1,'-34338492.215397047');
INSERT INTO num_exp_sub VALUES (2,1,'-34338492.215397047');
INSERT INTO num_exp_mul VALUES (2,1,'0');
INSERT INTO num_exp_div VALUES (2,1,'NaN');
INSERT INTO num_exp_add VALUES (2,2,'-68676984.430794094');
INSERT INTO num_exp_sub VALUES (2,2,'0');
INSERT INTO num_exp_mul VALUES (2,2,'1179132047626883.596862135856320209');
INSERT INTO num_exp_div VALUES (2,2,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (2,3,'-34338487.905397047');
INSERT INTO num_exp_sub VALUES (2,3,'-34338496.525397047');
INSERT INTO num_exp_mul VALUES (2,3,'-147998901.44836127257');
INSERT INTO num_exp_div VALUES (2,3,'-7967167.56737750510440835266');
INSERT INTO num_exp_add VALUES (2,4,'-26539030.803497047');
INSERT INTO num_exp_sub VALUES (2,4,'-42137953.627297047');
INSERT INTO num_exp_mul VALUES (2,4,'-267821744976817.8111137106593');
INSERT INTO num_exp_div VALUES (2,4,'-4.40267480046830116685');
INSERT INTO num_exp_add VALUES (2,5,'-34322095.176906047');
INSERT INTO num_exp_sub VALUES (2,5,'-34354889.253888047');
INSERT INTO num_exp_mul VALUES (2,5,'-563049578578.769242506736077');
INSERT INTO num_exp_div VALUES (2,5,'-2094.18866914563535496429');
INSERT INTO num_exp_add VALUES (2,6,'-34244590.637766787');
INSERT INTO num_exp_sub VALUES (2,6,'-34432393.793027307');
INSERT INTO num_exp_mul VALUES (2,6,'-3224438592470.18449811926184222');
INSERT INTO num_exp_div VALUES (2,6,'-365.68599891479766440940');
INSERT INTO num_exp_add VALUES (2,7,'-117366977.215397047');
INSERT INTO num_exp_sub VALUES (2,7,'48689992.784602953');
INSERT INTO num_exp_mul VALUES (2,7,'2851072985828710.485883795');
INSERT INTO num_exp_div VALUES (2,7,'.41357483778485235518');
INSERT INTO num_exp_add VALUES (2,8,'-34263611.215397047');
INSERT INTO num_exp_sub VALUES (2,8,'-34413373.215397047');
INSERT INTO num_exp_mul VALUES (2,8,'-2571300635581.146276407');
INSERT INTO num_exp_div VALUES (2,8,'-458.57416721727870888476');
INSERT INTO num_exp_add VALUES (2,9,'-59265296.260444467');
INSERT INTO num_exp_sub VALUES (2,9,'-9411688.170349627');
INSERT INTO num_exp_mul VALUES (2,9,'855948866655588.453741509242968740');
INSERT INTO num_exp_div VALUES (2,9,'1.37757299946438931811');
INSERT INTO num_exp_add VALUES (3,0,'4.31');
INSERT INTO num_exp_sub VALUES (3,0,'4.31');
INSERT INTO num_exp_mul VALUES (3,0,'0');
INSERT INTO num_exp_div VALUES (3,0,'NaN');
INSERT INTO num_exp_add VALUES (3,1,'4.31');
INSERT INTO num_exp_sub VALUES (3,1,'4.31');
INSERT INTO num_exp_mul VALUES (3,1,'0');
INSERT INTO num_exp_div VALUES (3,1,'NaN');
INSERT INTO num_exp_add VALUES (3,2,'-34338487.905397047');
INSERT INTO num_exp_sub VALUES (3,2,'34338496.525397047');
INSERT INTO num_exp_mul VALUES (3,2,'-147998901.44836127257');
INSERT INTO num_exp_div VALUES (3,2,'-.00000012551512084352');
INSERT INTO num_exp_add VALUES (3,3,'8.62');
INSERT INTO num_exp_sub VALUES (3,3,'0');
INSERT INTO num_exp_mul VALUES (3,3,'18.5761');
INSERT INTO num_exp_div VALUES (3,3,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (3,4,'7799465.7219');
INSERT INTO num_exp_sub VALUES (3,4,'-7799457.1019');
INSERT INTO num_exp_mul VALUES (3,4,'33615678.685289');
INSERT INTO num_exp_div VALUES (3,4,'.00000055260225961552');
INSERT INTO num_exp_add VALUES (3,5,'16401.348491');
INSERT INTO num_exp_sub VALUES (3,5,'-16392.728491');
INSERT INTO num_exp_mul VALUES (3,5,'70671.23589621');
INSERT INTO num_exp_div VALUES (3,5,'.00026285234387695504');
INSERT INTO num_exp_add VALUES (3,6,'93905.88763026');
INSERT INTO num_exp_sub VALUES (3,6,'-93897.26763026');
INSERT INTO num_exp_mul VALUES (3,6,'404715.7995864206');
INSERT INTO num_exp_div VALUES (3,6,'.00004589912234457595');
INSERT INTO num_exp_add VALUES (3,7,'-83028480.69');
INSERT INTO num_exp_sub VALUES (3,7,'83028489.31');
INSERT INTO num_exp_mul VALUES (3,7,'-357852770.35');
INSERT INTO num_exp_div VALUES (3,7,'-.00000005190989574240');
INSERT INTO num_exp_add VALUES (3,8,'74885.31');
INSERT INTO num_exp_sub VALUES (3,8,'-74876.69');
INSERT INTO num_exp_mul VALUES (3,8,'322737.11');
INSERT INTO num_exp_div VALUES (3,8,'.00005755799201399553');
INSERT INTO num_exp_add VALUES (3,9,'-24926799.735047420');
INSERT INTO num_exp_sub VALUES (3,9,'24926808.355047420');
INSERT INTO num_exp_mul VALUES (3,9,'-107434525.43415438020');
INSERT INTO num_exp_div VALUES (3,9,'-.00000017290624149854');
INSERT INTO num_exp_add VALUES (4,0,'7799461.4119');
INSERT INTO num_exp_sub VALUES (4,0,'7799461.4119');
INSERT INTO num_exp_mul VALUES (4,0,'0');
INSERT INTO num_exp_div VALUES (4,0,'NaN');
INSERT INTO num_exp_add VALUES (4,1,'7799461.4119');
INSERT INTO num_exp_sub VALUES (4,1,'7799461.4119');
INSERT INTO num_exp_mul VALUES (4,1,'0');
INSERT INTO num_exp_div VALUES (4,1,'NaN');
INSERT INTO num_exp_add VALUES (4,2,'-26539030.803497047');
INSERT INTO num_exp_sub VALUES (4,2,'42137953.627297047');
INSERT INTO num_exp_mul VALUES (4,2,'-267821744976817.8111137106593');
INSERT INTO num_exp_div VALUES (4,2,'-.22713465002993920385');
INSERT INTO num_exp_add VALUES (4,3,'7799465.7219');
INSERT INTO num_exp_sub VALUES (4,3,'7799457.1019');
INSERT INTO num_exp_mul VALUES (4,3,'33615678.685289');
INSERT INTO num_exp_div VALUES (4,3,'1809619.81714617169373549883');
INSERT INTO num_exp_add VALUES (4,4,'15598922.8238');
INSERT INTO num_exp_sub VALUES (4,4,'0');
INSERT INTO num_exp_mul VALUES (4,4,'60831598315717.14146161');
INSERT INTO num_exp_div VALUES (4,4,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (4,5,'7815858.450391');
INSERT INTO num_exp_sub VALUES (4,5,'7783064.373409');
INSERT INTO num_exp_mul VALUES (4,5,'127888068979.9935054429');
INSERT INTO num_exp_div VALUES (4,5,'475.66281046305802686061');
INSERT INTO num_exp_add VALUES (4,6,'7893362.98953026');
INSERT INTO num_exp_sub VALUES (4,6,'7705559.83426974');
INSERT INTO num_exp_mul VALUES (4,6,'732381731243.745115764094');
INSERT INTO num_exp_div VALUES (4,6,'83.05996138436129499606');
INSERT INTO num_exp_add VALUES (4,7,'-75229023.5881');
INSERT INTO num_exp_sub VALUES (4,7,'90827946.4119');
INSERT INTO num_exp_mul VALUES (4,7,'-647577464846017.9715');
INSERT INTO num_exp_div VALUES (4,7,'-.09393717604145131637');
INSERT INTO num_exp_add VALUES (4,8,'7874342.4119');
INSERT INTO num_exp_sub VALUES (4,8,'7724580.4119');
INSERT INTO num_exp_mul VALUES (4,8,'584031469984.4839');
INSERT INTO num_exp_div VALUES (4,8,'104.15808298366741897143');
INSERT INTO num_exp_add VALUES (4,9,'-17127342.633147420');
INSERT INTO num_exp_sub VALUES (4,9,'32726265.456947420');
INSERT INTO num_exp_mul VALUES (4,9,'-194415646271340.1815956522980');
INSERT INTO num_exp_div VALUES (4,9,'-.31289456112403769409');
INSERT INTO num_exp_add VALUES (5,0,'16397.038491');
INSERT INTO num_exp_sub VALUES (5,0,'16397.038491');
INSERT INTO num_exp_mul VALUES (5,0,'0');
INSERT INTO num_exp_div VALUES (5,0,'NaN');
INSERT INTO num_exp_add VALUES (5,1,'16397.038491');
INSERT INTO num_exp_sub VALUES (5,1,'16397.038491');
INSERT INTO num_exp_mul VALUES (5,1,'0');
INSERT INTO num_exp_div VALUES (5,1,'NaN');
INSERT INTO num_exp_add VALUES (5,2,'-34322095.176906047');
INSERT INTO num_exp_sub VALUES (5,2,'34354889.253888047');
INSERT INTO num_exp_mul VALUES (5,2,'-563049578578.769242506736077');
INSERT INTO num_exp_div VALUES (5,2,'-.00047751189505192446');
INSERT INTO num_exp_add VALUES (5,3,'16401.348491');
INSERT INTO num_exp_sub VALUES (5,3,'16392.728491');
INSERT INTO num_exp_mul VALUES (5,3,'70671.23589621');
INSERT INTO num_exp_div VALUES (5,3,'3804.41728329466357308584');
INSERT INTO num_exp_add VALUES (5,4,'7815858.450391');
INSERT INTO num_exp_sub VALUES (5,4,'-7783064.373409');
INSERT INTO num_exp_mul VALUES (5,4,'127888068979.9935054429');
INSERT INTO num_exp_div VALUES (5,4,'.00210232958726897192');
INSERT INTO num_exp_add VALUES (5,5,'32794.076982');
INSERT INTO num_exp_sub VALUES (5,5,'0');
INSERT INTO num_exp_mul VALUES (5,5,'268862871.275335557081');
INSERT INTO num_exp_div VALUES (5,5,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (5,6,'110298.61612126');
INSERT INTO num_exp_sub VALUES (5,6,'-77504.53913926');
INSERT INTO num_exp_mul VALUES (5,6,'1539707782.76899778633766');
INSERT INTO num_exp_div VALUES (5,6,'.17461941433576102689');
INSERT INTO num_exp_add VALUES (5,7,'-83012087.961509');
INSERT INTO num_exp_sub VALUES (5,7,'83044882.038491');
INSERT INTO num_exp_mul VALUES (5,7,'-1361421264394.416135');
INSERT INTO num_exp_div VALUES (5,7,'-.00019748690453643710');
INSERT INTO num_exp_add VALUES (5,8,'91278.038491');
INSERT INTO num_exp_sub VALUES (5,8,'-58483.961509');
INSERT INTO num_exp_mul VALUES (5,8,'1227826639.244571');
INSERT INTO num_exp_div VALUES (5,8,'.21897461960978085228');
INSERT INTO num_exp_add VALUES (5,9,'-24910407.006556420');
INSERT INTO num_exp_sub VALUES (5,9,'24943201.083538420');
INSERT INTO num_exp_mul VALUES (5,9,'-408725765384.257043660243220');
INSERT INTO num_exp_div VALUES (5,9,'-.00065780749354660427');
INSERT INTO num_exp_add VALUES (6,0,'93901.57763026');
INSERT INTO num_exp_sub VALUES (6,0,'93901.57763026');
INSERT INTO num_exp_mul VALUES (6,0,'0');
INSERT INTO num_exp_div VALUES (6,0,'NaN');
INSERT INTO num_exp_add VALUES (6,1,'93901.57763026');
INSERT INTO num_exp_sub VALUES (6,1,'93901.57763026');
INSERT INTO num_exp_mul VALUES (6,1,'0');
INSERT INTO num_exp_div VALUES (6,1,'NaN');
INSERT INTO num_exp_add VALUES (6,2,'-34244590.637766787');
INSERT INTO num_exp_sub VALUES (6,2,'34432393.793027307');
INSERT INTO num_exp_mul VALUES (6,2,'-3224438592470.18449811926184222');
INSERT INTO num_exp_div VALUES (6,2,'-.00273458651128995823');
INSERT INTO num_exp_add VALUES (6,3,'93905.88763026');
INSERT INTO num_exp_sub VALUES (6,3,'93897.26763026');
INSERT INTO num_exp_mul VALUES (6,3,'404715.7995864206');
INSERT INTO num_exp_div VALUES (6,3,'21786.90896293735498839907');
INSERT INTO num_exp_add VALUES (6,4,'7893362.98953026');
INSERT INTO num_exp_sub VALUES (6,4,'-7705559.83426974');
INSERT INTO num_exp_mul VALUES (6,4,'732381731243.745115764094');
INSERT INTO num_exp_div VALUES (6,4,'.01203949512295682469');
INSERT INTO num_exp_add VALUES (6,5,'110298.61612126');
INSERT INTO num_exp_sub VALUES (6,5,'77504.53913926');
INSERT INTO num_exp_mul VALUES (6,5,'1539707782.76899778633766');
INSERT INTO num_exp_div VALUES (6,5,'5.72674008674192359679');
INSERT INTO num_exp_add VALUES (6,6,'187803.15526052');
INSERT INTO num_exp_sub VALUES (6,6,'0');
INSERT INTO num_exp_mul VALUES (6,6,'8817506281.4517452372676676');
INSERT INTO num_exp_div VALUES (6,6,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (6,7,'-82934583.42236974');
INSERT INTO num_exp_sub VALUES (6,7,'83122386.57763026');
INSERT INTO num_exp_mul VALUES (6,7,'-7796505729750.37795610');
INSERT INTO num_exp_div VALUES (6,7,'-.00113095617281538980');
INSERT INTO num_exp_add VALUES (6,8,'168782.57763026');
INSERT INTO num_exp_sub VALUES (6,8,'19020.57763026');
INSERT INTO num_exp_mul VALUES (6,8,'7031444034.53149906');
INSERT INTO num_exp_div VALUES (6,8,'1.25401073209839612184');
INSERT INTO num_exp_add VALUES (6,9,'-24832902.467417160');
INSERT INTO num_exp_sub VALUES (6,9,'25020705.622677680');
INSERT INTO num_exp_mul VALUES (6,9,'-2340666225110.29929521292692920');
INSERT INTO num_exp_div VALUES (6,9,'-.00376709254265256789');
INSERT INTO num_exp_add VALUES (7,0,'-83028485');
INSERT INTO num_exp_sub VALUES (7,0,'-83028485');
INSERT INTO num_exp_mul VALUES (7,0,'0');
INSERT INTO num_exp_div VALUES (7,0,'NaN');
INSERT INTO num_exp_add VALUES (7,1,'-83028485');
INSERT INTO num_exp_sub VALUES (7,1,'-83028485');
INSERT INTO num_exp_mul VALUES (7,1,'0');
INSERT INTO num_exp_div VALUES (7,1,'NaN');
INSERT INTO num_exp_add VALUES (7,2,'-117366977.215397047');
INSERT INTO num_exp_sub VALUES (7,2,'-48689992.784602953');
INSERT INTO num_exp_mul VALUES (7,2,'2851072985828710.485883795');
INSERT INTO num_exp_div VALUES (7,2,'2.41794207151503385700');
INSERT INTO num_exp_add VALUES (7,3,'-83028480.69');
INSERT INTO num_exp_sub VALUES (7,3,'-83028489.31');
INSERT INTO num_exp_mul VALUES (7,3,'-357852770.35');
INSERT INTO num_exp_div VALUES (7,3,'-19264149.65197215777262180974');
INSERT INTO num_exp_add VALUES (7,4,'-75229023.5881');
INSERT INTO num_exp_sub VALUES (7,4,'-90827946.4119');
INSERT INTO num_exp_mul VALUES (7,4,'-647577464846017.9715');
INSERT INTO num_exp_div VALUES (7,4,'-10.64541262725136247686');
INSERT INTO num_exp_add VALUES (7,5,'-83012087.961509');
INSERT INTO num_exp_sub VALUES (7,5,'-83044882.038491');
INSERT INTO num_exp_mul VALUES (7,5,'-1361421264394.416135');
INSERT INTO num_exp_div VALUES (7,5,'-5063.62688881730941836574');
INSERT INTO num_exp_add VALUES (7,6,'-82934583.42236974');
INSERT INTO num_exp_sub VALUES (7,6,'-83122386.57763026');
INSERT INTO num_exp_mul VALUES (7,6,'-7796505729750.37795610');
INSERT INTO num_exp_div VALUES (7,6,'-884.20756174009028770294');
INSERT INTO num_exp_add VALUES (7,7,'-166056970');
INSERT INTO num_exp_sub VALUES (7,7,'0');
INSERT INTO num_exp_mul VALUES (7,7,'6893729321395225');
INSERT INTO num_exp_div VALUES (7,7,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (7,8,'-82953604');
INSERT INTO num_exp_sub VALUES (7,8,'-83103366');
INSERT INTO num_exp_mul VALUES (7,8,'-6217255985285');
INSERT INTO num_exp_div VALUES (7,8,'-1108.80577182462841041118');
INSERT INTO num_exp_add VALUES (7,9,'-107955289.045047420');
INSERT INTO num_exp_sub VALUES (7,9,'-58101680.954952580');
INSERT INTO num_exp_mul VALUES (7,9,'2069634775752159.035758700');
INSERT INTO num_exp_div VALUES (7,9,'3.33089171198810413382');
INSERT INTO num_exp_add VALUES (8,0,'74881');
INSERT INTO num_exp_sub VALUES (8,0,'74881');
INSERT INTO num_exp_mul VALUES (8,0,'0');
INSERT INTO num_exp_div VALUES (8,0,'NaN');
INSERT INTO num_exp_add VALUES (8,1,'74881');
INSERT INTO num_exp_sub VALUES (8,1,'74881');
INSERT INTO num_exp_mul VALUES (8,1,'0');
INSERT INTO num_exp_div VALUES (8,1,'NaN');
INSERT INTO num_exp_add VALUES (8,2,'-34263611.215397047');
INSERT INTO num_exp_sub VALUES (8,2,'34413373.215397047');
INSERT INTO num_exp_mul VALUES (8,2,'-2571300635581.146276407');
INSERT INTO num_exp_div VALUES (8,2,'-.00218067233500788615');
INSERT INTO num_exp_add VALUES (8,3,'74885.31');
INSERT INTO num_exp_sub VALUES (8,3,'74876.69');
INSERT INTO num_exp_mul VALUES (8,3,'322737.11');
INSERT INTO num_exp_div VALUES (8,3,'17373.78190255220417633410');
INSERT INTO num_exp_add VALUES (8,4,'7874342.4119');
INSERT INTO num_exp_sub VALUES (8,4,'-7724580.4119');
INSERT INTO num_exp_mul VALUES (8,4,'584031469984.4839');
INSERT INTO num_exp_div VALUES (8,4,'.00960079113741758956');
INSERT INTO num_exp_add VALUES (8,5,'91278.038491');
INSERT INTO num_exp_sub VALUES (8,5,'58483.961509');
INSERT INTO num_exp_mul VALUES (8,5,'1227826639.244571');
INSERT INTO num_exp_div VALUES (8,5,'4.56673929509287019456');
INSERT INTO num_exp_add VALUES (8,6,'168782.57763026');
INSERT INTO num_exp_sub VALUES (8,6,'-19020.57763026');
INSERT INTO num_exp_mul VALUES (8,6,'7031444034.53149906');
INSERT INTO num_exp_div VALUES (8,6,'.79744134113322314424');
INSERT INTO num_exp_add VALUES (8,7,'-82953604');
INSERT INTO num_exp_sub VALUES (8,7,'83103366');
INSERT INTO num_exp_mul VALUES (8,7,'-6217255985285');
INSERT INTO num_exp_div VALUES (8,7,'-.00090187120721280172');
INSERT INTO num_exp_add VALUES (8,8,'149762');
INSERT INTO num_exp_sub VALUES (8,8,'0');
INSERT INTO num_exp_mul VALUES (8,8,'5607164161');
INSERT INTO num_exp_div VALUES (8,8,'1.00000000000000000000');
INSERT INTO num_exp_add VALUES (8,9,'-24851923.045047420');
INSERT INTO num_exp_sub VALUES (8,9,'25001685.045047420');
INSERT INTO num_exp_mul VALUES (8,9,'-1866544013697.195857020');
INSERT INTO num_exp_div VALUES (8,9,'-.00300403532938582735');
INSERT INTO num_exp_add VALUES (9,0,'-24926804.045047420');
INSERT INTO num_exp_sub VALUES (9,0,'-24926804.045047420');
INSERT INTO num_exp_mul VALUES (9,0,'0');
INSERT INTO num_exp_div VALUES (9,0,'NaN');
INSERT INTO num_exp_add VALUES (9,1,'-24926804.045047420');
INSERT INTO num_exp_sub VALUES (9,1,'-24926804.045047420');
INSERT INTO num_exp_mul VALUES (9,1,'0');
INSERT INTO num_exp_div VALUES (9,1,'NaN');
INSERT INTO num_exp_add VALUES (9,2,'-59265296.260444467');
INSERT INTO num_exp_sub VALUES (9,2,'9411688.170349627');
INSERT INTO num_exp_mul VALUES (9,2,'855948866655588.453741509242968740');
INSERT INTO num_exp_div VALUES (9,2,'.72591434384152961526');
INSERT INTO num_exp_add VALUES (9,3,'-24926799.735047420');
INSERT INTO num_exp_sub VALUES (9,3,'-24926808.355047420');
INSERT INTO num_exp_mul VALUES (9,3,'-107434525.43415438020');
INSERT INTO num_exp_div VALUES (9,3,'-5783481.21694835730858468677');
INSERT INTO num_exp_add VALUES (9,4,'-17127342.633147420');
INSERT INTO num_exp_sub VALUES (9,4,'-32726265.456947420');
INSERT INTO num_exp_mul VALUES (9,4,'-194415646271340.1815956522980');
INSERT INTO num_exp_div VALUES (9,4,'-3.19596478892958416484');
INSERT INTO num_exp_add VALUES (9,5,'-24910407.006556420');
INSERT INTO num_exp_sub VALUES (9,5,'-24943201.083538420');
INSERT INTO num_exp_mul VALUES (9,5,'-408725765384.257043660243220');
INSERT INTO num_exp_div VALUES (9,5,'-1520.20159364322004505807');
INSERT INTO num_exp_add VALUES (9,6,'-24832902.467417160');
INSERT INTO num_exp_sub VALUES (9,6,'-25020705.622677680');
INSERT INTO num_exp_mul VALUES (9,6,'-2340666225110.29929521292692920');
INSERT INTO num_exp_div VALUES (9,6,'-265.45671195426965751280');
INSERT INTO num_exp_add VALUES (9,7,'-107955289.045047420');
INSERT INTO num_exp_sub VALUES (9,7,'58101680.954952580');
INSERT INTO num_exp_mul VALUES (9,7,'2069634775752159.035758700');
INSERT INTO num_exp_div VALUES (9,7,'.30021990699995814689');
INSERT INTO num_exp_add VALUES (9,8,'-24851923.045047420');
INSERT INTO num_exp_sub VALUES (9,8,'-25001685.045047420');
INSERT INTO num_exp_mul VALUES (9,8,'-1866544013697.195857020');
INSERT INTO num_exp_div VALUES (9,8,'-332.88556569820675471748');
INSERT INTO num_exp_add VALUES (9,9,'-49853608.090094840');
INSERT INTO num_exp_sub VALUES (9,9,'0');
INSERT INTO num_exp_mul VALUES (9,9,'621345559900192.420120630048656400');
INSERT INTO num_exp_div VALUES (9,9,'1.00000000000000000000');
COMMIT TRANSACTION;
BEGIN TRANSACTION;
INSERT INTO num_exp_sqrt VALUES (0,'0');
INSERT INTO num_exp_sqrt VALUES (1,'0');
INSERT INTO num_exp_sqrt VALUES (2,'5859.90547836712524903505');
INSERT INTO num_exp_sqrt VALUES (3,'2.07605394920266944396');
INSERT INTO num_exp_sqrt VALUES (4,'2792.75158435189147418923');
INSERT INTO num_exp_sqrt VALUES (5,'128.05092147657509145473');
INSERT INTO num_exp_sqrt VALUES (6,'306.43364311096782703406');
INSERT INTO num_exp_sqrt VALUES (7,'9111.99676251039939975230');
INSERT INTO num_exp_sqrt VALUES (8,'273.64392922189960397542');
INSERT INTO num_exp_sqrt VALUES (9,'4992.67503899937593364766');
COMMIT TRANSACTION;
BEGIN TRANSACTION;
INSERT INTO num_exp_ln VALUES (0,'NaN');
INSERT INTO num_exp_ln VALUES (1,'NaN');
INSERT INTO num_exp_ln VALUES (2,'17.35177750493897715514');
INSERT INTO num_exp_ln VALUES (3,'1.46093790411565641971');
INSERT INTO num_exp_ln VALUES (4,'15.86956523951936572464');
INSERT INTO num_exp_ln VALUES (5,'9.70485601768871834038');
INSERT INTO num_exp_ln VALUES (6,'11.45000246622944403127');
INSERT INTO num_exp_ln VALUES (7,'18.23469429965478772991');
INSERT INTO num_exp_ln VALUES (8,'11.22365546576315513668');
INSERT INTO num_exp_ln VALUES (9,'17.03145425013166006962');
COMMIT TRANSACTION;
BEGIN TRANSACTION;
INSERT INTO num_exp_log10 VALUES (0,'NaN');
INSERT INTO num_exp_log10 VALUES (1,'NaN');
INSERT INTO num_exp_log10 VALUES (2,'7.53578122160797276459');
INSERT INTO num_exp_log10 VALUES (3,'.63447727016073160075');
INSERT INTO num_exp_log10 VALUES (4,'6.89206461372691743345');
INSERT INTO num_exp_log10 VALUES (5,'4.21476541614777768626');
INSERT INTO num_exp_log10 VALUES (6,'4.97267288886207207671');
INSERT INTO num_exp_log10 VALUES (7,'7.91922711353275546914');
INSERT INTO num_exp_log10 VALUES (8,'4.87437163556421004138');
INSERT INTO num_exp_log10 VALUES (9,'7.39666659961986567059');
COMMIT TRANSACTION;
BEGIN TRANSACTION;
INSERT INTO num_exp_power_10_ln VALUES (0,'NaN');
INSERT INTO num_exp_power_10_ln VALUES (1,'NaN');
INSERT INTO num_exp_power_10_ln VALUES (2,'224790267919917955.13261618583642653184');
INSERT INTO num_exp_power_10_ln VALUES (3,'28.90266599445155957393');
INSERT INTO num_exp_power_10_ln VALUES (4,'7405685069594999.07733999469386277636');
INSERT INTO num_exp_power_10_ln VALUES (5,'5068226527.32127265408584640098');
INSERT INTO num_exp_power_10_ln VALUES (6,'281839893606.99372343357047819067');
INSERT INTO num_exp_power_10_ln VALUES (7,'1716699575118597095.42330819910640247627');
INSERT INTO num_exp_power_10_ln VALUES (8,'167361463828.07491320069016125952');
INSERT INTO num_exp_power_10_ln VALUES (9,'107511333880052007.04141124673540337457');
COMMIT TRANSACTION;
BEGIN TRANSACTION;
INSERT INTO num_data VALUES (0, '0');
INSERT INTO num_data VALUES (1, '0');
INSERT INTO num_data VALUES (2, '-34338492.215397047');
INSERT INTO num_data VALUES (3, '4.31');
INSERT INTO num_data VALUES (4, '7799461.4119');
INSERT INTO num_data VALUES (5, '16397.038491');
INSERT INTO num_data VALUES (6, '93901.57763026');
INSERT INTO num_data VALUES (7, '-83028485');
INSERT INTO num_data VALUES (8, '74881');
INSERT INTO num_data VALUES (9, '-24926804.045047420');
COMMIT TRANSACTION;
-- ******************************
-- * Create indices for faster checks
-- ******************************
CREATE UNIQUE INDEX num_exp_add_idx ON num_exp_add (id1, id2);
CREATE UNIQUE INDEX num_exp_sub_idx ON num_exp_sub (id1, id2);
CREATE UNIQUE INDEX num_exp_div_idx ON num_exp_div (id1, id2);
CREATE UNIQUE INDEX num_exp_mul_idx ON num_exp_mul (id1, id2);
CREATE UNIQUE INDEX num_exp_sqrt_idx ON num_exp_sqrt (id);
CREATE UNIQUE INDEX num_exp_ln_idx ON num_exp_ln (id);
CREATE UNIQUE INDEX num_exp_log10_idx ON num_exp_log10 (id);
CREATE UNIQUE INDEX num_exp_power_10_ln_idx ON num_exp_power_10_ln (id);
VACUUM ANALYZE num_exp_add;
VACUUM ANALYZE num_exp_sub;
VACUUM ANALYZE num_exp_div;
VACUUM ANALYZE num_exp_mul;
VACUUM ANALYZE num_exp_sqrt;
VACUUM ANALYZE num_exp_ln;
VACUUM ANALYZE num_exp_log10;
VACUUM ANALYZE num_exp_power_10_ln;
-- ******************************
-- * Now check the behaviour of the NUMERIC type
-- ******************************
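-- Each check below loads computed values into num_result, then joins it
-- against the matching num_exp_* table and selects the rows where the
-- computed result differs from the expected one; "(0 rows)" means that
-- every value matched.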
-- ******************************
-- * Addition check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, t1.val + t2.val
FROM num_data t1, num_data t2;
SELECT t1.id1, t1.id2, t1.result, t2.expected
FROM num_result t1, num_exp_add t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != t2.expected;
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val + t2.val, 10)
FROM num_data t1, num_data t2;
SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected
FROM num_result t1, num_exp_add t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != round(t2.expected, 10);
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

-- ******************************
-- * Subtraction check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, t1.val - t2.val
FROM num_data t1, num_data t2;
SELECT t1.id1, t1.id2, t1.result, t2.expected
FROM num_result t1, num_exp_sub t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != t2.expected;
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val - t2.val, 40)
FROM num_data t1, num_data t2;
SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40)
FROM num_result t1, num_exp_sub t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != round(t2.expected, 40);
id1 | id2 | result | round
-----+-----+--------+-------
(0 rows)

-- ******************************
-- * Multiply check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, t1.val * t2.val
FROM num_data t1, num_data t2;
SELECT t1.id1, t1.id2, t1.result, t2.expected
FROM num_result t1, num_exp_mul t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != t2.expected;
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val * t2.val, 30)
FROM num_data t1, num_data t2;
SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected
FROM num_result t1, num_exp_mul t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != round(t2.expected, 30);
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

-- ******************************
-- * Division check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, t1.val / t2.val
FROM num_data t1, num_data t2
WHERE t2.val != '0.0';
SELECT t1.id1, t1.id2, t1.result, t2.expected
FROM num_result t1, num_exp_div t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != t2.expected;
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

DELETE FROM num_result;
INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val / t2.val, 80)
FROM num_data t1, num_data t2
WHERE t2.val != '0.0';
SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected
FROM num_result t1, num_exp_div t2
WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
AND t1.result != round(t2.expected, 80);
id1 | id2 | result | expected
-----+-----+--------+----------
(0 rows)

-- ******************************
-- * Square root check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT id, 0, SQRT(ABS(val))
FROM num_data;
SELECT t1.id1, t1.result, t2.expected
FROM num_result t1, num_exp_sqrt t2
WHERE t1.id1 = t2.id
AND t1.result != t2.expected;
id1 | result | expected
-----+--------+----------
(0 rows)

-- ******************************
-- * Natural logarithm check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT id, 0, LN(ABS(val))
FROM num_data
WHERE val != '0.0';
SELECT t1.id1, t1.result, t2.expected
FROM num_result t1, num_exp_ln t2
WHERE t1.id1 = t2.id
AND t1.result != t2.expected;
id1 | result | expected
-----+--------+----------
(0 rows)

-- ******************************
-- * Logarithm base 10 check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT id, 0, LOG(numeric '10', ABS(val))
FROM num_data
WHERE val != '0.0';
SELECT t1.id1, t1.result, t2.expected
FROM num_result t1, num_exp_log10 t2
WHERE t1.id1 = t2.id
AND t1.result != t2.expected;
id1 | result | expected
-----+--------+----------
(0 rows)

-- ******************************
-- * POWER(10, LN(value)) check
-- ******************************
DELETE FROM num_result;
INSERT INTO num_result SELECT id, 0, POWER(numeric '10', LN(ABS(round(val,200))))
FROM num_data
WHERE val != '0.0';
SELECT t1.id1, t1.result, t2.expected
FROM num_result t1, num_exp_power_10_ln t2
WHERE t1.id1 = t2.id
AND t1.result != t2.expected;
id1 | result | expected
-----+--------+----------
(0 rows)

-- ******************************
-- * Check behavior with Inf and NaN inputs. It's easiest to handle these
-- * separately from the num_data framework used above, because some input
-- * combinations will throw errors.
-- ******************************
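-- Note that the spellings 'inf', '-inf' and 'nan' are accepted
-- case-insensitively on input and come back as Infinity, -Infinity
-- and NaN on output.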
WITH v(x) AS
(VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan'))
SELECT x1, x2,
x1 + x2 AS sum,
x1 - x2 AS diff,
x1 * x2 AS prod
FROM v AS v1(x1), v AS v2(x2);
x1 | x2 | sum | diff | prod
-----------+-----------+-----------+-----------+-----------
0 | 0 | 0 | 0 | 0
0 | 1 | 1 | -1 | 0
0 | -1 | -1 | 1 | 0
0 | 4.2 | 4.2 | -4.2 | 0.0
0 | Infinity | Infinity | -Infinity | NaN
0 | -Infinity | -Infinity | Infinity | NaN
0 | NaN | NaN | NaN | NaN
1 | 0 | 1 | 1 | 0
1 | 1 | 2 | 0 | 1
1 | -1 | 0 | 2 | -1
1 | 4.2 | 5.2 | -3.2 | 4.2
1 | Infinity | Infinity | -Infinity | Infinity
1 | -Infinity | -Infinity | Infinity | -Infinity
1 | NaN | NaN | NaN | NaN
-1 | 0 | -1 | -1 | 0
-1 | 1 | 0 | -2 | -1
-1 | -1 | -2 | 0 | 1
-1 | 4.2 | 3.2 | -5.2 | -4.2
-1 | Infinity | Infinity | -Infinity | -Infinity
-1 | -Infinity | -Infinity | Infinity | Infinity
-1 | NaN | NaN | NaN | NaN
4.2 | 0 | 4.2 | 4.2 | 0.0
4.2 | 1 | 5.2 | 3.2 | 4.2
4.2 | -1 | 3.2 | 5.2 | -4.2
4.2 | 4.2 | 8.4 | 0.0 | 17.64
4.2 | Infinity | Infinity | -Infinity | Infinity
4.2 | -Infinity | -Infinity | Infinity | -Infinity
4.2 | NaN | NaN | NaN | NaN
Infinity | 0 | Infinity | Infinity | NaN
Infinity | 1 | Infinity | Infinity | Infinity
Infinity | -1 | Infinity | Infinity | -Infinity
Infinity | 4.2 | Infinity | Infinity | Infinity
Infinity | Infinity | Infinity | NaN | Infinity
Infinity | -Infinity | NaN | Infinity | -Infinity
Infinity | NaN | NaN | NaN | NaN
-Infinity | 0 | -Infinity | -Infinity | NaN
-Infinity | 1 | -Infinity | -Infinity | -Infinity
-Infinity | -1 | -Infinity | -Infinity | Infinity
-Infinity | 4.2 | -Infinity | -Infinity | -Infinity
-Infinity | Infinity | NaN | -Infinity | -Infinity
-Infinity | -Infinity | -Infinity | NaN | Infinity
-Infinity | NaN | NaN | NaN | NaN
NaN | 0 | NaN | NaN | NaN
NaN | 1 | NaN | NaN | NaN
NaN | -1 | NaN | NaN | NaN
NaN | 4.2 | NaN | NaN | NaN
NaN | Infinity | NaN | NaN | NaN
NaN | -Infinity | NaN | NaN | NaN
NaN | NaN | NaN | NaN | NaN
(49 rows)

WITH v(x) AS
(VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan'))
SELECT x1, x2,
x1 / x2 AS quot,
x1 % x2 AS mod,
div(x1, x2) AS div
FROM v AS v1(x1), v AS v2(x2) WHERE x2 != 0;
x1 | x2 | quot | mod | div
-----------+-----------+-------------------------+------+-----------
0 | 1 | 0.00000000000000000000 | 0 | 0
1 | 1 | 1.00000000000000000000 | 0 | 1
-1 | 1 | -1.00000000000000000000 | 0 | -1
4.2 | 1 | 4.2000000000000000 | 0.2 | 4
Infinity | 1 | Infinity | NaN | Infinity
-Infinity | 1 | -Infinity | NaN | -Infinity
NaN | 1 | NaN | NaN | NaN
0 | -1 | 0.00000000000000000000 | 0 | 0
1 | -1 | -1.00000000000000000000 | 0 | -1
-1 | -1 | 1.00000000000000000000 | 0 | 1
4.2 | -1 | -4.2000000000000000 | 0.2 | -4
Infinity | -1 | -Infinity | NaN | -Infinity
-Infinity | -1 | Infinity | NaN | Infinity
NaN | -1 | NaN | NaN | NaN
0 | 4.2 | 0.00000000000000000000 | 0.0 | 0
1 | 4.2 | 0.23809523809523809524 | 1.0 | 0
-1 | 4.2 | -0.23809523809523809524 | -1.0 | 0
4.2 | 4.2 | 1.00000000000000000000 | 0.0 | 1
Infinity | 4.2 | Infinity | NaN | Infinity
-Infinity | 4.2 | -Infinity | NaN | -Infinity
NaN | 4.2 | NaN | NaN | NaN
0 | Infinity | 0 | 0 | 0
1 | Infinity | 0 | 1 | 0
-1 | Infinity | 0 | -1 | 0
4.2 | Infinity | 0 | 4.2 | 0
Infinity | Infinity | NaN | NaN | NaN
-Infinity | Infinity | NaN | NaN | NaN
NaN | Infinity | NaN | NaN | NaN
0 | -Infinity | 0 | 0 | 0
1 | -Infinity | 0 | 1 | 0
-1 | -Infinity | 0 | -1 | 0
4.2 | -Infinity | 0 | 4.2 | 0
Infinity | -Infinity | NaN | NaN | NaN
-Infinity | -Infinity | NaN | NaN | NaN
NaN | -Infinity | NaN | NaN | NaN
0 | NaN | NaN | NaN | NaN
1 | NaN | NaN | NaN | NaN
-1 | NaN | NaN | NaN | NaN
4.2 | NaN | NaN | NaN | NaN
Infinity | NaN | NaN | NaN | NaN
-Infinity | NaN | NaN | NaN | NaN
NaN | NaN | NaN | NaN | NaN
(42 rows)

SELECT 'inf'::numeric / '0';
ERROR: division by zero
SELECT '-inf'::numeric / '0';
ERROR: division by zero
SELECT 'nan'::numeric / '0';
?column?
----------
NaN
(1 row)

SELECT '0'::numeric / '0';
ERROR: division by zero
SELECT 'inf'::numeric % '0';
ERROR: division by zero
SELECT '-inf'::numeric % '0';
ERROR: division by zero
SELECT 'nan'::numeric % '0';
?column?
----------
NaN
(1 row)

SELECT '0'::numeric % '0';
ERROR: division by zero
SELECT div('inf'::numeric, '0');
ERROR: division by zero
SELECT div('-inf'::numeric, '0');
ERROR: division by zero
SELECT div('nan'::numeric, '0');
div
-----
NaN
(1 row)

SELECT div('0'::numeric, '0');
ERROR: division by zero
WITH v(x) AS
(VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan'))
SELECT x, -x as minusx, abs(x), floor(x), ceil(x), sign(x), numeric_inc(x) as inc
FROM v;
x | minusx | abs | floor | ceil | sign | inc
-----------+-----------+----------+-----------+-----------+------+-----------
0 | 0 | 0 | 0 | 0 | 0 | 1
1 | -1 | 1 | 1 | 1 | 1 | 2
-1 | 1 | 1 | -1 | -1 | -1 | 0
4.2 | -4.2 | 4.2 | 4 | 5 | 1 | 5.2
-7.777 | 7.777 | 7.777 | -8 | -7 | -1 | -6.777
Infinity | -Infinity | Infinity | Infinity | Infinity | 1 | Infinity
-Infinity | Infinity | Infinity | -Infinity | -Infinity | -1 | -Infinity
NaN | NaN | NaN | NaN | NaN | NaN | NaN
(8 rows)

WITH v(x) AS
(VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan'))
SELECT x, round(x), round(x,1) as round1, trunc(x), trunc(x,1) as trunc1
FROM v;
x | round | round1 | trunc | trunc1
-----------+-----------+-----------+-----------+-----------
0 | 0 | 0.0 | 0 | 0.0
1 | 1 | 1.0 | 1 | 1.0
-1 | -1 | -1.0 | -1 | -1.0
4.2 | 4 | 4.2 | 4 | 4.2
-7.777 | -8 | -7.8 | -7 | -7.7
Infinity | Infinity | Infinity | Infinity | Infinity
-Infinity | -Infinity | -Infinity | -Infinity | -Infinity
NaN | NaN | NaN | NaN | NaN
(8 rows)

-- the large values fall into the numeric abbreviation code's maximal classes
WITH v(x) AS
(VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('1e340'),('-1e340'),
('inf'),('-inf'),('nan'),
('inf'),('-inf'),('nan'))
SELECT substring(x::text, 1, 32)
FROM v ORDER BY x;
substring
----------------------------------
-Infinity
-Infinity
-1000000000000000000000000000000
-7.777
-1
0
1
4.2
10000000000000000000000000000000
Infinity
Infinity
NaN
NaN
(13 rows)

WITH v(x) AS
(VALUES('0'::numeric),('1'),('4.2'),('inf'),('nan'))
SELECT x, sqrt(x)
FROM v;
x | sqrt
----------+-------------------
0 | 0.000000000000000
1 | 1.000000000000000
4.2 | 2.049390153191920
Infinity | Infinity
NaN | NaN
(5 rows)

SELECT sqrt('-1'::numeric);
ERROR: cannot take square root of a negative number
SELECT sqrt('-inf'::numeric);
ERROR: cannot take square root of a negative number
WITH v(x) AS
(VALUES('1'::numeric),('4.2'),('inf'),('nan'))
SELECT x,
log(x),
log10(x),
ln(x)
FROM v;
x | log | log10 | ln
----------+--------------------+--------------------+--------------------
1 | 0.0000000000000000 | 0.0000000000000000 | 0.0000000000000000
4.2 | 0.6232492903979005 | 0.6232492903979005 | 1.4350845252893226
Infinity | Infinity | Infinity | Infinity
NaN | NaN | NaN | NaN
(4 rows)

SELECT ln('0'::numeric);
ERROR: cannot take logarithm of zero
SELECT ln('-1'::numeric);
ERROR: cannot take logarithm of a negative number
SELECT ln('-inf'::numeric);
ERROR: cannot take logarithm of a negative number
WITH v(x) AS
(VALUES('2'::numeric),('4.2'),('inf'),('nan'))
SELECT x1, x2,
log(x1, x2)
FROM v AS v1(x1), v AS v2(x2);
x1 | x2 | log
----------+----------+--------------------
2 | 2 | 1.0000000000000000
2 | 4.2 | 2.0703893278913979
2 | Infinity | Infinity
2 | NaN | NaN
4.2 | 2 | 0.4830009440873890
4.2 | 4.2 | 1.0000000000000000
4.2 | Infinity | Infinity
4.2 | NaN | NaN
Infinity | 2 | 0
Infinity | 4.2 | 0
Infinity | Infinity | NaN
Infinity | NaN | NaN
NaN | 2 | NaN
NaN | 4.2 | NaN
NaN | Infinity | NaN
NaN | NaN | NaN
(16 rows)

SELECT log('0'::numeric, '10');
ERROR: cannot take logarithm of zero
SELECT log('10'::numeric, '0');
ERROR: cannot take logarithm of zero
SELECT log('-inf'::numeric, '10');
ERROR: cannot take logarithm of a negative number
SELECT log('10'::numeric, '-inf');
ERROR: cannot take logarithm of a negative number
SELECT log('inf'::numeric, '0');
ERROR: cannot take logarithm of zero
SELECT log('inf'::numeric, '-inf');
ERROR: cannot take logarithm of a negative number
SELECT log('-inf'::numeric, 'inf');
ERROR: cannot take logarithm of a negative number
WITH v(x) AS
(VALUES('0'::numeric),('1'),('2'),('4.2'),('inf'),('nan'))
SELECT x1, x2,
power(x1, x2)
FROM v AS v1(x1), v AS v2(x2) WHERE x1 != 0 OR x2 >= 0;
x1 | x2 | power
----------+----------+--------------------
0 | 0 | 1.0000000000000000
0 | 1 | 0.0000000000000000
0 | 2 | 0.0000000000000000
0 | 4.2 | 0.0000000000000000
0 | Infinity | 0
0 | NaN | NaN
1 | 0 | 1.0000000000000000
1 | 1 | 1.0000000000000000
1 | 2 | 1.0000000000000000
1 | 4.2 | 1.0000000000000000
1 | Infinity | 1
1 | NaN | 1
2 | 0 | 1.0000000000000000
2 | 1 | 2.0000000000000000
2 | 2 | 4.0000000000000000
2 | 4.2 | 18.379173679952560
2 | Infinity | Infinity
2 | NaN | NaN
4.2 | 0 | 1.0000000000000000
4.2 | 1 | 4.2000000000000000
4.2 | 2 | 17.640000000000000
4.2 | 4.2 | 414.61691860129675
4.2 | Infinity | Infinity
4.2 | NaN | NaN
Infinity | 0 | 1
Infinity | 1 | Infinity
Infinity | 2 | Infinity
Infinity | 4.2 | Infinity
Infinity | Infinity | Infinity
Infinity | NaN | NaN
NaN | 0 | 1
NaN | 1 | NaN
NaN | 2 | NaN
NaN | 4.2 | NaN
NaN | Infinity | NaN
NaN | NaN | NaN
(36 rows)

SELECT power('0'::numeric, '-1');
ERROR: zero raised to a negative power is undefined
SELECT power('0'::numeric, '-inf');
ERROR: zero raised to a negative power is undefined
SELECT power('-1'::numeric, 'inf');
power
-------
1
(1 row)

SELECT power('-2'::numeric, '3');
power
---------------------
-8.0000000000000000
(1 row)

SELECT power('-2'::numeric, '3.3');
ERROR: a negative number raised to a non-integer power yields a complex result
SELECT power('-2'::numeric, '-1');
power
---------------------
-0.5000000000000000
(1 row)

SELECT power('-2'::numeric, '-1.5');
ERROR: a negative number raised to a non-integer power yields a complex result
SELECT power('-2'::numeric, 'inf');
power
----------
Infinity
(1 row)

SELECT power('-2'::numeric, '-inf');
power
-------
0
(1 row)

SELECT power('inf'::numeric, '-2');
power
-------
0
(1 row)

SELECT power('inf'::numeric, '-inf');
power
-------
0
(1 row)

SELECT power('-inf'::numeric, '2');
power
----------
Infinity
(1 row)

SELECT power('-inf'::numeric, '3');
power
-----------
-Infinity
(1 row)

SELECT power('-inf'::numeric, '4.5');
ERROR: a negative number raised to a non-integer power yields a complex result
SELECT power('-inf'::numeric, '-2');
power
-------
0
(1 row)

SELECT power('-inf'::numeric, '-3');
power
-------
0
(1 row)

SELECT power('-inf'::numeric, '0');
power
-------
1
(1 row)

SELECT power('-inf'::numeric, 'inf');
power
----------
Infinity
(1 row)

SELECT power('-inf'::numeric, '-inf');
power
-------
0
(1 row)

-- ******************************
-- * miscellaneous checks for things that have been broken in the past...
-- ******************************
-- numeric AVG used to fail on some platforms
SELECT AVG(val) FROM num_data;
avg
------------------------
-13430913.592242320700
(1 row)

SELECT MAX(val) FROM num_data;
max
--------------------
7799461.4119000000
(1 row)

SELECT MIN(val) FROM num_data;
min
----------------------
-83028485.0000000000
(1 row)

SELECT STDDEV(val) FROM num_data;
stddev
-------------------------------
27791203.28758835329805617386
(1 row)

SELECT VARIANCE(val) FROM num_data;
variance
--------------------------------------
772350980172061.69659105821915863601
(1 row)

-- Check for appropriate rounding and overflow
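-- numeric(4,4) devotes all four digits to the fractional part, so any
-- value that rounds to an absolute value of 1 or more overflows the field.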
|
|
|
|
CREATE TABLE fract_only (id int, val numeric(4,4));
|
|
|
|
INSERT INTO fract_only VALUES (1, '0.0');
|
|
|
|
INSERT INTO fract_only VALUES (2, '0.1');
|
|
|
|
INSERT INTO fract_only VALUES (3, '1.0'); -- should fail
|
2003-07-27 06:53:12 +02:00
|
|
|
ERROR: numeric field overflow
|
2006-10-03 23:25:56 +02:00
|
|
|
DETAIL: A field with precision 4, scale 4 must round to an absolute value less than 1.
|
2000-01-16 00:44:17 +01:00
|
|
|
INSERT INTO fract_only VALUES (4, '-0.9999');
|
|
|
|
INSERT INTO fract_only VALUES (5, '0.99994');
|
|
|
|
INSERT INTO fract_only VALUES (6, '0.99995'); -- should fail
|
2003-07-27 06:53:12 +02:00
|
|
|
ERROR: numeric field overflow
|
2006-10-03 23:25:56 +02:00
|
|
|
DETAIL: A field with precision 4, scale 4 must round to an absolute value less than 1.
|
2000-01-16 00:44:17 +01:00
|
|
|
INSERT INTO fract_only VALUES (7, '0.00001');
|
|
|
|
INSERT INTO fract_only VALUES (8, '0.00017');
|
2020-07-23 01:19:44 +02:00
|
|
|
INSERT INTO fract_only VALUES (9, 'NaN');
|
|
|
|
INSERT INTO fract_only VALUES (10, 'Inf'); -- should fail
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 4, scale 4 cannot hold an infinite value.
|
|
|
|
INSERT INTO fract_only VALUES (11, '-Inf'); -- should fail
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 4, scale 4 cannot hold an infinite value.
|
2000-01-16 00:44:17 +01:00
|
|
|
SELECT * FROM fract_only;
|
|
|
|
id | val
|
|
|
|
----+---------
|
|
|
|
1 | 0.0000
|
|
|
|
2 | 0.1000
|
|
|
|
4 | -0.9999
|
|
|
|
5 | 0.9999
|
|
|
|
7 | 0.0000
|
|
|
|
8 | 0.0002
|
2020-07-23 01:19:44 +02:00
|
|
|
9 | NaN
|
|
|
|
(7 rows)
|
2000-01-16 00:44:17 +01:00
|
|
|
|
|
|
|
DROP TABLE fract_only;
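
-- A hedged sketch of the rounding rule exercised above: values are rounded
-- to the declared scale (ties away from zero) before the precision check.
SELECT '0.99994'::numeric(4,4) AS rounds_down,   -- expected 0.9999
       '-0.00005'::numeric(4,4) AS ties_away;    -- expected -0.0001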

-- Adjust the integer overflow tests in the numeric code.
-- Formerly, the numeric code tested whether an integer value of a larger
-- type would fit in a smaller type by casting it to the smaller type and
-- then testing if the reverse conversion produced the original value.
-- That's perfectly fine, except that it caused a test failure on
-- buildfarm animal castoroides, most likely due to a compiler bug.
-- Instead, do these tests by comparing against PG_INT16/32_MIN/MAX. That
-- matches existing code in other places, such as int84(), which is more
-- widely tested, and so is less likely to go wrong.
-- While at it, add regression tests covering the numeric-to-int8/4/2
-- conversions, and adjust the recently added tests to the style of
-- 434ddfb79a (on the v11 branch) to make failures easier to diagnose.
-- Per buildfarm via Tom Lane, reviewed by Tom Lane.
-- Discussion: https://postgr.es/m/2394813.1628179479%40sss.pgh.pa.us
-- Check conversion to integers
SELECT (-9223372036854775808.5)::int8; -- should fail
ERROR: bigint out of range
SELECT (-9223372036854775808.4)::int8; -- ok
int8
----------------------
-9223372036854775808
(1 row)

SELECT 9223372036854775807.4::int8; -- ok
int8
---------------------
9223372036854775807
(1 row)

SELECT 9223372036854775807.5::int8; -- should fail
ERROR: bigint out of range
SELECT (-2147483648.5)::int4; -- should fail
ERROR: integer out of range
SELECT (-2147483648.4)::int4; -- ok
int4
-------------
-2147483648
(1 row)

SELECT 2147483647.4::int4; -- ok
int4
------------
2147483647
(1 row)

SELECT 2147483647.5::int4; -- should fail
ERROR: integer out of range
SELECT (-32768.5)::int2; -- should fail
ERROR: smallint out of range
SELECT (-32768.4)::int2; -- ok
int2
--------
-32768
(1 row)

SELECT 32767.4::int2; -- ok
int2
-------
32767
(1 row)

SELECT 32767.5::int2; -- should fail
ERROR: smallint out of range
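
-- The rule these cases pin down: a numeric is first rounded to the nearest
-- integer (ties away from zero) and only then range-checked, so x.5 at a
-- type's upper limit overflows while x.4 squeaks in. A hedged extra example:
SELECT 32766.5::int2;  -- expected 32767: rounds up, still in range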

-- Check inf/nan conversion behavior
SELECT 'NaN'::float8::numeric;
numeric
---------
NaN
(1 row)

SELECT 'Infinity'::float8::numeric;
numeric
----------
Infinity
(1 row)

SELECT '-Infinity'::float8::numeric;
numeric
-----------
-Infinity
(1 row)

SELECT 'NaN'::numeric::float8;
float8
--------
NaN
(1 row)

SELECT 'Infinity'::numeric::float8;
float8
----------
Infinity
(1 row)

SELECT '-Infinity'::numeric::float8;
float8
-----------
-Infinity
(1 row)

SELECT 'NaN'::float4::numeric;
numeric
---------
NaN
(1 row)

SELECT 'Infinity'::float4::numeric;
numeric
----------
Infinity
(1 row)

SELECT '-Infinity'::float4::numeric;
numeric
-----------
-Infinity
(1 row)

SELECT 'NaN'::numeric::float4;
float4
--------
NaN
(1 row)

SELECT 'Infinity'::numeric::float4;
float4
----------
Infinity
(1 row)

SELECT '-Infinity'::numeric::float4;
float4
-----------
-Infinity
(1 row)

SELECT '42'::int2::numeric;
numeric
---------
42
(1 row)

SELECT 'NaN'::numeric::int2;
ERROR: cannot convert NaN to smallint
SELECT 'Infinity'::numeric::int2;
ERROR: cannot convert infinity to smallint
SELECT '-Infinity'::numeric::int2;
ERROR: cannot convert infinity to smallint
SELECT 'NaN'::numeric::int4;
ERROR: cannot convert NaN to integer
SELECT 'Infinity'::numeric::int4;
ERROR: cannot convert infinity to integer
SELECT '-Infinity'::numeric::int4;
ERROR: cannot convert infinity to integer
SELECT 'NaN'::numeric::int8;
ERROR: cannot convert NaN to bigint
SELECT 'Infinity'::numeric::int8;
ERROR: cannot convert infinity to bigint
SELECT '-Infinity'::numeric::int8;
ERROR: cannot convert infinity to bigint
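
-- A hedged sketch for callers that must tolerate non-finite numerics:
-- screen them out before casting (numeric NaN compares equal to NaN,
-- so plain equality works). The VALUES list here is illustrative only.
SELECT v,
       CASE WHEN v = 'NaN'::numeric
              OR v = 'Infinity'::numeric
              OR v = '-Infinity'::numeric
            THEN NULL
            ELSE v::int8
       END AS safe_int8
FROM (VALUES ('42'::numeric), ('NaN'), ('-Infinity')) AS t(v);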

-- Simple check that ceil(), floor(), and round() work correctly
CREATE TABLE ceil_floor_round (a numeric);
INSERT INTO ceil_floor_round VALUES ('-5.5');
INSERT INTO ceil_floor_round VALUES ('-5.499999');
INSERT INTO ceil_floor_round VALUES ('9.5');
INSERT INTO ceil_floor_round VALUES ('9.4999999');
INSERT INTO ceil_floor_round VALUES ('0.0');
INSERT INTO ceil_floor_round VALUES ('0.0000001');
INSERT INTO ceil_floor_round VALUES ('-0.000001');
SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round;
a | ceil | ceiling | floor | round
-----------+------+---------+-------+-------
-5.5 | -5 | -5 | -6 | -6
-5.499999 | -5 | -5 | -6 | -5
9.5 | 10 | 10 | 9 | 10
9.4999999 | 10 | 10 | 9 | 9
0.0 | 0 | 0 | 0 | 0
0.0000001 | 1 | 1 | 0 | 0
-0.000001 | 0 | 0 | -1 | 0
(7 rows)

DROP TABLE ceil_floor_round;

-- Check rounding, it should round ties away from zero.
SELECT i as pow,
round((-2.5 * 10 ^ i)::numeric, -i),
round((-1.5 * 10 ^ i)::numeric, -i),
round((-0.5 * 10 ^ i)::numeric, -i),
round((0.5 * 10 ^ i)::numeric, -i),
round((1.5 * 10 ^ i)::numeric, -i),
round((2.5 * 10 ^ i)::numeric, -i)
FROM generate_series(-5,5) AS t(i);
pow | round | round | round | round | round | round
-----+----------+----------+----------+---------+---------+---------
-5 | -0.00003 | -0.00002 | -0.00001 | 0.00001 | 0.00002 | 0.00003
-4 | -0.0003 | -0.0002 | -0.0001 | 0.0001 | 0.0002 | 0.0003
-3 | -0.003 | -0.002 | -0.001 | 0.001 | 0.002 | 0.003
-2 | -0.03 | -0.02 | -0.01 | 0.01 | 0.02 | 0.03
-1 | -0.3 | -0.2 | -0.1 | 0.1 | 0.2 | 0.3
0 | -3 | -2 | -1 | 1 | 2 | 3
1 | -30 | -20 | -10 | 10 | 20 | 30
2 | -300 | -200 | -100 | 100 | 200 | 300
3 | -3000 | -2000 | -1000 | 1000 | 2000 | 3000
4 | -30000 | -20000 | -10000 | 10000 | 20000 | 30000
5 | -300000 | -200000 | -100000 | 100000 | 200000 | 300000
(11 rows)
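
-- Note the contrast with float8, hedged because the float behavior comes
-- from the platform's rint(): numeric breaks ties away from zero, while
-- round(double precision) typically rounds ties to even.
SELECT round(2.5::numeric) AS numeric_tie,   -- expected 3
       round(2.5::float8) AS float8_tie;     -- typically 2 (round-half-even)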

-- Testing for width_bucket(). For convenience, we test both the
-- numeric and float8 versions of the function in this file.
-- errors
SELECT width_bucket(5.0, 3.0, 4.0, 0);
ERROR: count must be greater than zero
SELECT width_bucket(5.0, 3.0, 4.0, -5);
ERROR: count must be greater than zero
SELECT width_bucket(3.5, 3.0, 3.0, 888);
ERROR: lower bound cannot equal upper bound
SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0);
ERROR: count must be greater than zero
SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5);
ERROR: count must be greater than zero
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
ERROR: lower bound cannot equal upper bound
SELECT width_bucket('NaN', 3.0, 4.0, 888);
ERROR: operand, lower bound, and upper bound cannot be NaN
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
ERROR: operand, lower bound, and upper bound cannot be NaN
SELECT width_bucket(2.0, 3.0, '-inf', 888);
ERROR: lower and upper bounds must be finite
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
ERROR: lower and upper bounds must be finite
-- normal operation
CREATE TABLE width_bucket_test (operand_num numeric, operand_f8 float8);
COPY width_bucket_test (operand_num) FROM stdin;
UPDATE width_bucket_test SET operand_f8 = operand_num::float8;
SELECT
operand_num,
width_bucket(operand_num, 0, 10, 5) AS wb_1,
width_bucket(operand_f8, 0, 10, 5) AS wb_1f,
width_bucket(operand_num, 10, 0, 5) AS wb_2,
width_bucket(operand_f8, 10, 0, 5) AS wb_2f,
width_bucket(operand_num, 2, 8, 4) AS wb_3,
width_bucket(operand_f8, 2, 8, 4) AS wb_3f,
width_bucket(operand_num, 5.0, 5.5, 20) AS wb_4,
width_bucket(operand_f8, 5.0, 5.5, 20) AS wb_4f,
width_bucket(operand_num, -25, 25, 10) AS wb_5,
width_bucket(operand_f8, -25, 25, 10) AS wb_5f
FROM width_bucket_test;
operand_num | wb_1 | wb_1f | wb_2 | wb_2f | wb_3 | wb_3f | wb_4 | wb_4f | wb_5 | wb_5f
------------------+------+-------+------+-------+------+-------+------+-------+------+-------
-5.2 | 0 | 0 | 6 | 6 | 0 | 0 | 0 | 0 | 4 | 4
-0.0000000001 | 0 | 0 | 6 | 6 | 0 | 0 | 0 | 0 | 5 | 5
0.000000000001 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6
1 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6
1.99999999999999 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6
2 | 2 | 2 | 5 | 5 | 1 | 1 | 0 | 0 | 6 | 6
2.00000000000001 | 2 | 2 | 4 | 4 | 1 | 1 | 0 | 0 | 6 | 6
3 | 2 | 2 | 4 | 4 | 1 | 1 | 0 | 0 | 6 | 6
4 | 3 | 3 | 4 | 4 | 2 | 2 | 0 | 0 | 6 | 6
4.5 | 3 | 3 | 3 | 3 | 2 | 2 | 0 | 0 | 6 | 6
5 | 3 | 3 | 3 | 3 | 3 | 3 | 1 | 1 | 7 | 7
5.5 | 3 | 3 | 3 | 3 | 3 | 3 | 21 | 21 | 7 | 7
6 | 4 | 4 | 3 | 3 | 3 | 3 | 21 | 21 | 7 | 7
7 | 4 | 4 | 2 | 2 | 4 | 4 | 21 | 21 | 7 | 7
8 | 5 | 5 | 2 | 2 | 5 | 5 | 21 | 21 | 7 | 7
9 | 5 | 5 | 1 | 1 | 5 | 5 | 21 | 21 | 7 | 7
9.99999999999999 | 5 | 5 | 1 | 1 | 5 | 5 | 21 | 21 | 7 | 7
10 | 6 | 6 | 1 | 1 | 5 | 5 | 21 | 21 | 8 | 8
10.0000000000001 | 6 | 6 | 0 | 0 | 5 | 5 | 21 | 21 | 8 | 8
(19 rows)
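
-- For ascending bounds the bucket number is essentially
-- floor(count * (operand - low) / (high - low)) + 1, with out-of-range
-- operands clamped to 0 and count+1. A hedged cross-check against the
-- wb_1 column for operand 4.5:
SELECT floor(5 * (4.5 - 0) / (10 - 0)) + 1 AS manual_wb_1;  -- expected 3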

-- Check positive and negative infinity: we require
-- finite bucket bounds, but allow an infinite operand
SELECT width_bucket(0.0::numeric, 'Infinity'::numeric, 5, 10); -- error
ERROR: lower and upper bounds must be finite
SELECT width_bucket(0.0::numeric, 5, '-Infinity'::numeric, 20); -- error
ERROR: lower and upper bounds must be finite
SELECT width_bucket('Infinity'::numeric, 1, 10, 10),
width_bucket('-Infinity'::numeric, 1, 10, 10);
width_bucket | width_bucket
--------------+--------------
11 | 0
(1 row)

SELECT width_bucket(0.0::float8, 'Infinity'::float8, 5, 10); -- error
ERROR: lower and upper bounds must be finite
SELECT width_bucket(0.0::float8, 5, '-Infinity'::float8, 20); -- error
ERROR: lower and upper bounds must be finite
SELECT width_bucket('Infinity'::float8, 1, 10, 10),
width_bucket('-Infinity'::float8, 1, 10, 10);
width_bucket | width_bucket
--------------+--------------
11 | 0
(1 row)

DROP TABLE width_bucket_test;

-- Simple test for roundoff error when results should be exact
SELECT x, width_bucket(x::float8, 10, 100, 9) as flt,
width_bucket(x::numeric, 10, 100, 9) as num
FROM generate_series(0, 110, 10) x;
x | flt | num
-----+-----+-----
0 | 0 | 0
10 | 1 | 1
20 | 2 | 2
30 | 3 | 3
40 | 4 | 4
50 | 5 | 5
60 | 6 | 6
70 | 7 | 7
80 | 8 | 8
90 | 9 | 9
100 | 10 | 10
110 | 10 | 10
(12 rows)

SELECT x, width_bucket(x::float8, 100, 10, 9) as flt,
width_bucket(x::numeric, 100, 10, 9) as num
FROM generate_series(0, 110, 10) x;
x | flt | num
-----+-----+-----
0 | 10 | 10
10 | 10 | 10
20 | 9 | 9
30 | 8 | 8
40 | 7 | 7
50 | 6 | 6
60 | 5 | 5
70 | 4 | 4
80 | 3 | 3
90 | 2 | 2
100 | 1 | 1
110 | 0 | 0
(12 rows)

-- Avoid overflow in width_bucket_float8().
-- The original coding of this function paid little attention to the
-- possibility of overflow. There were actually three different hazards:
-- 1. The range from bound1 to bound2 could exceed DBL_MAX, which on
-- IEEE-compliant machines produces +Infinity in the subtraction.
-- At best we'd lose all precision in the result, and at worst
-- produce NaN due to dividing Inf/Inf. The range can't exceed
-- twice DBL_MAX though, so we can fix this case by scaling all the
-- inputs by 0.5.
-- 2. We computed count * (operand - bound1), which is also at risk of
-- float overflow, before dividing. Safer is to do the division first,
-- producing a quotient that should be in [0,1), and even after allowing
-- for roundoff error can't be outside [0,1]; then multiplying by count
-- can't produce a result overflowing an int. (width_bucket_numeric does
-- the multiplication first on the grounds that that improves accuracy of
-- its result, but I don't think that a similar argument can be made in
-- float arithmetic.)
-- 3. If the division result does round to 1, and count is INT_MAX,
-- the final addition of 1 would overflow an int. We took care
-- of that in the operand >= bound2 case but did not consider that
-- it could be possible in the main path. Fix that by moving the
-- overflow-aware addition of 1 so it is done that way in all cases.
-- The fix for point 2 creates a possibility that values very close to
-- a bucket boundary will be rounded differently than they were before.
-- I'm not troubled by that for HEAD, but it is an argument against
-- putting this into the stable branches. Given that the cases being
-- fixed here are fairly extreme and unlikely to be hit in normal use,
-- it seems best not to back-patch.
-- Mats Kindahl and Tom Lane
-- Discussion: https://postgr.es/m/17876-61f280d1601f978d@postgresql.org
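
-- Hazard 1 is easy to reproduce directly in SQL, hedged on PostgreSQL's
-- float overflow check: subtracting the bounds of a full-range pair of
-- float8 values exceeds DBL_MAX and is rejected rather than producing Inf.
SELECT 1.797e+308::float8 - (-1.797e+308::float8);
-- expected: ERROR: value out of range: overflow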

-- Check cases that could trigger overflow or underflow within the calculation
SELECT oper, low, high, cnt, width_bucket(oper, low, high, cnt)
FROM
(SELECT 1.797e+308::float8 AS big, 5e-324::float8 AS tiny) as v,
LATERAL (VALUES
(10.5::float8, -big, big, 1),
(10.5::float8, -big, big, 2),
(10.5::float8, -big, big, 3),
(big / 4, -big / 2, big / 2, 10),
(10.5::float8, big, -big, 1),
(10.5::float8, big, -big, 2),
(10.5::float8, big, -big, 3),
(big / 4, big / 2, -big / 2, 10),
(0, 0, tiny, 4),
(tiny, 0, tiny, 4),
(0, 0, 1, 2147483647),
(1, 1, 0, 2147483647)
) as sample(oper, low, high, cnt);
oper | low | high | cnt | width_bucket
-------------+-------------+-------------+------------+--------------
10.5 | -1.797e+308 | 1.797e+308 | 1 | 1
10.5 | -1.797e+308 | 1.797e+308 | 2 | 2
10.5 | -1.797e+308 | 1.797e+308 | 3 | 2
4.4925e+307 | -8.985e+307 | 8.985e+307 | 10 | 8
10.5 | 1.797e+308 | -1.797e+308 | 1 | 1
10.5 | 1.797e+308 | -1.797e+308 | 2 | 2
10.5 | 1.797e+308 | -1.797e+308 | 3 | 2
4.4925e+307 | 8.985e+307 | -8.985e+307 | 10 | 3
0 | 0 | 5e-324 | 4 | 1
5e-324 | 0 | 5e-324 | 4 | 5
0 | 0 | 1 | 2147483647 | 1
1 | 1 | 0 | 2147483647 | 1
(12 rows)
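
-- One detail worth calling out from the sample above: an operand equal to
-- the upper bound lands in the overflow bucket (count+1), which is why
-- (tiny, 0, tiny, 4) maps to 5. The same rule at ordinary scale:
SELECT width_bucket(1.0::float8, 0, 1, 4);  -- expected 5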

-- These fail because the result would be out of int32 range:
SELECT width_bucket(1::float8, 0, 1, 2147483647);
ERROR: integer out of range
SELECT width_bucket(0::float8, 1, 0, 2147483647);
ERROR: integer out of range

--
-- TO_CHAR()
--
SELECT to_char(val, '9G999G999G999G999G999')
FROM num_data;
to_char
------------------------
0
0
-34,338,492
4
7,799,461
16,397
93,902
-83,028,485
74,881
-24,926,804
(10 rows)
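
-- Template notes for the patterns exercised below (a summary, not new
-- behavior): G/D are locale group/decimal separators, PR wraps negatives
-- in angle brackets, S/SG/MI place sign characters, FM suppresses padding,
-- TH/th add ordinal suffixes, L is the locale currency symbol, and EEEE
-- selects scientific notation. For instance:
SELECT to_char(-485::numeric, '999PR') AS pr,        -- expected <485>
       to_char(485::numeric, 'FM999th') AS ordinal;  -- expected 485th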

SELECT to_char(val, '9G999G999G999G999G999D999G999G999G999G999')
FROM num_data;
to_char
--------------------------------------------
.000,000,000,000,000
.000,000,000,000,000
-34,338,492.215,397,047,000,000
4.310,000,000,000,000
7,799,461.411,900,000,000,000
16,397.038,491,000,000,000
93,901.577,630,260,000,000
-83,028,485.000,000,000,000,000
74,881.000,000,000,000,000
-24,926,804.045,047,420,000,000
(10 rows)

SELECT to_char(val, '9999999999999999.999999999999999PR')
FROM num_data;
to_char
------------------------------------
.000000000000000
.000000000000000
<34338492.215397047000000>
4.310000000000000
7799461.411900000000000
16397.038491000000000
93901.577630260000000
<83028485.000000000000000>
74881.000000000000000
<24926804.045047420000000>
(10 rows)

SELECT to_char(val, '9999999999999999.999999999999999S')
FROM num_data;
to_char
-----------------------------------
.000000000000000+
.000000000000000+
34338492.215397047000000-
4.310000000000000+
7799461.411900000000000+
16397.038491000000000+
93901.577630260000000+
83028485.000000000000000-
74881.000000000000000+
24926804.045047420000000-
(10 rows)

SELECT to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
to_char
-----------------------------------
.000000000000000
.000000000000000
- 34338492.215397047000000
4.310000000000000
7799461.411900000000000
16397.038491000000000
93901.577630260000000
- 83028485.000000000000000
74881.000000000000000
- 24926804.045047420000000
(10 rows)

SELECT to_char(val, 'FMS9999999999999999.999999999999999') FROM num_data;
to_char
---------------------
+0.
+0.
-34338492.215397047
+4.31
+7799461.4119
+16397.038491
+93901.57763026
-83028485.
+74881.
-24926804.04504742
(10 rows)

SELECT to_char(val, 'FM9999999999999999.999999999999999THPR') FROM num_data;
to_char
----------------------
0.
0.
<34338492.215397047>
4.31
7799461.4119
16397.038491
93901.57763026
<83028485.>
74881.
<24926804.04504742>
(10 rows)

SELECT to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
to_char
-----------------------------------
+ .000000000000000
+ .000000000000000
- 34338492.215397047000000
+ 4.310000000000000
+ 7799461.411900000000000
+ 16397.038491000000000
+ 93901.577630260000000
- 83028485.000000000000000
+ 74881.000000000000000
- 24926804.045047420000000
(10 rows)

SELECT to_char(val, '0999999999999999.999999999999999') FROM num_data;
to_char
-----------------------------------
0000000000000000.000000000000000
0000000000000000.000000000000000
-0000000034338492.215397047000000
0000000000000004.310000000000000
0000000007799461.411900000000000
0000000000016397.038491000000000
0000000000093901.577630260000000
-0000000083028485.000000000000000
0000000000074881.000000000000000
-0000000024926804.045047420000000
(10 rows)

SELECT to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
to_char
-----------------------------------
+0000000000000000.000000000000000
+0000000000000000.000000000000000
-0000000034338492.215397047000000
+0000000000000004.310000000000000
+0000000007799461.411900000000000
+0000000000016397.038491000000000
+0000000000093901.577630260000000
-0000000083028485.000000000000000
+0000000000074881.000000000000000
-0000000024926804.045047420000000
(10 rows)

SELECT to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
to_char
-----------------------------
0000000000000000.
0000000000000000.
-0000000034338492.215397047
0000000000000004.31
0000000007799461.4119
0000000000016397.038491
0000000000093901.57763026
-0000000083028485.
0000000000074881.
-0000000024926804.04504742
(10 rows)

SELECT to_char(val, 'FM9999999999999999.099999999999999') FROM num_data;
to_char
---------------------
.0
.0
-34338492.215397047
4.31
7799461.4119
16397.038491
93901.57763026
-83028485.0
74881.0
-24926804.04504742
(10 rows)

SELECT to_char(val, 'FM9999999999990999.990999999999999') FROM num_data;
to_char
---------------------
0000.000
0000.000
-34338492.215397047
0004.310
7799461.4119
16397.038491
93901.57763026
-83028485.000
74881.000
-24926804.04504742
(10 rows)

SELECT to_char(val, 'FM0999999999999999.999909999999999') FROM num_data;
to_char
-----------------------------
0000000000000000.00000
0000000000000000.00000
-0000000034338492.215397047
0000000000000004.31000
0000000007799461.41190
0000000000016397.038491
0000000000093901.57763026
-0000000083028485.00000
0000000000074881.00000
-0000000024926804.04504742
(10 rows)

SELECT to_char(val, 'FM9999999990999999.099999999999999') FROM num_data;
to_char
---------------------
0000000.0
0000000.0
-34338492.215397047
0000004.31
7799461.4119
0016397.038491
0093901.57763026
-83028485.0
0074881.0
-24926804.04504742
(10 rows)

SELECT to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
to_char
------------------------------------
.000000000000000
.000000000000000
-34338492.215397047000000
4.310000000000000
7799461.411900000000000
16397.038491000000000
93901.577630260000000
-83028485.000000000000000
74881.000000000000000
-24926804.045047420000000
(10 rows)

SELECT to_char(val, 'FM9999999999999999.99999999999999') FROM num_data;
to_char
---------------------
0.
0.
-34338492.215397047
4.31
7799461.4119
16397.038491
93901.57763026
-83028485.
74881.
-24926804.04504742
(10 rows)

SELECT to_char(val, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
to_char
-----------------------------------------------------------------------
+. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-3 4 3 3 8 4 9 2 . 2 1 5 3 9 7 0 4 7 0 0 0 0 0 0 0 0
+4 . 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+7 7 9 9 4 6 1 . 4 1 1 9 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 6 3 9 7 . 0 3 8 4 9 1 0 0 0 0 0 0 0 0 0 0 0
+9 3 9 0 1 . 5 7 7 6 3 0 2 6 0 0 0 0 0 0 0 0 0
-8 3 0 2 8 4 8 5 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+7 4 8 8 1 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-2 4 9 2 6 8 0 4 . 0 4 5 0 4 7 4 2 0 0 0 0 0 0 0 0 0
(10 rows)

SELECT to_char(val, 'FMS 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
to_char
-------------------------------------------------------
+0 .
+0 .
-3 4 3 3 8 4 9 2 . 2 1 5 3 9 7 0 4 7
+4 . 3 1
+7 7 9 9 4 6 1 . 4 1 1 9
+1 6 3 9 7 . 0 3 8 4 9 1
+9 3 9 0 1 . 5 7 7 6 3 0 2 6
-8 3 0 2 8 4 8 5 .
+7 4 8 8 1 .
-2 4 9 2 6 8 0 4 . 0 4 5 0 4 7 4 2
(10 rows)

SELECT to_char(val, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM num_data;
to_char
-----------------------------------------------------------
text 9999 "text between quote marks" 0
text 9999 "text between quote marks" 0
text -3 9999 433 "text between quote marks" 8492
text 9999 "text between quote marks" 4
text 9999 779 "text between quote marks" 9461
text 9999 1 "text between quote marks" 6397
text 9999 9 "text between quote marks" 3902
text -8 9999 302 "text between quote marks" 8485
text 9999 7 "text between quote marks" 4881
text -2 9999 492 "text between quote marks" 6804
(10 rows)

SELECT to_char(val, '999999SG9999999999') FROM num_data;
to_char
-------------------
+ 0
+ 0
- 34338492
+ 4
+ 7799461
+ 16397
+ 93902
- 83028485
+ 74881
- 24926804
(10 rows)

SELECT to_char(val, 'FM9999999999999999.999999999999999') FROM num_data;
to_char
---------------------
0.
0.
-34338492.215397047
4.31
7799461.4119
16397.038491
93901.57763026
-83028485.
74881.
-24926804.04504742
(10 rows)

SELECT to_char(val, '9.999EEEE') FROM num_data;
to_char
------------
0.000e+00
0.000e+00
-3.434e+07
4.310e+00
7.799e+06
1.640e+04
9.390e+04
-8.303e+07
7.488e+04
-2.493e+07
(10 rows)

WITH v(val) AS
(VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
SELECT val,
to_char(val, '9.999EEEE') as numeric,
to_char(val::float8, '9.999EEEE') as float8,
to_char(val::float4, '9.999EEEE') as float4
FROM v;
val | numeric | float8 | float4
------------+------------+------------+------------
0 | 0.000e+00 | 0.000e+00 | 0.000e+00
-4.2 | -4.200e+00 | -4.200e+00 | -4.200e+00
4200000000 | 4.200e+09 | 4.200e+09 | 4.200e+09
0.000012 | 1.200e-05 | 1.200e-05 | 1.200e-05
Infinity | #.####### | #.####### | #.#######
-Infinity | #.####### | #.####### | #.#######
NaN | #.####### | #.####### | #.#######
(7 rows)

-- Fix division-by-zero error in to_char() with 'EEEE' format.
-- This fixes a long-standing bug when using to_char() to format a
-- numeric value in scientific notation -- if the value's exponent is
-- less than -NUMERIC_MAX_DISPLAY_SCALE-1 (-1001), it produced a
-- division-by-zero error.
-- The reason for this error was that get_str_from_var_sci() divides its
-- input by 10^exp, which it produced using power_var_int(). However, the
-- underflow test in power_var_int() causes it to return zero if the
-- result scale is too small. That's not a problem for power_var_int()'s
-- only other caller, power_var(), since that limits the rscale to 1000,
-- but in get_str_from_var_sci() the exponent can be much smaller,
-- requiring a much larger rscale. Fix by introducing a new function to
-- compute 10^exp directly, with no rscale limit. This also allows 10^exp
-- to be computed more efficiently, without any numeric multiplication,
-- division or rounding.
-- Discussion: https://postgr.es/m/CAEZATCWhojfH4whaqgUKBe8D5jNHB8ytzemL-PnRx+KCTyMXmg@mail.gmail.com
WITH v(exp) AS
(VALUES(-16379),(-16378),(-1234),(-789),(-45),(-5),(-4),(-3),(-2),(-1),(0),
(1),(2),(3),(4),(5),(38),(275),(2345),(45678),(131070),(131071))
SELECT exp,
to_char(('1.2345e'||exp)::numeric, '9.999EEEE') as numeric
FROM v;
exp | numeric
--------+----------------
-16379 | 1.235e-16379
-16378 | 1.235e-16378
-1234 | 1.235e-1234
-789 | 1.235e-789
-45 | 1.235e-45
-5 | 1.235e-05
-4 | 1.235e-04
-3 | 1.235e-03
-2 | 1.235e-02
-1 | 1.235e-01
0 | 1.235e+00
1 | 1.235e+01
2 | 1.235e+02
3 | 1.235e+03
4 | 1.235e+04
5 | 1.235e+05
38 | 1.235e+38
275 | 1.235e+275
2345 | 1.235e+2345
45678 | 1.235e+45678
131070 | 1.235e+131070
131071 | 1.235e+131071
(22 rows)

WITH v(val) AS
(VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
SELECT val,
to_char(val, 'MI9999999999.99') as numeric,
to_char(val::float8, 'MI9999999999.99') as float8,
to_char(val::float4, 'MI9999999999.99') as float4
FROM v;
val | numeric | float8 | float4
------------+----------------+----------------+----------------
0 | .00 | .00 | .00
-4.2 | - 4.20 | - 4.20 | - 4.20
4200000000 | 4200000000.00 | 4200000000.00 | 4200000000
0.000012 | .00 | .00 | .00
Infinity | Infinity | Infinity | Infinity
-Infinity | - Infinity | - Infinity | - Infinity
NaN | NaN | NaN | NaN
(7 rows)

WITH v(val) AS
(VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
SELECT val,
to_char(val, 'MI99.99') as numeric,
to_char(val::float8, 'MI99.99') as float8,
to_char(val::float4, 'MI99.99') as float4
FROM v;
val | numeric | float8 | float4
------------+---------+--------+--------
0 | .00 | .00 | .00
-4.2 | - 4.20 | - 4.20 | - 4.20
4200000000 | ##.## | ##.## | ##.
0.000012 | .00 | .00 | .00
Infinity | ##.## | ##.## | ##.
-Infinity | -##.## | -##.## | -##.
NaN | ##.## | ##.## | ##.##
(7 rows)

SELECT to_char('100'::numeric, 'FM999.9');
to_char
---------
100.
(1 row)

SELECT to_char('100'::numeric, 'FM999.');
to_char
---------
100
(1 row)

SELECT to_char('100'::numeric, 'FM999');
to_char
---------
100
(1 row)

SELECT to_char('12345678901'::float8, 'FM9999999999D9999900000000000000000');
to_char
-----------------
##########.####
(1 row)

-- Fix quoted-substring handling in format parsing for to_char/to_number/etc.
-- This code evidently intended to treat backslash as an escape character
-- within double-quoted substrings, but it was sufficiently confused that
-- cases like ..."foo\\"... did not work right: the second backslash
-- managed to quote the double-quote after it, despite being quoted itself.
-- Rewrite to get that right, while preserving the existing behavior
-- outside double-quoted substrings, which is that backslash isn't special
-- except in the combination \".
-- Comparing to Oracle, it seems that their version of to_char() for
-- timestamps allows literal alphanumerics only within double quotes, while
-- non-alphanumerics are allowed outside quotes; backslashes aren't special
-- anywhere; there is no way at all to emit a literal double quote.
-- (Bizarrely, their to_char() for numbers is different; it doesn't allow
-- literal text at all AFAICT.) The fact that they don't treat backslash
-- as special justifies our existing behavior for backslash outside double
-- quotes. I considered making backslash inside double quotes act the same
-- way (ie, special only if before "), which in a green field would be a
-- more consistent behavior. But that would likely break more existing SQL
-- code than what this patch does.
-- Add some test cases illustrating this behavior. (Only the last new
-- case actually changes behavior in this commit.)
-- Little of this behavior was documented, either, so fix that.
-- Discussion: https://postgr.es/m/3626.1510949486@sss.pgh.pa.us
-- Check parsing of literal text in a format string
SELECT to_char('100'::numeric, 'foo999');
to_char
---------
foo 100
(1 row)

SELECT to_char('100'::numeric, 'f\oo999');
to_char
----------
f\oo 100
(1 row)

SELECT to_char('100'::numeric, 'f\\oo999');
to_char
-----------
f\\oo 100
(1 row)

SELECT to_char('100'::numeric, 'f\"oo999');
to_char
----------
f"oo 100
(1 row)

SELECT to_char('100'::numeric, 'f\\"oo999');
to_char
-----------
f\"oo 100
(1 row)

SELECT to_char('100'::numeric, 'f"ool"999');
to_char
----------
fool 100
(1 row)

SELECT to_char('100'::numeric, 'f"\ool"999');
to_char
----------
fool 100
(1 row)

SELECT to_char('100'::numeric, 'f"\\ool"999');
to_char
-----------
f\ool 100
(1 row)

SELECT to_char('100'::numeric, 'f"ool\"999');
to_char
----------
fool"999
(1 row)

SELECT to_char('100'::numeric, 'f"ool\\"999');
to_char
-----------
fool\ 100
(1 row)
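
-- Summary of the rule the ten cases above pin down: outside double quotes
-- a backslash is literal except in the pair \", while inside double quotes
-- backslash escapes exactly the next character. A hedged combined example
-- (an assumed extra case, not from the original test set):
SELECT to_char('100'::numeric, '"\""foo999');  -- expected "foo 100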

--
-- TO_NUMBER()
--
-- Prevent to_number() from losing data when template doesn't match exactly.
-- Non-data template patterns would consume characters whether or not those
-- characters were what the pattern expected, for example
--     SELECT TO_NUMBER('1234', '9,999');
-- produced 134 because the '2' got eaten by the comma pattern. This seems
-- undesirable, not least because it doesn't happen in Oracle. For the ','
-- and 'G' template patterns, we can fix this by consuming characters only
-- if they match what the pattern would output. For non-data patterns such
-- as 'L' and 'TH', it seems impractical to tighten things up to the point of
-- consuming only exact matches to what the pattern would output; but we can
-- improve matters quite a lot by redefining the behavior as "consume only
-- characters that aren't digits, signs, decimal point, or comma".
-- Also, fix it so that the behavior is to consume the number of *characters*
-- the pattern would output, not the number of *bytes*. The old coding would
-- do surprising things with non-ASCII currency symbols, for example. (It
-- would be good to apply that rule for literal text as well, but this commit
-- only fixes it for non-data patterns.)
-- Oliver Ford, reviewed by Thomas Munro and Nathan Wagner, and whacked around
-- a bit more by me
-- Discussion: https://postgr.es/m/CAGMVOdvpbMqPf9XWNzOwBpzJfErkydr_fEGhmuDGa015z97mwg@mail.gmail.com
SET lc_numeric = 'C';
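
-- A hedged illustration of the post-fix behavior described above: the
-- comma pattern no longer swallows a digit that isn't actually a comma.
SELECT to_number('1234', '9,999');  -- expected 1234 (was 134 before the fix)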
SELECT to_number('-34,338,492', '99G999G999');
to_number
-----------
-34338492
(1 row)

SELECT to_number('-34,338,492.654,878', '99G999G999D999G999');
to_number
------------------
-34338492.654878
(1 row)

SELECT to_number('<564646.654564>', '999999.999999PR');
to_number
----------------
-564646.654564
(1 row)

SELECT to_number('0.00001-', '9.999999S');
to_number
-----------
-0.00001
(1 row)

SELECT to_number('5.01-', 'FM9.999999S');
to_number
-----------
-5.01
(1 row)

SELECT to_number('5.01-', 'FM9.999999MI');
to_number
-----------
-5.01
(1 row)

SELECT to_number('5 4 4 4 4 8 . 7 8', '9 9 9 9 9 9 . 9 9');
to_number
-----------
544448.78
(1 row)

SELECT to_number('.01', 'FM9.99');
to_number
-----------
0.01
(1 row)

SELECT to_number('.0', '99999999.99999999');
to_number
-----------
0.0
(1 row)

SELECT to_number('0', '99.99');
to_number
-----------
0
(1 row)

SELECT to_number('.-01', 'S99.99');
to_number
-----------
-0.01
(1 row)

SELECT to_number('.01-', '99.99S');
to_number
-----------
-0.01
(1 row)

SELECT to_number(' . 0 1-', ' 9 9 . 9 9 S');
to_number
-----------
-0.01
(1 row)

SELECT to_number('34,50','999,99');
to_number
-----------
3450
(1 row)
|
|
|
|
|
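-- Editor's note (illustrative sketch, not part of the original suite): per the
-- commit message above, the ',' and 'G' patterns now consume input only when a
-- matching separator is actually present, so
--   SELECT to_number('1234', '9,999');
-- is expected to return 1234 instead of the old data-losing 134.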
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('123,000','999G');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
123
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('123456','999G999');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
123456
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('$1234.56','L9,999.99');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
1234.56
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('$1234.56','L99,999.99');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
1234.56
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('$1,234.56','L99,999.99');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
1234.56
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('1234.56','L99,999.99');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
1234.56
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('1,234.56','L99,999.99');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
1234.56
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
2020-12-15 21:54:06 +01:00
|
|
|
SELECT to_number('42nd', '99th');
|
|
|
|
to_number
|
|
|
|
-----------
|
|
|
|
42
|
2017-11-17 18:04:06 +01:00
|
|
|
(1 row)
|
|
|
|
|
|
|
|
RESET lc_numeric;
|
2004-03-11 03:11:14 +01:00
|
|
|
--
|
|
|
|
-- Input syntax
|
|
|
|
--
|
|
|
|
CREATE TABLE num_input_test (n1 numeric);
|
|
|
|
-- good inputs
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' 123');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' 3245874 ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' -93853');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('555.50');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('-555.50');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('NaN ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' nan');
|
2020-07-23 01:19:44 +02:00
|
|
|
INSERT INTO num_input_test(n1) VALUES (' inf ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' +inf ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' -inf ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' Infinity ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' +inFinity ');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' -INFINITY ');
|
2023-02-04 10:48:51 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('12_000_000_000');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('12_000.123_456');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('23_000_000_000e-1_0');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('.000_000_000_123e1_0');
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('.000_000_000_123e+1_1');
|
2023-01-23 20:21:22 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0b10001110111100111100001001010');
|
2023-02-04 10:48:51 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES (' -0B_1010_1011_0101_0100_1010_1001_1000_1100_1110_1011_0001_1111_0000_1010_1101_0010 ');
|
2023-01-23 20:21:22 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES (' +0o112402761777 ');
|
2023-02-04 10:48:51 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('-0O0012_5524_5230_6334_3167_0261');
|
2023-01-23 20:21:22 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('-0x0000000000000000000000000deadbeef');
|
2023-02-04 10:48:51 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES (' 0X_30b1_F33a_6DF0_bD4E_64DF_9BdA_7D15 ');
|
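-- Editor's sketch (assumption, not from the original suite): the same literal
-- forms are accepted anywhere numeric input is parsed, e.g. in a cast:
--   SELECT '12_000_000_000'::numeric, '0x1ff'::numeric;
-- expected to yield 12000000000 and 511.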
2004-03-11 03:11:14 +01:00
|
|
|
-- bad inputs
|
|
|
|
INSERT INTO num_input_test(n1) VALUES (' ');
|
|
|
|
ERROR: invalid input syntax for type numeric: " "
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES (' ');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES (' 1234 %');
|
|
|
|
ERROR: invalid input syntax for type numeric: " 1234 %"
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES (' 1234 %');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('xyz');
|
|
|
|
ERROR: invalid input syntax for type numeric: "xyz"
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('xyz');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('- 1234');
|
|
|
|
ERROR: invalid input syntax for type numeric: "- 1234"
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('- 1234');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('5 . 0');
|
|
|
|
ERROR: invalid input syntax for type numeric: "5 . 0"
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('5 . 0');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('5. 0 ');
|
|
|
|
ERROR: invalid input syntax for type numeric: "5. 0 "
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('5. 0 ');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('');
|
|
|
|
ERROR: invalid input syntax for type numeric: ""
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES (' N aN ');
|
|
|
|
ERROR: invalid input syntax for type numeric: " N aN "
|
2008-09-01 22:42:46 +02:00
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES (' N aN ');
|
|
|
|
^
|
2023-01-23 20:21:22 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('+NaN');
|
|
|
|
ERROR: invalid input syntax for type numeric: "+NaN"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('+NaN');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('-NaN');
|
|
|
|
ERROR: invalid input syntax for type numeric: "-NaN"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('-NaN');
|
|
|
|
^
|
2020-07-23 01:19:44 +02:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('+ infinity');
|
|
|
|
ERROR: invalid input syntax for type numeric: "+ infinity"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('+ infinity');
|
|
|
|
^
|
2023-02-04 10:48:51 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('_123');
|
|
|
|
ERROR: invalid input syntax for type numeric: "_123"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('_123');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('123_');
|
|
|
|
ERROR: invalid input syntax for type numeric: "123_"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('123_');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('12__34');
|
|
|
|
ERROR: invalid input syntax for type numeric: "12__34"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('12__34');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('123_.456');
|
|
|
|
ERROR: invalid input syntax for type numeric: "123_.456"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('123_.456');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('123._456');
|
|
|
|
ERROR: invalid input syntax for type numeric: "123._456"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('123._456');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('1.2e_34');
|
|
|
|
ERROR: invalid input syntax for type numeric: "1.2e_34"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('1.2e_34');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('1.2e34_');
|
|
|
|
ERROR: invalid input syntax for type numeric: "1.2e34_"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('1.2e34_');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('1.2e3__4');
|
|
|
|
ERROR: invalid input syntax for type numeric: "1.2e3__4"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('1.2e3__4');
|
|
|
|
^
|
2023-01-23 20:21:22 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0b1112');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0b1112"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0b1112');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0c1112');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0c1112"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0c1112');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0o12345678');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0o12345678"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0o12345678');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0x1eg');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0x1eg"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x1eg');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0x12.34');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0x12.34"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x12.34');
|
|
|
|
^
|
2023-02-04 10:48:51 +01:00
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0x__1234');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0x__1234"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x__1234');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0x1234_');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0x1234_"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x1234_');
|
|
|
|
^
|
|
|
|
INSERT INTO num_input_test(n1) VALUES ('0x12__34');
|
|
|
|
ERROR: invalid input syntax for type numeric: "0x12__34"
|
|
|
|
LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x12__34');
|
|
|
|
^
|
2004-03-11 03:11:14 +01:00
|
|
|
SELECT * FROM num_input_test;
|
2023-01-23 20:21:22 +01:00
|
|
|
n1
|
|
|
|
-----------------------------------
|
|
|
|
123
|
|
|
|
3245874
|
|
|
|
-93853
|
|
|
|
555.50
|
|
|
|
-555.50
|
|
|
|
NaN
|
|
|
|
NaN
|
|
|
|
Infinity
|
|
|
|
Infinity
|
|
|
|
-Infinity
|
|
|
|
Infinity
|
|
|
|
Infinity
|
|
|
|
-Infinity
|
2023-02-04 10:48:51 +01:00
|
|
|
12000000000
|
|
|
|
12000.123456
|
|
|
|
2.3000000000
|
|
|
|
1.23
|
|
|
|
12.3
|
2023-01-23 20:21:22 +01:00
|
|
|
299792458
|
|
|
|
-12345678901234567890
|
|
|
|
9999999999
|
|
|
|
-12345678900987654321
|
|
|
|
-3735928559
|
|
|
|
987654321234567898765432123456789
|
2023-02-04 10:48:51 +01:00
|
|
|
(24 rows)
|
2004-03-11 03:11:14 +01:00
|
|
|
|
Convert a few datatype input functions to use "soft" error reporting.
This patch converts the input functions for bool, int2, int4, int8,
float4, float8, numeric, and contrib/cube to the new soft-error style.
array_in and record_in are also converted. There's lots more to do,
but this is enough to provide proof-of-concept that the soft-error
API is usable, as well as reference examples for how to convert
input functions.
This patch is mostly by me, but it owes very substantial debt to
earlier work by Nikita Glukhov, Andrew Dunstan, and Amul Sul.
Thanks to Andres Freund for review.
Discussion: https://postgr.es/m/3bbbb0df-7382-bf87-9737-340ba096e034@postgrespro.ru
2022-12-09 16:14:53 +01:00
|
|
|
-- Also try it with non-error-throwing API
|
|
|
|
SELECT pg_input_is_valid('34.5', 'numeric');
|
|
|
|
pg_input_is_valid
|
|
|
|
-------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_input_is_valid('34xyz', 'numeric');
|
|
|
|
pg_input_is_valid
|
|
|
|
-------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_input_is_valid('1e400000', 'numeric');
|
|
|
|
pg_input_is_valid
|
|
|
|
-------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
2023-02-28 00:04:13 +01:00
|
|
|
SELECT * FROM pg_input_error_info('1e400000', 'numeric');
|
|
|
|
message | detail | hint | sql_error_code
|
|
|
|
--------------------------------+--------+------+----------------
|
|
|
|
value overflows numeric format | | | 22003
|
2022-12-09 16:14:53 +01:00
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_input_is_valid('1234.567', 'numeric(8,4)');
|
|
|
|
pg_input_is_valid
|
|
|
|
-------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_input_is_valid('1234.567', 'numeric(7,4)');
|
|
|
|
pg_input_is_valid
|
|
|
|
-------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
2023-02-28 00:04:13 +01:00
|
|
|
SELECT * FROM pg_input_error_info('1234.567', 'numeric(7,4)');
|
|
|
|
message | detail | hint | sql_error_code
|
|
|
|
------------------------+-----------------------------------------------------------------------------------+------+----------------
|
|
|
|
numeric field overflow | A field with precision 7, scale 4 must round to an absolute value less than 10^3. | | 22003
|
2022-12-09 16:14:53 +01:00
|
|
|
(1 row)
|
|
|
|
|
2023-02-28 00:04:13 +01:00
|
|
|
SELECT * FROM pg_input_error_info('0x1234.567', 'numeric');
|
|
|
|
message | detail | hint | sql_error_code
|
|
|
|
-----------------------------------------------------+--------+------+----------------
|
|
|
|
invalid input syntax for type numeric: "0x1234.567" | | | 22P02
|
2023-01-23 20:21:22 +01:00
|
|
|
(1 row)
|
|
|
|
|
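-- Editor's sketch (not part of the original suite): since these functions never
-- throw, they can screen untrusted input inline, e.g.
--   SELECT s FROM (VALUES ('34.5'), ('34xyz')) AS v(s)
--   WHERE pg_input_is_valid(s, 'numeric');
-- which should return only the '34.5' row.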
Allow numeric scale to be negative or greater than precision.
Formerly, when specifying NUMERIC(precision, scale), the scale had to
be in the range [0, precision], which was per SQL spec. This commit
extends the range of allowed scales to [-1000, 1000], independent of
the precision (whose valid range remains [1, 1000]).
A negative scale implies rounding before the decimal point. For
example, a column might be declared with a scale of -3 to round values
to the nearest thousand. Note that the display scale remains
non-negative, so in this case the display scale will be zero, and all
digits before the decimal point will be displayed.
A scale greater than the precision supports fractional values with
zeros immediately after the decimal point.
Take the opportunity to tidy up the code that packs, unpacks and
validates the contents of a typmod integer, encapsulating it in a
small set of new inline functions.
Bump the catversion because the allowed contents of atttypmod have
changed for numeric columns. This isn't a change that requires a
re-initdb, but negative scale values in the typmod would confuse old
backends.
Dean Rasheed, with additional improvements by Tom Lane. Reviewed by
Tom Lane.
Discussion: https://postgr.es/m/CAEZATCWdNLgpKihmURF8nfofP0RFtAKJ7ktY6GcZOPnMfUoRqA@mail.gmail.com
2021-07-26 15:13:47 +02:00
|
|
|
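-- Editor's sketch (assumption, based on the commit message above): a negative
-- scale rounds before the decimal point, e.g.
--   SELECT 123456::numeric(5,-3);
-- should round to the nearest thousand and display as 123000.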
--
|
|
|
|
-- Test precision and scale typemods
|
|
|
|
--
|
|
|
|
CREATE TABLE num_typemod_test (
|
|
|
|
millions numeric(3, -6),
|
|
|
|
thousands numeric(3, -3),
|
|
|
|
units numeric(3, 0),
|
|
|
|
thousandths numeric(3, 3),
|
|
|
|
millionths numeric(3, 6)
|
|
|
|
);
|
|
|
|
\d num_typemod_test
|
|
|
|
Table "public.num_typemod_test"
|
|
|
|
Column | Type | Collation | Nullable | Default
|
|
|
|
-------------+---------------+-----------+----------+---------
|
|
|
|
millions | numeric(3,-6) | | |
|
|
|
|
thousands | numeric(3,-3) | | |
|
|
|
|
units | numeric(3,0) | | |
|
|
|
|
thousandths | numeric(3,3) | | |
|
|
|
|
millionths | numeric(3,6) | | |
|
|
|
|
|
|
|
|
-- rounding of valid inputs
|
|
|
|
INSERT INTO num_typemod_test VALUES (123456, 123, 0.123, 0.000123, 0.000000123);
|
|
|
|
INSERT INTO num_typemod_test VALUES (654321, 654, 0.654, 0.000654, 0.000000654);
|
|
|
|
INSERT INTO num_typemod_test VALUES (2345678, 2345, 2.345, 0.002345, 0.000002345);
|
|
|
|
INSERT INTO num_typemod_test VALUES (7654321, 7654, 7.654, 0.007654, 0.000007654);
|
|
|
|
INSERT INTO num_typemod_test VALUES (12345678, 12345, 12.345, 0.012345, 0.000012345);
|
|
|
|
INSERT INTO num_typemod_test VALUES (87654321, 87654, 87.654, 0.087654, 0.000087654);
|
|
|
|
INSERT INTO num_typemod_test VALUES (123456789, 123456, 123.456, 0.123456, 0.000123456);
|
|
|
|
INSERT INTO num_typemod_test VALUES (987654321, 987654, 987.654, 0.987654, 0.000987654);
|
|
|
|
INSERT INTO num_typemod_test VALUES ('NaN', 'NaN', 'NaN', 'NaN', 'NaN');
|
|
|
|
SELECT scale(millions), * FROM num_typemod_test ORDER BY millions;
|
|
|
|
scale | millions | thousands | units | thousandths | millionths
|
|
|
|
-------+-----------+-----------+-------+-------------+------------
|
|
|
|
0 | 0 | 0 | 0 | 0.000 | 0.000000
|
|
|
|
0 | 1000000 | 1000 | 1 | 0.001 | 0.000001
|
|
|
|
0 | 2000000 | 2000 | 2 | 0.002 | 0.000002
|
|
|
|
0 | 8000000 | 8000 | 8 | 0.008 | 0.000008
|
|
|
|
0 | 12000000 | 12000 | 12 | 0.012 | 0.000012
|
|
|
|
0 | 88000000 | 88000 | 88 | 0.088 | 0.000088
|
|
|
|
0 | 123000000 | 123000 | 123 | 0.123 | 0.000123
|
|
|
|
0 | 988000000 | 988000 | 988 | 0.988 | 0.000988
|
|
|
|
| NaN | NaN | NaN | NaN | NaN
|
|
|
|
(9 rows)
|
|
|
|
|
|
|
|
-- invalid inputs
|
|
|
|
INSERT INTO num_typemod_test (millions) VALUES ('inf');
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 3, scale -6 cannot hold an infinite value.
|
|
|
|
INSERT INTO num_typemod_test (millions) VALUES (999500000);
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 3, scale -6 must round to an absolute value less than 10^9.
|
|
|
|
INSERT INTO num_typemod_test (thousands) VALUES (999500);
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 3, scale -3 must round to an absolute value less than 10^6.
|
|
|
|
INSERT INTO num_typemod_test (units) VALUES (999.5);
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 3, scale 0 must round to an absolute value less than 10^3.
|
|
|
|
INSERT INTO num_typemod_test (thousandths) VALUES (0.9995);
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 3, scale 3 must round to an absolute value less than 1.
|
|
|
|
INSERT INTO num_typemod_test (millionths) VALUES (0.0009995);
|
|
|
|
ERROR: numeric field overflow
|
|
|
|
DETAIL: A field with precision 3, scale 6 must round to an absolute value less than 10^-3.
|
2015-09-21 18:11:32 +02:00
|
|
|
--
|
|
|
|
-- Test some corner cases for multiplication
|
|
|
|
--
|
|
|
|
select 4790999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
|
|
|
|
?column?
|
|
|
|
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
|
47909999999999999999999999999999999999999999999999999999999999999999999999999999999999985209000000000000000000000000000000000000000000000000000000000000000000000000000000000001
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 4789999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
|
|
|
|
?column?
|
|
|
|
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
|
47899999999999999999999999999999999999999999999999999999999999999999999999999999999999985210000000000000000000000000000000000000000000000000000000000000000000000000000000000001
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 4770999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
|
|
|
|
?column?
|
|
|
|
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
|
47709999999999999999999999999999999999999999999999999999999999999999999999999999999999985229000000000000000000000000000000000000000000000000000000000000000000000000000000000001
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 4769999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
|
|
|
|
?column?
|
|
|
|
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
|
47699999999999999999999999999999999999999999999999999999999999999999999999999999999999985230000000000000000000000000000000000000000000000000000000000000000000000000000000000001
|
|
|
|
(1 row)
|
|
|
|
|
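-- Editor's note (sketch): trim_scale() strips trailing fractional zeroes from
-- a numeric value, e.g.
--   select trim_scale(8.4100);
-- is expected to return 8.41; the query below uses it to normalise the scale
-- of a tiny-operand product.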
2021-07-10 13:42:59 +02:00
|
|
|
select trim_scale((0.1 - 2e-16383) * (0.1 - 3e-16383));
|
|
|
|
trim_scale
|
|
|
|
------------
|
|
|
|
0.01
|
|
|
|
(1 row)
|
|
|
|
|
2008-04-04 20:45:36 +02:00
|
|
|
--
|
|
|
|
-- Test some corner cases for division
|
|
|
|
--
|
|
|
|
select 999999999999999999999::numeric/1000000000000000000000;
|
|
|
|
?column?
|
|
|
|
------------------------
|
|
|
|
1.00000000000000000000
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select div(999999999999999999999::numeric,1000000000000000000000);
|
|
|
|
div
|
|
|
|
-----
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select mod(999999999999999999999::numeric,1000000000000000000000);
|
|
|
|
mod
|
|
|
|
-----------------------
|
|
|
|
999999999999999999999
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select div(-9999999999999999999999::numeric,1000000000000000000000);
|
|
|
|
div
|
|
|
|
-----
|
|
|
|
-9
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select mod(-9999999999999999999999::numeric,1000000000000000000000);
|
|
|
|
mod
|
|
|
|
------------------------
|
|
|
|
-999999999999999999999
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select div(-9999999999999999999999::numeric,1000000000000000000000)*1000000000000000000000 + mod(-9999999999999999999999::numeric,1000000000000000000000);
|
|
|
|
?column?
|
|
|
|
-------------------------
|
|
|
|
-9999999999999999999999
|
|
|
|
(1 row)
|
|
|
|
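-- Editor's note (illustrative sketch): div() truncates toward zero and mod()
-- takes the sign of the dividend, which is why the identity
-- div(a,b)*b + mod(a,b) = a holds above; e.g.
--   select div(-7.0, 2), mod(-7.0, 2);
-- is expected to give -3 and -1.0.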
|
|
|
|
select mod (70.0,70) ;
|
|
|
|
mod
|
|
|
|
-----
|
|
|
|
0.0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select div (70.0,70) ;
|
|
|
|
div
|
|
|
|
-----
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 70.0 / 70 ;
|
|
|
|
?column?
|
|
|
|
------------------------
|
|
|
|
1.00000000000000000000
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 12345678901234567890 % 123;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
78
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 12345678901234567890 / 123;
|
|
|
|
?column?
|
|
|
|
--------------------
|
|
|
|
100371373180768845
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select div(12345678901234567890, 123);
|
|
|
|
div
|
|
|
|
--------------------
|
|
|
|
100371373180768844
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select div(12345678901234567890, 123) * 123 + 12345678901234567890 % 123;
|
|
|
|
?column?
|
|
|
|
----------------------
|
|
|
|
12345678901234567890
|
|
|
|
(1 row)
|
|
|
|
|
Improve the performance and accuracy of numeric sqrt() and ln().
Instead of using Newton's method to compute numeric square roots, use
the Karatsuba square root algorithm, which performs better for numbers
of all sizes. In practice, this is 3-5 times faster for inputs with
just a few digits and up to around 10 times faster for larger inputs.
Also, the new algorithm guarantees that the final digit of the result
is correctly rounded, since it computes an integer square root with
truncation, containing at least 1 extra decimal digit before rounding.
The former algorithm would occasionally round the wrong way because
it rounded both the intermediate and final results.
In addition, arrange for sqrt_var() to explicitly support negative
rscale values (rounding before the decimal point). This allows the
argument reduction phase of ln_var() to be optimised for large inputs,
since it only needs to compute square roots with a few more digits
than the final ln() result, rather than computing all the digits
before the decimal point. For very large inputs, this can be many
thousands of times faster.
In passing, optimise div_var_fast() in a couple of places where it was
doing unnecessary work.
Patch by me, reviewed by Tom Lane and Tels.
Discussion: https://postgr.es/m/CAEZATCV1A7+jD3P30Zu31KjaxeSEyOn3v9d6tYegpxcq3cQu-g@mail.gmail.com
2020-03-28 15:37:53 +01:00
|
|
|
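-- Editor's note (sketch, not from the suite): the pairs of sqrt() calls below
-- probe inputs whose exact square roots straddle a rounding boundary, checking
-- the correct-rounding guarantee described above; under that guarantee, e.g.
--   select sqrt(2::numeric);
-- should yield 1.414213562373095, with the final digit correctly rounded.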
--
|
|
|
|
-- Test some corner cases for square root
|
|
|
|
--
|
|
|
|
select sqrt(1.000000000000003::numeric);
|
|
|
|
sqrt
|
|
|
|
-------------------
|
|
|
|
1.000000000000001
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(1.000000000000004::numeric);
|
|
|
|
sqrt
|
|
|
|
-------------------
|
|
|
|
1.000000000000002
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(96627521408608.56340355805::numeric);
|
|
|
|
sqrt
|
|
|
|
---------------------
|
|
|
|
9829929.87811248648
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(96627521408608.56340355806::numeric);
|
|
|
|
sqrt
|
|
|
|
---------------------
|
|
|
|
9829929.87811248649
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(515549506212297735.073688290367::numeric);
|
|
|
|
sqrt
|
|
|
|
------------------------
|
|
|
|
718017761.766585921184
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(515549506212297735.073688290368::numeric);
|
|
|
|
sqrt
|
|
|
|
------------------------
|
|
|
|
718017761.766585921185
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(8015491789940783531003294973900306::numeric);
|
|
|
|
sqrt
|
|
|
|
-------------------
|
|
|
|
89529278953540017
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select sqrt(8015491789940783531003294973900307::numeric);
|
|
|
|
sqrt
|
|
|
|
-------------------
|
|
|
|
89529278953540018
|
|
|
|
(1 row)
|
|
|
|
|
2014-09-12 05:30:51 +02:00
|
|
|
--
|
|
|
|
-- Test code path for raising to integer powers
|
|
|
|
--
|
|
|
|
select 10.0 ^ -2147483648 as rounds_to_zero;
|
Improve the accuracy of numeric power() for integer exponents.
This makes the choice of result scale of numeric power() for integer
exponents consistent with the choice for non-integer exponents, and
with the result scale of other numeric functions. Specifically, the
result scale will be at least as large as the scale of either input,
and sufficient to ensure that the result has at least 16 significant
digits.
Formerly, the result scale was based only on the scale of the first
input, without taking into account the weight of the result. For
results with negative weight, that could lead to results with very few
or even no non-zero significant digits (e.g., 10.0 ^ (-18) produced
0.0000000000000000).
Fix this by moving responsibility for the choice of result scale into
power_var_int(), which already has code to estimate the result weight.
Per report by Adrian Klaver and suggested fix by Tom Lane.
No back-patch -- arguably this is a bug fix, but one which is easy to
work around, so it doesn't seem worth the risk of changing query
results in stable branches.
Discussion: https://postgr.es/m/12a40226-70ac-3a3b-3d3a-fdaf9e32d312%40aklaver.com
2022-10-20 11:10:17 +02:00
|
|
|
rounds_to_zero
|
|
|
|
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
|
0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
|
2014-09-12 05:30:51 +02:00
|
|
|
(1 row)
|
|
|
|
|
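-- Editor's sketch (based on the commit message above): with the result scale
-- now derived from the estimated result weight, a query such as
--   select 10.0 ^ (-18);
-- should keep at least 16 significant digits of the true value 1e-18, rather
-- than rounding away to the former 0.0000000000000000.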
|
|
|
select 10.0 ^ -2147483647 as rounds_to_zero;
|
2022-10-20 11:10:17 +02:00
|
|
|
rounds_to_zero
|
|
|
|
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
|
0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
|
2014-09-12 05:30:51 +02:00
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 10.0 ^ 2147483647 as overflows;
|
|
|
|
ERROR: value overflows numeric format
|
|
|
|
select 117743296169.0 ^ 1000000000 as overflows;
|
|
|
|
ERROR: value overflows numeric format
|
Improve type numeric's calculations for ln(), log(), exp(), pow().
Set the "rscales" for intermediate-result calculations to ensure that
suitable numbers of significant digits are maintained throughout. The
previous coding hadn't thought this through in any detail, and as a result
could deliver results with many inaccurate digits, or in the worst cases
even fail with divide-by-zero errors as a result of losing all nonzero
digits of intermediate results.
In exp_var(), get rid entirely of the logic that separated the calculation
into integer and fractional parts: that was neither accurate nor
particularly fast. The existing range-reduction method of dividing by 2^n
can be applied across the full input range instead of only 0..1, as long as
we are careful to set an appropriate rscale for each step.
Also fix the logic in mul_var() for shortening the calculation when the
caller asks for fewer output digits than an exact calculation would
require. This bug doesn't affect simple multiplications since that code
path asks for an exact result, but it does contribute to accuracy issues
in the transcendental math functions.
In passing, improve performance of mul_var() a bit by forcing the shorter
input to be on the left, thus reducing the number of iterations of the
outer loop and probably also reducing the number of carry-propagation
steps needed.
This is arguably a bug fix, but in view of the lack of field complaints,
it does not seem worth the risk of back-patching.
Dean Rasheed
2015-11-14 20:55:38 +01:00
|
|
|
-- cases that used to return inaccurate results
|
2022-10-20 11:10:17 +02:00
|
|
|
select 3.789 ^ 21.0000000000000000;
|
2015-11-14 20:55:38 +01:00
|
|
|
?column?
|
|
|
|
--------------------------------
|
|
|
|
1409343026052.8716016316022141
|
|
|
|
(1 row)
|
|
|
|
|
2022-10-20 11:10:17 +02:00
|
|
|
select 3.789 ^ 35.0000000000000000;
|
2015-11-14 20:55:38 +01:00
|
|
|
?column?
|
|
|
|
----------------------------------------
|
|
|
|
177158169650516670809.3820586142670135
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 1.2 ^ 345;
|
2022-10-20 11:10:17 +02:00
|
|
|
?column?
|
|
|
|
--------------------------------
|
|
|
|
2077446682327378559843444695.6
|
2015-11-14 20:55:38 +01:00
|
|
|
(1 row)
|
|
|
|
|
|
|
|
select 0.12 ^ (-20);
|
2022-10-20 11:10:17 +02:00
|
|
|
?column?
|
|
|
|
------------------------
|
|
|
|
2608405330458882702.55
|
2015-11-14 20:55:38 +01:00
|
|
|
(1 row)
|
|
|
|
|
2021-01-05 12:15:28 +01:00
|
|
|
select 1.000000000123 ^ (-2147483648);
?column?
--------------------
0.7678656556403084
(1 row)

Adjust the integer overflow tests in the numeric code.
Formerly, the numeric code tested whether an integer value of a larger
type would fit in a smaller type by casting it to the smaller type and
then testing if the reverse conversion produced the original value.
That's perfectly fine, except that it caused a test failure on
buildfarm animal castoroides, most likely due to a compiler bug.
Instead, do these tests by comparing against PG_INT16/32_MIN/MAX. That
matches existing code in other places, such as int84(), which is more
widely tested, and so is less likely to go wrong.
While at it, add regression tests covering the numeric-to-int8/4/2
conversions, and adjust the recently added tests to the style of
434ddfb79a (on the v11 branch) to make failures easier to diagnose.
Per buildfarm via Tom Lane, reviewed by Tom Lane.
Discussion: https://postgr.es/m/2394813.1628179479%40sss.pgh.pa.us
2021-08-06 22:29:15 +02:00
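The boundary behaviour described above is easy to exercise from SQL. An
illustrative sketch (these statements are not from the original file):
numeric-to-integer casts round to the nearest integer, and anything that
rounds past the target type's range must raise an out-of-range error.

select 32767.4::numeric::int2;
-- expected: 32767 (rounds down, still within PG_INT16_MAX)
select 32767.5::numeric::int2;
-- expected: ERROR: smallint out of range (rounds up to 32768)
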
Fix corner-case errors and loss of precision in numeric_power().
This fixes a couple of related problems that arise when raising
numbers to very large powers.
Firstly, when raising a negative number to a very large integer power,
the result should be well-defined, but the previous code would only
cope if the exponent was small enough to go through power_var_int().
Otherwise it would throw an internal error, attempting to take the
logarithm of a negative number. Fix this by adding suitable handling
to the general case in power_var() to cope with negative bases,
checking for integer powers there.
Next, when raising a (positive or negative) number whose absolute
value is slightly less than 1 to a very large power, the result should
approach zero as the power is increased. However, in some cases, for
sufficiently large powers, this would lose all precision and return 1
instead of 0. This was due to the way that the local_rscale was being
calculated for the final full-precision calculation:
local_rscale = rscale + (int) val - ln_dweight + 8
The first two terms on the right hand side are meant to give the
number of significant digits required in the result ("val" being the
estimated result weight). However, this failed to account for the fact
that rscale is clipped to a maximum of NUMERIC_MAX_DISPLAY_SCALE
(1000), and the result weight might be less than -1000, causing their
sum to be negative, leading to a loss of precision. Fix this by
forcing the number of significant digits calculated to be nonnegative.
It's OK for it to be zero (when the result weight is less than -1000),
since the local_rscale value then includes a few extra digits to
ensure an accurate result.
Finally, add additional underflow checks to exp_var() and power_var(),
so that they consistently return zero for cases like this where the
result is indistinguishable from zero. Some paths through this code
already returned zero in such cases, but others were throwing overflow
errors.
Dean Rasheed, reviewed by Yugo Nagata.
Discussion: http://postgr.es/m/CAEZATCW6Dvq7+3wN3tt5jLj-FyOcUgT5xNoOqce5=6Su0bCR0w@mail.gmail.com
2021-07-31 12:21:44 +02:00

select coalesce(nullif(0.9999999999 ^ 23300000000000, 0), 0) as rounds_to_zero;
rounds_to_zero
----------------
0
(1 row)
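Building on the underflow handling just described, the same clamp-to-zero
behaviour should extend to negative bases, since power_var() now handles
negative bases raised to integer powers. An illustrative sketch (assumed
behaviour, not one of the original tests):

select coalesce(nullif((-0.9999999999) ^ 23300000000001, 0), 0) as also_rounds_to_zero;
-- expected: 0 (an odd power of a negative base underflows to zero as well)
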
Fix corner-case loss of precision in numeric_power().
This fixes a loss of precision that occurs when the first input is
very close to 1, so that its logarithm is very small.
Formerly, during the initial low-precision calculation to estimate the
result weight, the logarithm was computed to a local rscale that was
capped to NUMERIC_MAX_DISPLAY_SCALE (1000). However, the base may be
as close as 1e-16383 to 1, hence its logarithm may be as small as
1e-16383, and so the local rscale needs to be allowed to exceed 16383,
otherwise all precision is lost, leading to a poor choice of rscale
for the full-precision calculation.
Fix this by removing the cap on the local rscale during the initial
low-precision calculation, as we already do in the full-precision
calculation. This doesn't change the fact that the initial calculation
is a low-precision approximation, computing the logarithm to around 8
significant digits, which is very fast, especially when the base is
very close to 1.
Patch by me, reviewed by Alvaro Herrera.
Discussion: https://postgr.es/m/CAEZATCV-Ceu%2BHpRMf416yUe4KKFv%3DtdgXQAe5-7S9tD%3D5E-T1g%40mail.gmail.com
2021-10-06 14:16:51 +02:00
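The point of the fix above is that ln() of a base extremely close to 1 is
tiny but must still carry correct leading digits. A spot check of ln() in
that regime (illustrative, not from the original file):

select round(ln(1.00000000000000000001) * 1e20, 6);
-- expected: 1.000000 (ln(1+x) is approximately x for small x, so scaling
-- by 1e20 recovers the leading digits intact)
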
select round(((1 - 1.500012345678e-1000) ^ 1.45e1003) * 1e1000);
round
----------------------------------------------------------
25218976308958387188077465658068501556514992509509282366
(1 row)

-- cases that used to error out
select 0.12 ^ (-25);
?column?
-----------------------------
104825960103961013959336.50
(1 row)

select 0.5678 ^ (-85);
?column?
----------------------------
782333637740774446257.7719
(1 row)

select coalesce(nullif(0.9999999999 ^ 70000000000000, 0), 0) as underflows;
underflows
------------
0
(1 row)

-- negative base to integer powers
select (-1.0) ^ 2147483646;
?column?
--------------------
1.0000000000000000
(1 row)

select (-1.0) ^ 2147483647;
?column?
---------------------
-1.0000000000000000
(1 row)

select (-1.0) ^ 2147483648;
?column?
--------------------
1.0000000000000000
(1 row)

select (-1.0) ^ 1000000000000000;
?column?
--------------------
1.0000000000000000
(1 row)

select (-1.0) ^ 1000000000000001;
?column?
---------------------
-1.0000000000000000
(1 row)

-- integer powers of 10
select n, 10.0 ^ n as "10^n", (10.0 ^ n) * (10.0 ^ (-n)) = 1 as ok
from generate_series(-20, 20) n;
n | 10^n | ok
-----+----------------------------------------+----
-20 | 0.000000000000000000010000000000000000 | t
-19 | 0.00000000000000000010000000000000000 | t
-18 | 0.0000000000000000010000000000000000 | t
-17 | 0.000000000000000010000000000000000 | t
-16 | 0.00000000000000010000000000000000 | t
-15 | 0.0000000000000010000000000000000 | t
-14 | 0.000000000000010000000000000000 | t
-13 | 0.00000000000010000000000000000 | t
-12 | 0.0000000000010000000000000000 | t
-11 | 0.000000000010000000000000000 | t
-10 | 0.00000000010000000000000000 | t
-9 | 0.0000000010000000000000000 | t
-8 | 0.000000010000000000000000 | t
-7 | 0.00000010000000000000000 | t
-6 | 0.0000010000000000000000 | t
-5 | 0.000010000000000000000 | t
-4 | 0.00010000000000000000 | t
-3 | 0.0010000000000000000 | t
-2 | 0.010000000000000000 | t
-1 | 0.10000000000000000 | t
0 | 1.0000000000000000 | t
1 | 10.000000000000000 | t
2 | 100.00000000000000 | t
3 | 1000.0000000000000 | t
4 | 10000.000000000000 | t
5 | 100000.00000000000 | t
6 | 1000000.0000000000 | t
7 | 10000000.000000000 | t
8 | 100000000.00000000 | t
9 | 1000000000.0000000 | t
10 | 10000000000.000000 | t
11 | 100000000000.00000 | t
12 | 1000000000000.0000 | t
13 | 10000000000000.000 | t
14 | 100000000000000.00 | t
15 | 1000000000000000.0 | t
16 | 10000000000000000.0 | t
17 | 100000000000000000.0 | t
18 | 1000000000000000000.0 | t
19 | 10000000000000000000.0 | t
20 | 100000000000000000000.0 | t
(41 rows)

--
-- Tests for raising to non-integer powers
--
-- special cases
select 0.0 ^ 0.0;
?column?
--------------------
1.0000000000000000
(1 row)

select (-12.34) ^ 0.0;
?column?
--------------------
1.0000000000000000
(1 row)

select 12.34 ^ 0.0;
?column?
--------------------
1.0000000000000000
(1 row)

select 0.0 ^ 12.34;
?column?
--------------------
0.0000000000000000
(1 row)

-- NaNs
select 'NaN'::numeric ^ 'NaN'::numeric;
?column?
----------
NaN
(1 row)

select 'NaN'::numeric ^ 0;
?column?
----------
1
(1 row)

select 'NaN'::numeric ^ 1;
?column?
----------
NaN
(1 row)

select 0 ^ 'NaN'::numeric;
?column?
----------
NaN
(1 row)

select 1 ^ 'NaN'::numeric;
?column?
----------
1
(1 row)

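The two exceptions above (NaN ^ 0 = 1 and 1 ^ NaN = 1) follow the POSIX
pow() convention; every other combination involving NaN stays NaN. One more
illustrative probe (not part of the original tests):

select 'NaN'::numeric ^ 2.5;
-- expected: NaN
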
-- invalid inputs
select 0.0 ^ (-12.34);
ERROR: zero raised to a negative power is undefined
select (-12.34) ^ 1.2;
ERROR: a negative number raised to a non-integer power yields a complex result
-- cases that used to generate inaccurate results
select 32.1 ^ 9.8;
?column?
--------------------
580429286790711.10
(1 row)

select 32.1 ^ (-9.8);
?column?
----------------------------------
0.000000000000001722862754788209
(1 row)

select 12.3 ^ 45.6;
?column?
------------------------------------------------------
50081010321492803393171165777624533697036806969694.9
(1 row)

select 12.3 ^ (-45.6);
?column?
---------------------------------------------------------------------
0.00000000000000000000000000000000000000000000000001996764828785491
(1 row)

-- big test
select 1.234 ^ 5678;
?column?
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
307239295662090741644584872593956173493568238595074141254349565406661439636598896798876823220904084953233015553994854875890890858118656468658643918169805277399402542281777901029346337707622181574346585989613344285010764501017625366742865066948856161360224801370482171458030533346309750557140549621313515752078638620714732831815297168231790779296290266207315344008883935010274044001522606235576584215999260117523114297033944018699691024106823438431754073086813382242140602291215149759520833200152654884259619588924545324.597
(1 row)
--
-- Tests for EXP()
--
-- special cases
select exp(0.0);
exp
--------------------
1.0000000000000000
(1 row)

select exp(1.0);
exp
--------------------
2.7182818284590452
(1 row)

select exp(1.0::numeric(71,70));
exp
--------------------------------------------------------------------------
2.7182818284590452353602874713526624977572470936999595749669676277240766
(1 row)

select exp('nan'::numeric);
exp
-----
NaN
(1 row)

select exp('inf'::numeric);
exp
----------
Infinity
(1 row)

select exp('-inf'::numeric);
exp
-----
0
(1 row)

select coalesce(nullif(exp(-5000::numeric), 0), 0) as rounds_to_zero;
rounds_to_zero
----------------
0
(1 row)

select coalesce(nullif(exp(-10000::numeric), 0), 0) as underflows;
underflows
------------
0
(1 row)

-- cases that used to generate inaccurate results
select exp(32.999);
exp
---------------------
214429043492155.053
(1 row)

select exp(-32.999);
exp
----------------------------------
0.000000000000004663547361468248
(1 row)

select exp(123.456);
exp
------------------------------------------------------------
413294435277809344957685441227343146614594393746575438.725
(1 row)

select exp(-123.456);
exp
-------------------------------------------------------------------------
0.000000000000000000000000000000000000000000000000000002419582541264601
(1 row)

-- big test
select exp(1234.5678);
exp
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
146549072930959479983482138503979804217622199675223653966270157446954995433819741094410764947112047906012815540251009949604426069672532417736057033099274204598385314594846509975629046864798765888104789074984927709616261452461385220475510438783429612447831614003668421849727379202555580791042606170523016207262965336641214601082882495255771621327088265411334088968112458492660609809762865582162764292604697957813514621259353683899630997077707406305730694385703091201347848855199354307506425820147289848677003277208302716466011827836279231.9667
(1 row)
--
-- Tests for generate_series
--
select * from generate_series(0.0::numeric, 4.0::numeric);
generate_series
-----------------
0.0
1.0
2.0
3.0
4.0
(5 rows)

select * from generate_series(0.1::numeric, 4.0::numeric, 1.3::numeric);
generate_series
-----------------
0.1
1.4
2.7
4.0
(4 rows)

select * from generate_series(4.0::numeric, -1.5::numeric, -2.2::numeric);
generate_series
-----------------
4.0
1.8
-0.4
(3 rows)

-- Trigger errors
select * from generate_series(-100::numeric, 100::numeric, 0::numeric);
ERROR: step size cannot equal zero
select * from generate_series(-100::numeric, 100::numeric, 'nan'::numeric);
ERROR: step size cannot be NaN
select * from generate_series('nan'::numeric, 100::numeric, 10::numeric);
ERROR: start value cannot be NaN
select * from generate_series(0::numeric, 'nan'::numeric, 10::numeric);
ERROR: stop value cannot be NaN
select * from generate_series('inf'::numeric, 'inf'::numeric, 10::numeric);
ERROR: start value cannot be infinity
select * from generate_series(0::numeric, 'inf'::numeric, 10::numeric);
ERROR: stop value cannot be infinity
select * from generate_series(0::numeric, '42'::numeric, '-inf'::numeric);
ERROR: step size cannot be infinity
-- Checks maximum, output is truncated
select (i / (10::numeric ^ 131071))::numeric(1,0)
from generate_series(6 * (10::numeric ^ 131071),
                     9 * (10::numeric ^ 131071),
                     10::numeric ^ 131071) as a(i);
numeric
---------
6
7
8
9
(4 rows)
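The series above stops just inside numeric's range ceiling (at most 131072
digits before the decimal point, i.e. values below 10^131072). One step
further should overflow; an illustrative sketch (assumed error text, not
from the original file):

select 10::numeric ^ 131072;
-- expected: ERROR: value overflows numeric format
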
-- Check usage with variables
select * from generate_series(1::numeric, 3::numeric) i, generate_series(i,3) j;
i | j
---+---
1 | 1
1 | 2
1 | 3
2 | 2
2 | 3
3 | 3
(6 rows)

select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,i) j;
i | j
---+---
1 | 1
2 | 1
2 | 2
3 | 1
3 | 2
3 | 3
(6 rows)

select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,5,i) j;
i | j
---+---
1 | 1
1 | 2
1 | 3
1 | 4
1 | 5
2 | 1
2 | 3
2 | 5
3 | 1
3 | 4
(10 rows)
--
-- Tests for LN()
--
-- Invalid inputs
select ln(-12.34);
ERROR:  cannot take logarithm of a negative number
select ln(0.0);
ERROR:  cannot take logarithm of zero
-- Some random tests
select ln(1.2345678e-28);
                   ln                    
-----------------------------------------
 -64.26166165451762991204894255882820859
(1 row)

select ln(0.0456789);
         ln          
---------------------
 -3.0861187944847439
(1 row)

select ln(0.349873948359354029493948309745709580730482050975);
                         ln                          
-----------------------------------------------------
 -1.050182336912082775693991697979750253056317885460
(1 row)

select ln(0.99949452);
           ln            
-------------------------
 -0.00050560779808326467
(1 row)

select ln(1.00049687395);
           ln           
------------------------
 0.00049675054901370394
(1 row)

select ln(1234.567890123456789);
         ln         
--------------------
 7.1184763012977896
(1 row)

select ln(5.80397490724e5);
         ln         
--------------------
 13.271468476626518
(1 row)

select ln(9.342536355e34);
         ln         
--------------------
 80.522470935524187
(1 row)

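-- A hedged aside, not part of the upstream test set: ln() and exp() are
-- inverses up to the rounding of the intermediate result.
select abs(ln(exp(1.0)) - 1.0) < 1e-15;  -- expected: t
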
--
-- Tests for LOG() (base 10)
--
-- invalid inputs
select log(-12.34);
ERROR:  cannot take logarithm of a negative number
CONTEXT:  SQL function "log" statement 1
select log(0.0);
ERROR:  cannot take logarithm of zero
CONTEXT:  SQL function "log" statement 1
-- some random tests
select log(1.234567e-89);
                                                 log                                                 
-----------------------------------------------------------------------------------------------------
 -88.90848533591373725637496492944925187293052336306443143312825869985819779294142441287021741054275
(1 row)

select log(3.4634998359873254962349856073435545);
                 log                  
--------------------------------------
 0.5395151714070134409152404011959981
(1 row)

select log(9.999999999999999999);
         log          
----------------------
 1.000000000000000000
(1 row)

select log(10.00000000000000000);
         log         
---------------------
 1.00000000000000000
(1 row)

select log(10.00000000000000001);
         log         
---------------------
 1.00000000000000000
(1 row)

select log(590489.45235237);
        log        
-------------------
 5.771212144411727
(1 row)

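-- A hedged aside, not part of the upstream test set: single-argument log()
-- is base 10, i.e. log(x) = ln(x) / ln(10) up to rounding.
select abs(log(590489.45235237) - ln(590489.45235237) / ln(10.0)) < 1e-12;  -- expected: t
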
--
-- Tests for LOG() (arbitrary base)
--
-- invalid inputs
select log(-12.34, 56.78);
ERROR:  cannot take logarithm of a negative number
select log(-12.34, -56.78);
ERROR:  cannot take logarithm of a negative number
select log(12.34, -56.78);
ERROR:  cannot take logarithm of a negative number
select log(0.0, 12.34);
ERROR:  cannot take logarithm of zero
select log(12.34, 0.0);
ERROR:  cannot take logarithm of zero
select log(1.0, 12.34);
ERROR:  division by zero
-- some random tests
select log(1.23e-89, 6.4689e45);
                                              log                                               
------------------------------------------------------------------------------------------------
 -0.5152489207781856983977054971756484879653568168479201885425588841094788842469115325262329756
(1 row)

select log(0.99923, 4.58934e34);
         log         
---------------------
 -103611.55579544132
(1 row)

select log(1.000016, 8.452010e18);
        log         
--------------------
 2723830.2877097365
(1 row)

select log(3.1954752e47, 9.4792021e-73);
                                         log                                         
-------------------------------------------------------------------------------------
 -1.51613372350688302142917386143459361608600157692779164475351842333265418126982165
(1 row)

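-- A hedged aside, not part of the upstream test set: the two-argument form
-- takes the base first, so log(b, x) = ln(x) / ln(b).
select abs(log(2.0, 8.0) - 3) < 1e-15;  -- expected: t
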
--
-- Tests for scale()
--
select scale(numeric 'NaN');
 scale 
-------
      
(1 row)

select scale(numeric 'inf');
 scale 
-------
      
(1 row)

select scale(NULL::numeric);
 scale 
-------
      
(1 row)

select scale(1.12);
 scale 
-------
     2
(1 row)

select scale(0);
 scale 
-------
     0
(1 row)

select scale(0.00);
 scale 
-------
     2
(1 row)

select scale(1.12345);
 scale 
-------
     5
(1 row)

select scale(110123.12475871856128);
 scale 
-------
    14
(1 row)

select scale(-1123.12471856128);
 scale 
-------
    11
(1 row)

select scale(-13.000000000000000);
 scale 
-------
    15
(1 row)

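-- A hedged aside, not part of the upstream test set: scale() reports the
-- displayed scale, so casting to a constrained typmod pads it out.
select scale(1.0::numeric(10,4));  -- expected: 4
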
--
-- Tests for min_scale()
--
select min_scale(numeric 'NaN') is NULL; -- should be true
 ?column? 
----------
 t
(1 row)

select min_scale(numeric 'inf') is NULL; -- should be true
 ?column? 
----------
 t
(1 row)

select min_scale(0); -- no digits
 min_scale 
-----------
         0
(1 row)

select min_scale(0.00); -- no digits again
 min_scale 
-----------
         0
(1 row)

select min_scale(1.0); -- no scale
 min_scale 
-----------
         0
(1 row)

select min_scale(1.1); -- scale 1
 min_scale 
-----------
         1
(1 row)

select min_scale(1.12); -- scale 2
 min_scale 
-----------
         2
(1 row)

select min_scale(1.123); -- scale 3
 min_scale 
-----------
         3
(1 row)

select min_scale(1.1234); -- scale 4, filled digit
 min_scale 
-----------
         4
(1 row)

select min_scale(1.12345); -- scale 5, 2 NDIGITS
 min_scale 
-----------
         5
(1 row)

select min_scale(1.1000); -- 1 pos in NDIGITS
 min_scale 
-----------
         1
(1 row)

select min_scale(1e100); -- very big number
 min_scale 
-----------
         0
(1 row)

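-- A hedged aside, not part of the upstream test set: min_scale() strips the
-- trailing zeroes that scale() counts, so min_scale(x) <= scale(x).
select scale(1.1000), min_scale(1.1000);  -- expected: 4 and 1
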
--
-- Tests for trim_scale()
--
select trim_scale(numeric 'NaN');
 trim_scale 
------------
        NaN
(1 row)

select trim_scale(numeric 'inf');
 trim_scale 
------------
   Infinity
(1 row)

select trim_scale(1.120);
 trim_scale 
------------
       1.12
(1 row)

select trim_scale(0);
 trim_scale 
------------
          0
(1 row)

select trim_scale(0.00);
 trim_scale 
------------
          0
(1 row)

select trim_scale(1.1234500);
 trim_scale 
------------
    1.12345
(1 row)

select trim_scale(110123.12475871856128000);
      trim_scale       
-----------------------
 110123.12475871856128
(1 row)

select trim_scale(-1123.124718561280000000);
    trim_scale     
-------------------
 -1123.12471856128
(1 row)

select trim_scale(-13.00000000000000000000);
 trim_scale 
------------
        -13
(1 row)

select trim_scale(1e100);
                                              trim_scale                                               
-------------------------------------------------------------------------------------------------------
 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
(1 row)

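-- A hedged aside, not part of the upstream test set: trim_scale() reduces a
-- value's scale to exactly its min_scale() without changing its value.
select scale(trim_scale(1.1000)) = min_scale(1.1000), trim_scale(1.1000) = 1.1000;  -- expected: t, t
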
--
-- Tests for SUM()
--
-- cases that need carry propagation
SELECT SUM(9999::numeric) FROM generate_series(1, 100000);
    sum    
-----------
 999900000
(1 row)

SELECT SUM((-9999)::numeric) FROM generate_series(1, 100000);
    sum     
------------
 -999900000
(1 row)

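-- A hedged aside, not part of the upstream test set: summing 100000 copies
-- of 9999 forces repeated carries in the accumulator; the result can be
-- cross-checked with plain multiplication.
SELECT SUM(9999::numeric) = 9999::numeric * 100000 FROM generate_series(1, 100000);  -- expected: t
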
--
-- Tests for VARIANCE()
--
CREATE TABLE num_variance (a numeric);
INSERT INTO num_variance VALUES (0);
INSERT INTO num_variance VALUES (3e-500);
INSERT INTO num_variance VALUES (-3e-500);
INSERT INTO num_variance VALUES (4e-500 - 1e-16383);
INSERT INTO num_variance VALUES (-4e-500 + 1e-16383);
-- variance is just under 12.5e-1000 and so should round down to 12e-1000
SELECT trim_scale(variance(a) * 1e1000) FROM num_variance;
 trim_scale 
------------
         12
(1 row)

-- check that parallel execution produces the same result
BEGIN;
ALTER TABLE num_variance SET (parallel_workers = 4);
SET LOCAL parallel_setup_cost = 0;
SET LOCAL max_parallel_workers_per_gather = 4;
SELECT trim_scale(variance(a) * 1e1000) FROM num_variance;
 trim_scale 
------------
         12
(1 row)

ROLLBACK;
-- case where sum of squares would overflow but variance does not
DELETE FROM num_variance;
INSERT INTO num_variance SELECT 9e131071 + x FROM generate_series(1, 5) x;
SELECT variance(a) FROM num_variance;
      variance      
--------------------
 2.5000000000000000
(1 row)

-- check that parallel execution produces the same result
BEGIN;
ALTER TABLE num_variance SET (parallel_workers = 4);
SET LOCAL parallel_setup_cost = 0;
SET LOCAL max_parallel_workers_per_gather = 4;
SELECT variance(a) FROM num_variance;
      variance      
--------------------
 2.5000000000000000
(1 row)

ROLLBACK;
DROP TABLE num_variance;
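
-- A hedged aside, not part of the upstream test set: the overflow case works
-- because variance depends only on deviations from the mean, so the huge
-- common offset 9e131071 cancels; the values behave like 1..5, whose sample
-- variance is 2.5.
SELECT variance(x::numeric) FROM generate_series(1, 5) x;  -- expected: 2.5000000000000000
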
--
-- Tests for GCD()
--
SELECT a, b, gcd(a, b), gcd(a, -b), gcd(-b, a), gcd(-b, -a)
FROM (VALUES (0::numeric, 0::numeric),
             (0::numeric, numeric 'NaN'),
             (0::numeric, 46375::numeric),
             (433125::numeric, 46375::numeric),
             (43312.5::numeric, 4637.5::numeric),
             (4331.250::numeric, 463.75000::numeric),
             ('inf', '0'),
             ('inf', '42'),
             ('inf', 'inf')
     ) AS v(a, b);
    a     |     b     |   gcd   |   gcd   |   gcd   |   gcd   
----------+-----------+---------+---------+---------+---------
        0 |         0 |       0 |       0 |       0 |       0
        0 |       NaN |     NaN |     NaN |     NaN |     NaN
        0 |     46375 |   46375 |   46375 |   46375 |   46375
   433125 |     46375 |     875 |     875 |     875 |     875
  43312.5 |    4637.5 |    87.5 |    87.5 |    87.5 |    87.5
 4331.250 | 463.75000 | 8.75000 | 8.75000 | 8.75000 | 8.75000
 Infinity |         0 |     NaN |     NaN |     NaN |     NaN
 Infinity |        42 |     NaN |     NaN |     NaN |     NaN
 Infinity |  Infinity |     NaN |     NaN |     NaN |     NaN
(9 rows)

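-- A hedged aside, not part of the upstream test set: gcd() is insensitive to
-- sign, as the four result columns above show, and an all-integer call stays
-- integral.
SELECT gcd(54::numeric, 24::numeric);  -- expected: 6
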
--
-- Tests for LCM()
--
SELECT a, b, lcm(a, b), lcm(a, -b), lcm(-b, a), lcm(-b, -a)
FROM (VALUES (0::numeric, 0::numeric),
             (0::numeric, numeric 'NaN'),
             (0::numeric, 13272::numeric),
             (13272::numeric, 13272::numeric),
             (423282::numeric, 13272::numeric),
             (42328.2::numeric, 1327.2::numeric),
             (4232.820::numeric, 132.72000::numeric),
             ('inf', '0'),
             ('inf', '42'),
             ('inf', 'inf')
     ) AS v(a, b);
    a     |     b     |     lcm      |     lcm      |     lcm      |     lcm      
----------+-----------+--------------+--------------+--------------+--------------
        0 |         0 |            0 |            0 |            0 |            0
        0 |       NaN |          NaN |          NaN |          NaN |          NaN
        0 |     13272 |            0 |            0 |            0 |            0
    13272 |     13272 |        13272 |        13272 |        13272 |        13272
   423282 |     13272 |     11851896 |     11851896 |     11851896 |     11851896
  42328.2 |    1327.2 |    1185189.6 |    1185189.6 |    1185189.6 |    1185189.6
 4232.820 | 132.72000 | 118518.96000 | 118518.96000 | 118518.96000 | 118518.96000
 Infinity |         0 |          NaN |          NaN |          NaN |          NaN
 Infinity |        42 |          NaN |          NaN |          NaN |          NaN
 Infinity |  Infinity |          NaN |          NaN |          NaN |          NaN
(10 rows)

SELECT lcm(9999 * (10::numeric)^131068 + (10::numeric^131068 - 1), 2); -- overflow
ERROR:  value overflows numeric format
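
-- A hedged aside, not part of the upstream test set: for finite, non-NaN
-- inputs lcm(a, b) = abs(a * b) / gcd(a, b); numeric equality ignores
-- trailing zeroes, so the scales need not match.
SELECT lcm(4232.820, 132.72000) = abs(4232.820 * 132.72000) / gcd(4232.820, 132.72000);  -- expected: t
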
--
-- Tests for factorial
--
SELECT factorial(4);
 factorial 
-----------
        24
(1 row)

SELECT factorial(15);
   factorial   
---------------
 1307674368000
(1 row)

SELECT factorial(100000);
ERROR:  value overflows numeric format
SELECT factorial(0);
 factorial 
-----------
         1
(1 row)

SELECT factorial(-4);
ERROR:  factorial of a negative number is undefined
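
-- A hedged aside, not part of the upstream test set: factorial() returns
-- numeric, so results can grow far beyond bigint before hitting the
-- overflow seen above.
SELECT factorial(5) = 5 * factorial(4);  -- expected: t (120 = 5 * 24)
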
--
-- Tests for pg_lsn()
--
SELECT pg_lsn(23783416::numeric);
  pg_lsn   
-----------
 0/16AE7F8
(1 row)

SELECT pg_lsn(0::numeric);
 pg_lsn 
--------
 0/0
(1 row)

SELECT pg_lsn(18446744073709551615::numeric);
      pg_lsn       
-------------------
 FFFFFFFF/FFFFFFFF
(1 row)

SELECT pg_lsn(-1::numeric);
ERROR:  pg_lsn out of range
SELECT pg_lsn(18446744073709551616::numeric);
ERROR:  pg_lsn out of range
SELECT pg_lsn('NaN'::numeric);
ERROR:  cannot convert NaN to pg_lsn
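
-- A hedged aside, not part of the upstream test set: an LSN is a 64-bit
-- position printed as two hex halves, which is why 23783416 renders as
-- 0/16AE7F8 above.
SELECT to_hex(23783416);  -- expected: 16ae7f8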