/*------------------------------------------------------------------------
 *
 * regress.c
 *	  Code for various C-language functions defined as part of the
 *	  regression tests.
 *
 * This code is released under the terms of the PostgreSQL License.
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/test/regress/regress.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <math.h>
#include <signal.h>

#include "access/detoast.h"
#include "access/htup_details.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "commands/sequence.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/spi.h"
#include "funcapi.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "nodes/supportnodes.h"
#include "optimizer/optimizer.h"
#include "optimizer/plancat.h"
#include "parser/parse_coerce.h"
#include "port/atomics.h"
#include "storage/spin.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/geo_decls.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/typcache.h"

#define EXPECT_TRUE(expr)	\
	do { \
		if (!(expr)) \
			elog(ERROR, \
				 "%s was unexpectedly false in file \"%s\" line %u", \
				 #expr, __FILE__, __LINE__); \
	} while (0)

#define EXPECT_EQ_U32(result_expr, expected_expr)	\
	do { \
		uint32		actual_result = (result_expr); \
		uint32		expected_result = (expected_expr); \
		if (actual_result != expected_result) \
			elog(ERROR, \
				 "%s yielded %u, expected %s in file \"%s\" line %u", \
				 #result_expr, actual_result, #expected_expr, __FILE__, __LINE__); \
	} while (0)

#define EXPECT_EQ_U64(result_expr, expected_expr)	\
	do { \
		uint64		actual_result = (result_expr); \
		uint64		expected_result = (expected_expr); \
		if (actual_result != expected_result) \
			elog(ERROR, \
				 "%s yielded " UINT64_FORMAT ", expected %s in file \"%s\" line %u", \
				 #result_expr, actual_result, #expected_expr, __FILE__, __LINE__); \
	} while (0)
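
/*
 * Illustrative usage (added comment; the expressions below are examples
 * only, not taken from this file):
 *
 *		EXPECT_TRUE(pg_atomic_test_set_flag(&flag));
 *		EXPECT_EQ_U32(pg_atomic_read_u32(&var), 3);
 *
 * On failure the stringified expression plus __FILE__/__LINE__ are
 * reported via elog(ERROR), aborting the calling test function.
 */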

#define LDELIM			'('
#define RDELIM			')'
#define DELIM			','

static void regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2);

PG_MODULE_MAGIC;


/* return the point where two paths intersect, or NULL if no intersection. */
PG_FUNCTION_INFO_V1(interpt_pp);

Datum
interpt_pp(PG_FUNCTION_ARGS)
{
	PATH	   *p1 = PG_GETARG_PATH_P(0);
	PATH	   *p2 = PG_GETARG_PATH_P(1);
	int			i,
				j;
	LSEG		seg1,
				seg2;
	bool		found;			/* We've found the intersection */

	found = false;				/* Haven't found it yet */

	for (i = 0; i < p1->npts - 1 && !found; i++)
	{
		regress_lseg_construct(&seg1, &p1->p[i], &p1->p[i + 1]);
		for (j = 0; j < p2->npts - 1 && !found; j++)
		{
			regress_lseg_construct(&seg2, &p2->p[j], &p2->p[j + 1]);
			if (DatumGetBool(DirectFunctionCall2(lseg_intersect,
												 LsegPGetDatum(&seg1),
												 LsegPGetDatum(&seg2))))
				found = true;
		}
	}

	if (!found)
		PG_RETURN_NULL();

	/*
	 * Note: DirectFunctionCall2 will kick out an error if lseg_interpt()
	 * returns NULL, but that should be impossible since we know the two
	 * segments intersect.
	 */
	PG_RETURN_DATUM(DirectFunctionCall2(lseg_interpt,
										LsegPGetDatum(&seg1),
										LsegPGetDatum(&seg2)));
}
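
/*
 * Illustrative call from SQL (added comment; assumes the function has been
 * registered with CREATE FUNCTION, as the regression tests do):
 *
 *		SELECT interpt_pp('[(0,0),(2,2)]'::path, '[(0,2),(2,0)]'::path);
 *		-- yields the intersection point (1,1)
 */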

/* like lseg_construct, but assume space already allocated */
static void
regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2)
{
	lseg->p[0].x = pt1->x;
	lseg->p[0].y = pt1->y;
	lseg->p[1].x = pt2->x;
	lseg->p[1].y = pt2->y;
}

PG_FUNCTION_INFO_V1(overpaid);

Datum
overpaid(PG_FUNCTION_ARGS)
{
	HeapTupleHeader tuple = PG_GETARG_HEAPTUPLEHEADER(0);
	bool		isnull;
	int32		salary;

	salary = DatumGetInt32(GetAttributeByName(tuple, "salary", &isnull));
	if (isnull)
		PG_RETURN_NULL();
	PG_RETURN_BOOL(salary > 699);
}
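
/*
 * Illustrative (added comment): overpaid() takes a composite value with an
 * int4 "salary" column, e.g. SELECT name FROM emp WHERE overpaid(emp.*);
 * (the table name "emp" is an example, not defined here).  It returns true
 * for salaries above 699 and NULL for a NULL salary.
 */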

/* New type "widget"
 * This used to be "circle", but I added circle to builtins,
 * so needed to make sure the names do not collide. - tgl 97/04/21
 */

typedef struct
{
	Point		center;
	double		radius;
} WIDGET;

PG_FUNCTION_INFO_V1(widget_in);
PG_FUNCTION_INFO_V1(widget_out);

#define NARGS	3

Datum
widget_in(PG_FUNCTION_ARGS)
{
	char	   *str = PG_GETARG_CSTRING(0);
	char	   *p,
			   *coord[NARGS];
	int			i;
	WIDGET	   *result;

	for (i = 0, p = str; *p && i < NARGS && *p != RDELIM; p++)
	{
		if (*p == DELIM || (*p == LDELIM && i == 0))
			coord[i++] = p + 1;
	}

	if (i < NARGS)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("invalid input syntax for type %s: \"%s\"",
						"widget", str)));

	result = (WIDGET *) palloc(sizeof(WIDGET));
	result->center.x = atof(coord[0]);
	result->center.y = atof(coord[1]);
	result->radius = atof(coord[2]);

	PG_RETURN_POINTER(result);
}
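
/*
 * Illustrative (added comment): the external widget format is "(x,y,r)",
 * so '(1,2,3)'::widget parses to center (1,2) with radius 3; input with
 * fewer than three coordinates is rejected as invalid syntax.  widget_out
 * below emits the same "(x,y,r)" form.
 */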

Datum
widget_out(PG_FUNCTION_ARGS)
{
	WIDGET	   *widget = (WIDGET *) PG_GETARG_POINTER(0);
	char	   *str = psprintf("(%g,%g,%g)",
							   widget->center.x, widget->center.y,
							   widget->radius);

	PG_RETURN_CSTRING(str);
}

PG_FUNCTION_INFO_V1(pt_in_widget);

Datum
pt_in_widget(PG_FUNCTION_ARGS)
{
	Point	   *point = PG_GETARG_POINT_P(0);
	WIDGET	   *widget = (WIDGET *) PG_GETARG_POINTER(1);
	float8		distance;

	distance = DatumGetFloat8(DirectFunctionCall2(point_distance,
												  PointPGetDatum(point),
												  PointPGetDatum(&widget->center)));

	PG_RETURN_BOOL(distance < widget->radius);
}

PG_FUNCTION_INFO_V1(reverse_name);

Datum
reverse_name(PG_FUNCTION_ARGS)
{
	char	   *string = PG_GETARG_CSTRING(0);
	int			i;
	int			len;
	char	   *new_string;

	new_string = palloc0(NAMEDATALEN);
	for (i = 0; i < NAMEDATALEN && string[i]; ++i)
		;
	if (i == NAMEDATALEN || !string[i])
		--i;
	len = i;
	for (; i >= 0; --i)
		new_string[len - i] = string[i];
	PG_RETURN_CSTRING(new_string);
}
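
/*
 * Illustrative (added comment): reverse_name('abc') yields 'cba'; at most
 * NAMEDATALEN characters of the input are considered.
 */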

PG_FUNCTION_INFO_V1(trigger_return_old);

Datum
trigger_return_old(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	HeapTuple	tuple;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "trigger_return_old: not fired by trigger manager");

	tuple = trigdata->tg_trigtuple;

	return PointerGetDatum(tuple);
}

#define TTDUMMY_INFINITY	999999

static SPIPlanPtr splan = NULL;
static bool ttoff = false;

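/*
 * ttdummy implements a toy "time travel" scheme: each row carries a pair of
 * int4 (start, stop) columns, and instead of changing rows in place the
 * trigger closes the old row version and INSERTs a replacement.
 * Illustrative setup (added comment; the table and column names are
 * examples, not defined in this file):
 *
 *		CREATE TRIGGER ttdummy
 *			BEFORE DELETE OR UPDATE ON tttest
 *			FOR EACH ROW
 *			EXECUTE FUNCTION ttdummy (price_on, price_off);
 *
 * The trigger must be fired BEFORE, per row, for UPDATE/DELETE only, and
 * takes exactly two column-name arguments; a sequence named "ttdummy_seq"
 * supplies the current "time" via nextval().
 */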
PG_FUNCTION_INFO_V1(ttdummy);

Datum
ttdummy(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	Trigger    *trigger;		/* to get trigger name */
	char	  **args;			/* arguments */
	int			attnum[2];		/* fnumbers of start/stop columns */
	Datum		oldon,
				oldoff;
	Datum		newon,
				newoff;
	Datum	   *cvals;			/* column values */
	char	   *cnulls;			/* column nulls */
	char	   *relname;		/* triggered relation name */
	Relation	rel;			/* triggered relation */
	HeapTuple	trigtuple;
	HeapTuple	newtuple = NULL;
	HeapTuple	rettuple;
	TupleDesc	tupdesc;		/* tuple description */
	int			natts;			/* # of attributes */
	bool		isnull;			/* to know whether some column is NULL */
	int			ret;
	int			i;

	if (!CALLED_AS_TRIGGER(fcinfo))
		elog(ERROR, "ttdummy: not fired by trigger manager");
	if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		elog(ERROR, "ttdummy: must be fired for row");
	if (!TRIGGER_FIRED_BEFORE(trigdata->tg_event))
		elog(ERROR, "ttdummy: must be fired before event");
	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		elog(ERROR, "ttdummy: cannot process INSERT event");
	if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		newtuple = trigdata->tg_newtuple;

	trigtuple = trigdata->tg_trigtuple;

	rel = trigdata->tg_relation;
	relname = SPI_getrelname(rel);

	/* check if TT is OFF for this relation */
	if (ttoff)					/* OFF - nothing to do */
	{
		pfree(relname);
		return PointerGetDatum((newtuple != NULL) ? newtuple : trigtuple);
	}

	trigger = trigdata->tg_trigger;

	if (trigger->tgnargs != 2)
		elog(ERROR, "ttdummy (%s): invalid (!= 2) number of arguments %d",
			 relname, trigger->tgnargs);

	args = trigger->tgargs;
	tupdesc = rel->rd_att;
	natts = tupdesc->natts;

	for (i = 0; i < 2; i++)
	{
		attnum[i] = SPI_fnumber(tupdesc, args[i]);
		if (attnum[i] <= 0)
			elog(ERROR, "ttdummy (%s): there is no attribute %s",
				 relname, args[i]);
		if (SPI_gettypeid(tupdesc, attnum[i]) != INT4OID)
			elog(ERROR, "ttdummy (%s): attribute %s must be of integer type",
				 relname, args[i]);
	}

	oldon = SPI_getbinval(trigtuple, tupdesc, attnum[0], &isnull);
	if (isnull)
		elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[0]);

	oldoff = SPI_getbinval(trigtuple, tupdesc, attnum[1], &isnull);
	if (isnull)
		elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[1]);

	if (newtuple != NULL)		/* UPDATE */
	{
		newon = SPI_getbinval(newtuple, tupdesc, attnum[0], &isnull);
		if (isnull)
			elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[0]);
		newoff = SPI_getbinval(newtuple, tupdesc, attnum[1], &isnull);
		if (isnull)
			elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[1]);

		if (oldon != newon || oldoff != newoff)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("ttdummy (%s): you cannot change %s and/or %s columns (use set_ttdummy)",
							relname, args[0], args[1])));

		if (newoff != TTDUMMY_INFINITY)
		{
			pfree(relname);		/* allocated in upper executor context */
			return PointerGetDatum(NULL);
		}
	}
	else if (oldoff != TTDUMMY_INFINITY)	/* DELETE */
	{
		pfree(relname);
		return PointerGetDatum(NULL);
	}

	newoff = DirectFunctionCall1(nextval, CStringGetTextDatum("ttdummy_seq"));
	/* nextval now returns int64; coerce down to int32 */
	newoff = Int32GetDatum((int32) DatumGetInt64(newoff));

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		elog(ERROR, "ttdummy (%s): SPI_connect returned %d", relname, ret);

	/* Fetch tuple values and nulls */
	cvals = (Datum *) palloc(natts * sizeof(Datum));
	cnulls = (char *) palloc(natts * sizeof(char));
	for (i = 0; i < natts; i++)
	{
		cvals[i] = SPI_getbinval((newtuple != NULL) ? newtuple : trigtuple,
								 tupdesc, i + 1, &isnull);
		cnulls[i] = (isnull) ? 'n' : ' ';
	}

	/* change date column(s) */
	if (newtuple)				/* UPDATE */
	{
		cvals[attnum[0] - 1] = newoff;	/* start_date eq current date */
		cnulls[attnum[0] - 1] = ' ';
		cvals[attnum[1] - 1] = TTDUMMY_INFINITY;	/* stop_date eq INFINITY */
		cnulls[attnum[1] - 1] = ' ';
	}
	else						/* DELETE */
	{
		cvals[attnum[1] - 1] = newoff;	/* stop_date eq current date */
		cnulls[attnum[1] - 1] = ' ';
	}

	/* if there is no plan ... */
	if (splan == NULL)
	{
		SPIPlanPtr	pplan;
		Oid		   *ctypes;
		char	   *query;

		/* allocate space in preparation */
		ctypes = (Oid *) palloc(natts * sizeof(Oid));
		query = (char *) palloc(100 + 16 * natts);

		/*
		 * Construct query: INSERT INTO _relation_ VALUES ($1, ...)
		 */
		sprintf(query, "INSERT INTO %s VALUES (", relname);
		for (i = 1; i <= natts; i++)
		{
			sprintf(query + strlen(query), "$%d%s",
					i, (i < natts) ? ", " : ")");
			ctypes[i - 1] = SPI_gettypeid(tupdesc, i);
		}

		/* Prepare plan for query */
		pplan = SPI_prepare(query, natts, ctypes);
		if (pplan == NULL)
			elog(ERROR, "ttdummy (%s): SPI_prepare returned %s",
				 relname, SPI_result_code_string(SPI_result));

		if (SPI_keepplan(pplan))
			elog(ERROR, "ttdummy (%s): SPI_keepplan failed", relname);

		splan = pplan;
	}

	ret = SPI_execp(splan, cvals, cnulls, 0);

	if (ret < 0)
		elog(ERROR, "ttdummy (%s): SPI_execp returned %d", relname, ret);

	/* Tuple to return to upper Executor ... */
	if (newtuple)				/* UPDATE */
		rettuple = SPI_modifytuple(rel, trigtuple, 1, &(attnum[1]),
								   &newoff, NULL);
	else						/* DELETE */
		rettuple = trigtuple;

	SPI_finish();				/* don't forget to say Bye to SPI mgr */

	pfree(relname);

	return PointerGetDatum(rettuple);
}

PG_FUNCTION_INFO_V1(set_ttdummy);

Datum
set_ttdummy(PG_FUNCTION_ARGS)
{
	int32		on = PG_GETARG_INT32(0);

	if (ttoff)					/* OFF currently */
	{
		if (on == 0)
			PG_RETURN_INT32(0);

		/* turn ON */
		ttoff = false;
		PG_RETURN_INT32(0);
	}

	/* ON currently */
	if (on != 0)
		PG_RETURN_INT32(1);

	/* turn OFF */
	ttoff = true;

	PG_RETURN_INT32(1);
}
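
/*
 * Illustrative (added comment): SELECT set_ttdummy(0) disables the time
 * travel behavior and SELECT set_ttdummy(1) re-enables it; the return
 * value is the previously active state (1 = on, 0 = off).
 */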


/*
 * Type int44 has no real-world use, but the regression tests use it
 * (under the alias "city_budget").  It's a four-element vector of int4's.
 */

/*
 *		int44in			- converts "num, num, ..." to internal form
 *
 *		Note: Fills any missing positions with zeroes.
 */
PG_FUNCTION_INFO_V1(int44in);

Datum
int44in(PG_FUNCTION_ARGS)
{
	char	   *input_string = PG_GETARG_CSTRING(0);
	int32	   *result = (int32 *) palloc(4 * sizeof(int32));
	int			i;

	i = sscanf(input_string,
			   "%d, %d, %d, %d",
			   &result[0],
			   &result[1],
			   &result[2],
			   &result[3]);
	while (i < 4)
		result[i++] = 0;

	PG_RETURN_POINTER(result);
}

/*
 *		int44out		- converts internal form to "num, num, ..."
 */
PG_FUNCTION_INFO_V1(int44out);

Datum
int44out(PG_FUNCTION_ARGS)
{
	int32	   *an_array = (int32 *) PG_GETARG_POINTER(0);
	char	   *result = (char *) palloc(16 * 4);

	snprintf(result, 16 * 4, "%d,%d,%d,%d",
			 an_array[0],
			 an_array[1],
			 an_array[2],
			 an_array[3]);

	PG_RETURN_CSTRING(result);
}
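
/*
 * Illustrative (added comment): int44in('1, 2') stores {1,2,0,0}, which
 * int44out then renders as "1,2,0,0".
 */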

PG_FUNCTION_INFO_V1(test_canonicalize_path);
Datum
test_canonicalize_path(PG_FUNCTION_ARGS)
{
	char	   *path = text_to_cstring(PG_GETARG_TEXT_PP(0));

	canonicalize_path(path);
	PG_RETURN_TEXT_P(cstring_to_text(path));
}
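
/*
 * Illustrative (added comment): canonicalize_path() strips unnecessary "."
 * and ".." components, so test_canonicalize_path('/a/b/../c') is expected
 * to yield '/a/c'.
 */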

PG_FUNCTION_INFO_V1(make_tuple_indirect);
Datum
make_tuple_indirect(PG_FUNCTION_ARGS)
{
	HeapTupleHeader rec = PG_GETARG_HEAPTUPLEHEADER(0);
	HeapTupleData tuple;
	int			ncolumns;
	Datum	   *values;
	bool	   *nulls;

	Oid			tupType;
	int32		tupTypmod;
	TupleDesc	tupdesc;

	HeapTuple	newtup;

	int			i;

	MemoryContext old_context;

	/* Extract type info from the tuple itself */
	tupType = HeapTupleHeaderGetTypeId(rec);
	tupTypmod = HeapTupleHeaderGetTypMod(rec);
	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
	ncolumns = tupdesc->natts;

	/* Build a temporary HeapTuple control structure */
	tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = rec;

	values = (Datum *) palloc(ncolumns * sizeof(Datum));
	nulls = (bool *) palloc(ncolumns * sizeof(bool));

	heap_deform_tuple(&tuple, tupdesc, values, nulls);

	old_context = MemoryContextSwitchTo(TopTransactionContext);

	for (i = 0; i < ncolumns; i++)
	{
		struct varlena *attr;
		struct varlena *new_attr;
		struct varatt_indirect redirect_pointer;

		/* only work on existing, not-null varlenas */
		if (TupleDescAttr(tupdesc, i)->attisdropped ||
			nulls[i] ||
			TupleDescAttr(tupdesc, i)->attlen != -1)
			continue;

		attr = (struct varlena *) DatumGetPointer(values[i]);

		/* don't recursively indirect */
		if (VARATT_IS_EXTERNAL_INDIRECT(attr))
			continue;

		/* copy datum, so it still lives later */
		if (VARATT_IS_EXTERNAL_ONDISK(attr))
			attr = detoast_external_attr(attr);
		else
		{
			struct varlena *oldattr = attr;

			attr = palloc0(VARSIZE_ANY(oldattr));
			memcpy(attr, oldattr, VARSIZE_ANY(oldattr));
		}

		/* build indirection Datum */
		new_attr = (struct varlena *) palloc0(INDIRECT_POINTER_SIZE);
		redirect_pointer.pointer = attr;
		SET_VARTAG_EXTERNAL(new_attr, VARTAG_INDIRECT);
		memcpy(VARDATA_EXTERNAL(new_attr), &redirect_pointer,
			   sizeof(redirect_pointer));

		values[i] = PointerGetDatum(new_attr);
	}

	newtup = heap_form_tuple(tupdesc, values, nulls);
	pfree(values);
	pfree(nulls);
	ReleaseTupleDesc(tupdesc);

	MemoryContextSwitchTo(old_context);

	/*
	 * We intentionally don't use PG_RETURN_HEAPTUPLEHEADER here, because that
	 * would cause the indirect toast pointers to be flattened out of the
	 * tuple immediately, rendering subsequent testing irrelevant.  So just
	 * return the HeapTupleHeader pointer as-is.  This violates the general
	 * rule that composite Datums shouldn't contain toast pointers, but so
	 * long as the regression test scripts don't insert the result of this
	 * function into a container type (record, array, etc) it should be OK.
	 */
	PG_RETURN_POINTER(newtup->t_data);
}
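
/*
 * (Added note) make_tuple_indirect rewrites each plain varlena column of
 * the given record as a VARTAG_INDIRECT pointer whose target lives in
 * TopTransactionContext, which lets the tests exercise code paths that
 * must flatten indirect toast pointers.
 */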

PG_FUNCTION_INFO_V1(regress_setenv);

Datum
regress_setenv(PG_FUNCTION_ARGS)
{
	char	   *envvar = text_to_cstring(PG_GETARG_TEXT_PP(0));
	char	   *envval = text_to_cstring(PG_GETARG_TEXT_PP(1));

	if (!superuser())
		elog(ERROR, "must be superuser to change environment variables");

	if (setenv(envvar, envval, 1) != 0)
		elog(ERROR, "could not set environment variable: %m");

	PG_RETURN_VOID();
}

/* Sleep until no process has a given PID. */
PG_FUNCTION_INFO_V1(wait_pid);

Datum
wait_pid(PG_FUNCTION_ARGS)
{
	int			pid = PG_GETARG_INT32(0);

	if (!superuser())
		elog(ERROR, "must be superuser to check PID liveness");

	while (kill(pid, 0) == 0)
	{
		CHECK_FOR_INTERRUPTS();
		pg_usleep(50000);
	}

	if (errno != ESRCH)
		elog(ERROR, "could not check PID %d liveness: %m", pid);

	PG_RETURN_VOID();
}
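
/*
 * (Added note) kill(pid, 0) delivers no signal; it only probes for PID
 * existence, succeeding while the process exists and failing with ESRCH
 * once it is gone.  CHECK_FOR_INTERRUPTS() keeps the 50ms polling loop
 * cancellable.
 */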
|
2014-09-25 23:49:05 +02:00
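/*
 * The atomics API implements its generic read-modify-write operations
 * on top of a compare-and-exchange primitive.  A minimal sketch of that
 * pattern (an assumed illustration, not the actual port/atomics code;
 * generic_fetch_add_u32 is a hypothetical name):
 */
static uint32
generic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32		old = pg_atomic_read_u32(ptr);

	/* a failed CAS refreshes 'old' with the current value, so just retry */
	while (!pg_atomic_compare_exchange_u32(ptr, &old, old + add_))
		;
	return old;
}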
|
|
|
|
|
|
|
static void
|
|
|
|
test_atomic_flag(void)
|
|
|
|
{
|
|
|
|
pg_atomic_flag flag;
|
|
|
|
|
|
|
|
pg_atomic_init_flag(&flag);
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_TRUE(pg_atomic_unlocked_test_flag(&flag));
|
|
|
|
EXPECT_TRUE(pg_atomic_test_set_flag(&flag));
|
|
|
|
EXPECT_TRUE(!pg_atomic_unlocked_test_flag(&flag));
|
|
|
|
EXPECT_TRUE(!pg_atomic_test_set_flag(&flag));
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_clear_flag(&flag);
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_TRUE(pg_atomic_unlocked_test_flag(&flag));
|
|
|
|
EXPECT_TRUE(pg_atomic_test_set_flag(&flag));
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_clear_flag(&flag);
|
|
|
|
}
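/*
 * A typical (hypothetical) use of pg_atomic_flag outside these tests is
 * a run-once guard: pg_atomic_test_set_flag() returns true only for the
 * one caller that actually manages to set the flag.  Sketch, assuming a
 * flag already initialized with pg_atomic_init_flag():
 *
 *   if (pg_atomic_test_set_flag(&init_done))
 *       perform_one_time_initialization();
 */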
|
|
|
|
|
|
|
|
static void
|
|
|
|
test_atomic_uint32(void)
|
|
|
|
{
|
|
|
|
pg_atomic_uint32 var;
|
|
|
|
uint32 expected;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
pg_atomic_init_u32(&var, 0);
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_read_u32(&var), 0);
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_write_u32(&var, 3);
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_read_u32(&var), 3);
|
|
|
|
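	/* var is 3 here, so this adds (3 - 2) = 1 and returns the old value 3 */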
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, pg_atomic_read_u32(&var) - 2),
|
|
|
|
3);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&var, 1), 4);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_sub_fetch_u32(&var, 3), 0);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_add_fetch_u32(&var, 10), 10);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_exchange_u32(&var, 5), 10);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_exchange_u32(&var, 0), 5);
|
2014-09-25 23:49:05 +02:00
|
|
|
|
|
|
|
/* test around numerical limits */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, INT_MAX), 0);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, INT_MAX), INT_MAX);
|
2019-09-14 04:33:30 +02:00
|
|
|
pg_atomic_fetch_add_u32(&var, 2); /* wrap to 0 */
|
2019-10-05 19:05:05 +02:00
|
|
|
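	/* the PG_INT16_MIN additions below convert to large unsigned values; the expected results follow from modulo-2^32 arithmetic */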
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MAX), 0);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MAX + 1),
|
|
|
|
PG_INT16_MAX);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MIN),
|
|
|
|
2 * PG_INT16_MAX + 1);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&var, PG_INT16_MIN - 1),
|
|
|
|
PG_INT16_MAX);
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_fetch_add_u32(&var, 1); /* top up to UINT_MAX */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_read_u32(&var), UINT_MAX);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&var, INT_MAX), UINT_MAX);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_read_u32(&var), (uint32) INT_MAX + 1);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_sub_fetch_u32(&var, INT_MAX), 1);
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_sub_fetch_u32(&var, 1);
|
2020-10-12 06:31:37 +02:00
|
|
|
expected = PG_INT16_MAX;
|
|
|
|
EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1));
|
|
|
|
expected = PG_INT16_MAX + 1;
|
|
|
|
EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1));
|
|
|
|
expected = PG_INT16_MIN;
|
|
|
|
EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1));
|
|
|
|
expected = PG_INT16_MIN - 1;
|
|
|
|
EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1));
|
2014-09-25 23:49:05 +02:00
|
|
|
|
|
|
|
/* fail exchange because of old expected */
|
|
|
|
expected = 10;
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1));
|
2014-09-25 23:49:05 +02:00
|
|
|
|
|
|
|
/* CAS is allowed to fail due to interrupts, try a couple of times */
|
|
|
|
for (i = 0; i < 1000; i++)
|
|
|
|
{
|
|
|
|
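		/* reset 'expected' each time: a failed CAS overwrites it with the current value */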
expected = 0;
|
|
|
|
if (pg_atomic_compare_exchange_u32(&var, &expected, 1))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == 1000)
|
|
|
|
elog(ERROR, "atomic_compare_exchange_u32() never succeeded");
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_read_u32(&var), 1);
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_write_u32(&var, 0);
|
|
|
|
|
|
|
|
/* try setting flagbits */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_TRUE(!(pg_atomic_fetch_or_u32(&var, 1) & 1));
|
|
|
|
EXPECT_TRUE(pg_atomic_fetch_or_u32(&var, 2) & 1);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_read_u32(&var), 3);
|
2014-09-25 23:49:05 +02:00
|
|
|
/* try clearing flagbits */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_and_u32(&var, ~2) & 3, 3);
|
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_and_u32(&var, ~1), 1);
|
2014-09-25 23:49:05 +02:00
|
|
|
/* no bits set anymore */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U32(pg_atomic_fetch_and_u32(&var, ~0), 0);
|
2014-09-25 23:49:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
test_atomic_uint64(void)
|
|
|
|
{
|
|
|
|
pg_atomic_uint64 var;
|
|
|
|
uint64 expected;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
pg_atomic_init_u64(&var, 0);
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U64(pg_atomic_read_u64(&var), 0);
|
2014-09-25 23:49:05 +02:00
|
|
|
pg_atomic_write_u64(&var, 3);
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U64(pg_atomic_read_u64(&var), 3);
|
|
|
|
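	/* same arithmetic as the 32-bit case: var is 3, so this adds (3 - 2) = 1 */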
EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&var, pg_atomic_read_u64(&var) - 2),
|
|
|
|
3);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&var, 1), 4);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_sub_fetch_u64(&var, 3), 0);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_add_fetch_u64(&var, 10), 10);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_exchange_u64(&var, 5), 10);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_exchange_u64(&var, 0), 5);
|
2014-09-25 23:49:05 +02:00
|
|
|
|
|
|
|
/* fail exchange because of old expected */
|
|
|
|
expected = 10;
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_TRUE(!pg_atomic_compare_exchange_u64(&var, &expected, 1));
|
2014-09-25 23:49:05 +02:00
|
|
|
|
|
|
|
/* CAS is allowed to fail due to interrupts, try a couple of times */
|
|
|
|
for (i = 0; i < 100; i++)
|
|
|
|
{
|
|
|
|
expected = 0;
|
|
|
|
if (pg_atomic_compare_exchange_u64(&var, &expected, 1))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == 100)
|
|
|
|
elog(ERROR, "atomic_compare_exchange_u64() never succeeded");
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U64(pg_atomic_read_u64(&var), 1);
|
2014-09-25 23:49:05 +02:00
|
|
|
|
|
|
|
pg_atomic_write_u64(&var, 0);
|
|
|
|
|
|
|
|
/* try setting flagbits */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_TRUE(!(pg_atomic_fetch_or_u64(&var, 1) & 1));
|
|
|
|
EXPECT_TRUE(pg_atomic_fetch_or_u64(&var, 2) & 1);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_read_u64(&var), 3);
|
2014-09-25 23:49:05 +02:00
|
|
|
/* try clearing flagbits */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U64((pg_atomic_fetch_and_u64(&var, ~2) & 3), 3);
|
|
|
|
EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~1), 1);
|
2014-09-25 23:49:05 +02:00
|
|
|
/* no bits set anymore */
|
2019-10-05 19:05:05 +02:00
|
|
|
EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~0), 0);
|
2014-09-25 23:49:05 +02:00
|
|
|
}
|
|
|
|
|
2020-06-09 01:36:51 +02:00
|
|
|
/*
|
|
|
|
* Perform fairly minimal testing of the spinlock implementation.
|
|
|
|
*
|
|
|
|
* It's likely worth expanding these to actually test concurrency etc, but
|
|
|
|
* having some regularly run tests is better than none.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
test_spinlock(void)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Basic tests for spinlocks, as well as the underlying operations.
|
|
|
|
*
|
|
|
|
* We embed the spinlock in a struct with other members to test that the
|
|
|
|
* spinlock operations don't perform too wide writes.
|
|
|
|
*/
|
|
|
|
{
|
|
|
|
struct test_lock_struct
|
|
|
|
{
|
|
|
|
char data_before[4];
|
|
|
|
slock_t lock;
|
|
|
|
char data_after[4];
|
|
|
|
} struct_w_lock;
|
|
|
|
|
|
|
|
memcpy(struct_w_lock.data_before, "abcd", 4);
|
|
|
|
memcpy(struct_w_lock.data_after, "ef12", 4);
|
|
|
|
|
|
|
|
/* test basic operations via the SpinLock* API */
|
|
|
|
SpinLockInit(&struct_w_lock.lock);
|
|
|
|
SpinLockAcquire(&struct_w_lock.lock);
|
|
|
|
SpinLockRelease(&struct_w_lock.lock);
|
|
|
|
|
|
|
|
/* test basic operations via underlying S_* API */
|
|
|
|
S_INIT_LOCK(&struct_w_lock.lock);
|
|
|
|
S_LOCK(&struct_w_lock.lock);
|
|
|
|
S_UNLOCK(&struct_w_lock.lock);
|
|
|
|
|
|
|
|
/* and that "contended" acquisition works */
|
|
|
|
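		/* s_lock()'s file/line/function arguments are used only for error reports, e.g. about a stuck spinlock */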
s_lock(&struct_w_lock.lock, "testfile", 17, "testfunc");
|
|
|
|
S_UNLOCK(&struct_w_lock.lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check, using TAS directly, that a single spin cycle doesn't block
|
|
|
|
* when acquiring an already acquired lock.
|
|
|
|
*/
|
|
|
|
#ifdef TAS
|
|
|
|
S_LOCK(&struct_w_lock.lock);
|
|
|
|
|
|
|
|
if (!TAS(&struct_w_lock.lock))
|
|
|
|
elog(ERROR, "acquired already held spinlock");
|
|
|
|
|
|
|
|
#ifdef TAS_SPIN
|
|
|
|
if (!TAS_SPIN(&struct_w_lock.lock))
|
|
|
|
elog(ERROR, "acquired already held spinlock");
|
|
|
|
#endif /* defined(TAS_SPIN) */
|
|
|
|
|
|
|
|
S_UNLOCK(&struct_w_lock.lock);
|
|
|
|
#endif /* defined(TAS) */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Verify that after all of this the non-lock contents are still
|
|
|
|
* correct.
|
|
|
|
*/
|
|
|
|
if (memcmp(struct_w_lock.data_before, "abcd", 4) != 0)
|
|
|
|
elog(ERROR, "padding before spinlock modified");
|
|
|
|
if (memcmp(struct_w_lock.data_after, "ef12", 4) != 0)
|
|
|
|
elog(ERROR, "padding after spinlock modified");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ensure that allocating more than INT32_MAX emulated spinlocks works.
|
|
|
|
* That's interesting because the spinlock emulation uses a 32bit integer
|
|
|
|
* to map spinlocks onto semaphores. There've been bugs...
|
|
|
|
*/
|
|
|
|
#ifndef HAVE_SPINLOCKS
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Initialize enough spinlocks to advance counter close to wraparound.
|
|
|
|
* It's too expensive to perform acquire/release for each, as those
|
|
|
|
* may be syscalls when the spinlock emulation is used (and even just
|
|
|
|
* atomic TAS would be expensive).
|
|
|
|
*/
|
|
|
|
for (uint32 i = 0; i < INT32_MAX - 100000; i++)
|
|
|
|
{
|
|
|
|
slock_t lock;
|
|
|
|
|
|
|
|
SpinLockInit(&lock);
|
|
|
|
}
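		/*
		 * The remaining iterations advance the counter across the
		 * INT32_MAX boundary while also acquiring and releasing,
		 * exercising wraparound of the counter used to map emulated
		 * spinlocks onto semaphores.
		 */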
|
|
|
|
|
|
|
|
for (uint32 i = 0; i < 200000; i++)
|
|
|
|
{
|
|
|
|
slock_t lock;
|
|
|
|
|
|
|
|
SpinLockInit(&lock);
|
|
|
|
|
|
|
|
SpinLockAcquire(&lock);
|
|
|
|
SpinLockRelease(&lock);
|
|
|
|
SpinLockAcquire(&lock);
|
|
|
|
SpinLockRelease(&lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
2014-09-25 23:49:05 +02:00
|
|
|
|
2020-06-09 01:50:37 +02:00
|
|
|

/*
 * Verify that performing atomic ops inside a spinlock isn't a
 * problem. Realistically that's only going to be a problem when both
 * --disable-spinlocks and --disable-atomics are used, but it's cheap enough
 * to just always test.
 *
 * The test works by initializing enough atomics that we'd conflict if there
 * were an overlap between a spinlock and an atomic by holding a spinlock
 * while manipulating more than NUM_SPINLOCK_SEMAPHORES atomics.
 *
 * NUM_TEST_ATOMICS doesn't really need to be more than
 * NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more
 * extensively.
 */
static void
test_atomic_spin_nest(void)
{
	slock_t		lock;
#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
	pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
	pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];

	SpinLockInit(&lock);

	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
	{
		pg_atomic_init_u32(&atomics32[i], 0);
		pg_atomic_init_u64(&atomics64[i], 0);
	}

	/* just so it's not all zeroes */
	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
	{
		EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0);
		EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0);
	}

	/* test whether we can do atomic op with lock held */
	SpinLockAcquire(&lock);
	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
	{
		EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i);
		EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0);
		EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&atomics64[i], i), i);
		EXPECT_EQ_U64(pg_atomic_read_u64(&atomics64[i]), 0);
	}
	SpinLockRelease(&lock);
}
#undef NUM_TEST_ATOMICS

PG_FUNCTION_INFO_V1(test_atomic_ops);
Datum
test_atomic_ops(PG_FUNCTION_ARGS)
{
	test_atomic_flag();

	test_atomic_uint32();

	test_atomic_uint64();

	/*
	 * Arguably this shouldn't be tested as part of this function, but it's
	 * closely enough related that that seems ok for now.
	 */
	test_spinlock();

	test_atomic_spin_nest();

	PG_RETURN_BOOL(true);
}
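
/*
 * A sketch of how the regression suite can drive the tests above from SQL;
 * the actual DDL lives in the regression scripts, so treat this as
 * illustrative rather than authoritative:
 *
 *		CREATE FUNCTION test_atomic_ops() RETURNS bool
 *			AS :'regresslib' LANGUAGE C;
 *		SELECT test_atomic_ops();	-- returns true, or errors out on failure
 */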

PG_FUNCTION_INFO_V1(test_fdw_handler);
Datum
test_fdw_handler(PG_FUNCTION_ARGS)
{
	elog(ERROR, "test_fdw_handler is not implemented");
	PG_RETURN_NULL();
}
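
/*
 * Illustrative (not authoritative) SQL showing how a stub like this can be
 * wired up as an FDW handler; the real regression DDL may differ:
 *
 *		CREATE FUNCTION test_fdw_handler() RETURNS fdw_handler
 *			AS :'regresslib', 'test_fdw_handler' LANGUAGE C;
 *		CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler;
 */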

PG_FUNCTION_INFO_V1(test_support_func);
Datum
test_support_func(PG_FUNCTION_ARGS)
{
	Node	   *rawreq = (Node *) PG_GETARG_POINTER(0);
	Node	   *ret = NULL;

	if (IsA(rawreq, SupportRequestSelectivity))
	{
		/*
		 * Assume that the target is int4eq; that's safe as long as we don't
		 * attach this to any other boolean-returning function.
		 */
		SupportRequestSelectivity *req = (SupportRequestSelectivity *) rawreq;
		Selectivity s1;

		if (req->is_join)
			s1 = join_selectivity(req->root, Int4EqualOperator,
								  req->args,
								  req->inputcollid,
								  req->jointype,
								  req->sjinfo);
		else
			s1 = restriction_selectivity(req->root, Int4EqualOperator,
										 req->args,
										 req->inputcollid,
										 req->varRelid);

		req->selectivity = s1;
		ret = (Node *) req;
	}

	if (IsA(rawreq, SupportRequestCost))
	{
		/* Provide some generic estimate */
		SupportRequestCost *req = (SupportRequestCost *) rawreq;

		req->startup = 0;
		req->per_tuple = 2 * cpu_operator_cost;
		ret = (Node *) req;
	}

	if (IsA(rawreq, SupportRequestRows))
	{
		/*
		 * Assume that the target is generate_series_int4; that's safe as
		 * long as we don't attach this to any other set-returning function.
		 */
		SupportRequestRows *req = (SupportRequestRows *) rawreq;

		if (req->node && IsA(req->node, FuncExpr)) /* be paranoid */
		{
			List	   *args = ((FuncExpr *) req->node)->args;
			Node	   *arg1 = linitial(args);
			Node	   *arg2 = lsecond(args);

			if (IsA(arg1, Const) &&
				!((Const *) arg1)->constisnull &&
				IsA(arg2, Const) &&
				!((Const *) arg2)->constisnull)
			{
				int32		val1 = DatumGetInt32(((Const *) arg1)->constvalue);
				int32		val2 = DatumGetInt32(((Const *) arg2)->constvalue);

				req->rows = val2 - val1 + 1;
				ret = (Node *) req;
			}
		}
	}

	PG_RETURN_POINTER(ret);
}
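
/*
 * A support function is attached to its target via CREATE FUNCTION's
 * SUPPORT clause.  A hedged sketch (names illustrative; per the comments
 * above, the target must behave like int4eq or generate_series(int4,int4)):
 *
 *		CREATE FUNCTION my_int_eq(int, int) RETURNS bool
 *			LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE
 *			AS 'int4eq'
 *			SUPPORT test_support_func;
 */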

PG_FUNCTION_INFO_V1(test_opclass_options_func);
Datum
test_opclass_options_func(PG_FUNCTION_ARGS)
{
	PG_RETURN_NULL();
}
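
/*
 * Illustrative usage: a no-op options function like this can be registered
 * as btree support function 5 ("options") to exercise the opclass-options
 * plumbing.  The DDL below is an assumption, not taken from this file:
 *
 *		ALTER OPERATOR FAMILY alt_opf USING btree
 *			ADD FUNCTION 5 (int4) test_opclass_options_func(internal);
 */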

/*
 * Call an encoding conversion or verification function.
 *
 * Arguments:
 *	string	  bytea -- string to convert
 *	src_enc	  name  -- source encoding
 *	dest_enc  name  -- destination encoding
 *	noError	  bool  -- if set, don't ereport() on invalid or untranslatable
 *					   input
 *
 * Result is a tuple with two attributes:
 *	int4	-- number of input bytes successfully converted
 *	bytea	-- converted string
 */
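
/*
 * Example invocation from SQL, assuming a matching CREATE FUNCTION with OUT
 * parameters has been issued for the regression library (illustrative):
 *
 *		SELECT * FROM test_enc_conversion('foo'::bytea, 'UTF8', 'LATIN1', false);
 */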
PG_FUNCTION_INFO_V1(test_enc_conversion);
Datum
test_enc_conversion(PG_FUNCTION_ARGS)
{
	bytea	   *string = PG_GETARG_BYTEA_PP(0);
	char	   *src_encoding_name = NameStr(*PG_GETARG_NAME(1));
	int			src_encoding = pg_char_to_encoding(src_encoding_name);
	char	   *dest_encoding_name = NameStr(*PG_GETARG_NAME(2));
	int			dest_encoding = pg_char_to_encoding(dest_encoding_name);
	bool		noError = PG_GETARG_BOOL(3);
	TupleDesc	tupdesc;
	char	   *src;
	char	   *dst;
	bytea	   *retval;
	Size		srclen;
	Size		dstsize;
	Oid			proc;
	int			convertedbytes;
	int			dstlen;
	Datum		values[2];
	bool		nulls[2] = {0};
	HeapTuple	tuple;

	if (src_encoding < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid source encoding name \"%s\"",
						src_encoding_name)));
	if (dest_encoding < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid destination encoding name \"%s\"",
						dest_encoding_name)));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");
	tupdesc = BlessTupleDesc(tupdesc);

	srclen = VARSIZE_ANY_EXHDR(string);
	src = VARDATA_ANY(string);

	if (src_encoding == dest_encoding)
	{
		/* just check that the source string is valid */
		int			oklen;

		oklen = pg_encoding_verifymbstr(src_encoding, src, srclen);

		if (oklen == srclen)
		{
			convertedbytes = oklen;
			retval = string;
		}
		else if (!noError)
		{
			report_invalid_encoding(src_encoding, src + oklen, srclen - oklen);
		}
		else
		{
			/*
			 * build bytea data type structure.
			 */
			Assert(oklen < srclen);
			convertedbytes = oklen;
			retval = (bytea *) palloc(oklen + VARHDRSZ);
			SET_VARSIZE(retval, oklen + VARHDRSZ);
			memcpy(VARDATA(retval), src, oklen);
		}
	}
	else
	{
		proc = FindDefaultConversionProc(src_encoding, dest_encoding);
		if (!OidIsValid(proc))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_FUNCTION),
					 errmsg("default conversion function for encoding \"%s\" to \"%s\" does not exist",
							pg_encoding_to_char(src_encoding),
							pg_encoding_to_char(dest_encoding))));

		if (srclen >= (MaxAllocSize / (Size) MAX_CONVERSION_GROWTH))
			ereport(ERROR,
					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
					 errmsg("out of memory"),
					 errdetail("String of %d bytes is too long for encoding conversion.",
							   (int) srclen)));

		dstsize = (Size) srclen * MAX_CONVERSION_GROWTH + 1;
		dst = MemoryContextAlloc(CurrentMemoryContext, dstsize);

		/* perform conversion */
		convertedbytes = pg_do_encoding_conversion_buf(proc,
													   src_encoding,
													   dest_encoding,
													   (unsigned char *) src, srclen,
													   (unsigned char *) dst, dstsize,
													   noError);
		dstlen = strlen(dst);

		/*
		 * build bytea data type structure.
		 */
		retval = (bytea *) palloc(dstlen + VARHDRSZ);
		SET_VARSIZE(retval, dstlen + VARHDRSZ);
		memcpy(VARDATA(retval), dst, dstlen);

		pfree(dst);
	}

	values[0] = Int32GetDatum(convertedbytes);
	values[1] = PointerGetDatum(retval);
	tuple = heap_form_tuple(tupdesc, values, nulls);

	PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}

/* Provide SQL access to IsBinaryCoercible() */
PG_FUNCTION_INFO_V1(binary_coercible);
Datum
binary_coercible(PG_FUNCTION_ARGS)
{
	Oid			srctype = PG_GETARG_OID(0);
	Oid			targettype = PG_GETARG_OID(1);

	PG_RETURN_BOOL(IsBinaryCoercible(srctype, targettype));
}
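
/*
 * Example queries, assuming the usual CREATE FUNCTION wrapper; results
 * follow pg_cast's binary-coercibility entries:
 *
 *		SELECT binary_coercible('varchar'::regtype, 'text'::regtype); -- true
 *		SELECT binary_coercible('int4'::regtype, 'text'::regtype);    -- false
 */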

/*
 * Return the length of the portion of a tuple consisting of the given array
 * of data types.  The input data types must be fixed-length data types.
 */
PG_FUNCTION_INFO_V1(get_columns_length);
Datum
get_columns_length(PG_FUNCTION_ARGS)
{
	ArrayType  *ta = PG_GETARG_ARRAYTYPE_P(0);
	Oid		   *type_oids;
	int			ntypes;
	int			column_offset = 0;

	if (ARR_HASNULL(ta) && array_contains_nulls(ta))
		elog(ERROR, "argument must not contain nulls");

	if (ARR_NDIM(ta) > 1)
		elog(ERROR, "argument must be empty or one-dimensional array");

	type_oids = (Oid *) ARR_DATA_PTR(ta);
	ntypes = ArrayGetNItems(ARR_NDIM(ta), ARR_DIMS(ta));
	for (int i = 0; i < ntypes; i++)
	{
		Oid			typeoid = type_oids[i];
		int16		typlen;
		bool		typbyval;
		char		typalign;

		get_typlenbyvalalign(typeoid, &typlen, &typbyval, &typalign);

		/* the data type must be fixed-length */
		if (typlen < 0)
			elog(ERROR, "type %u is not fixed-length data type", typeoid);

		column_offset = att_align_nominal(column_offset + typlen, typalign);
	}

	PG_RETURN_INT32(column_offset);
}
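
/*
 * Worked example (hypothetical call; assumes typical 64-bit alignment, per
 * att_align_nominal above):
 *
 *		SELECT get_columns_length(ARRAY['int2', 'int4', 'int8']::regtype[]);
 *		-- int2 ends at offset 2; int4 aligns its end to 4, ending at 8;
 *		-- int8 aligns to 8, ending at 16 => returns 16
 */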