Introduce atomic read/write functions with full barrier semantics.

Writing correct code using atomic variables is often difficult due
to the memory barrier semantics (or lack thereof) of the underlying
operations.  This commit introduces atomic read/write functions
with full barrier semantics to ease this cognitive load.  For
example, some spinlocks protect a single value, and these new
functions make it easy to convert the value to an atomic variable
(thus eliminating the need for the spinlock) without modifying the
barrier semantics previously provided by the spinlock.  Since these
functions may be less performant than the other atomic reads and
writes, they are not suitable for every use-case.  However, using a
single atomic operation with full barrier semantics may be more
performant in cases where a separate explicit barrier would
otherwise be required.

The base implementations for these new functions are atomic
exchanges (for writes) and atomic fetch/adds with 0 (for reads).
These implementations can be overridden with better architecture-
specific versions as they are discovered.

This commit leaves converting existing code to use these new
functions as a future exercise.

Reviewed-by: Andres Freund, Yong Li, Jeff Davis
Discussion: https://postgr.es/m/20231110205128.GB1315705%40nathanxps13
This commit is contained in:
Nathan Bossart 2024-02-29 10:00:44 -06:00
parent 5f2e179bd3
commit bd5132db55
2 changed files with 94 additions and 0 deletions

View File

@ -237,6 +237,26 @@ pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
return pg_atomic_read_u32_impl(ptr);
}
/*
 * pg_atomic_read_membarrier_u32 - read with full barrier semantics.
 *
 * Provided the variable is only ever modified through operations that carry
 * barrier semantics (e.g., pg_atomic_compare_exchange_u32() and
 * pg_atomic_write_membarrier_u32()), this read is guaranteed to observe the
 * current value.  It can be slower than pg_atomic_read_u32(), but in less
 * performance-sensitive code it is often simpler to reason about.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
{
	uint32		value;

	AssertPointerAlignment(ptr, 4);
	value = pg_atomic_read_membarrier_u32_impl(ptr);
	return value;
}
/*
* pg_atomic_write_u32 - write to atomic variable.
*
@ -274,6 +294,26 @@ pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
pg_atomic_unlocked_write_u32_impl(ptr, val);
}
/*
 * pg_atomic_write_membarrier_u32 - write with full barrier semantics.
 *
 * The store happens as a single atomic operation, so no reader can ever
 * observe a torn value.  It also interacts correctly with both
 * pg_atomic_compare_exchange_u32() and pg_atomic_read_membarrier_u32().
 * Although potentially slower than pg_atomic_write_u32(), it can make
 * correctness easier to reason about in less performance-sensitive code.
 *
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);
	pg_atomic_write_membarrier_u32_impl(ptr, val);
}
/*
* pg_atomic_exchange_u32 - exchange newval with current value
*
@ -427,6 +467,15 @@ pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
return pg_atomic_read_u64_impl(ptr);
}
/*
 * pg_atomic_read_membarrier_u64 - read with barrier semantics.
 *
 * 64-bit variant of pg_atomic_read_membarrier_u32(): the read is guaranteed
 * to return the current value, provided that the value is only ever updated
 * via operations with barrier semantics, such as
 * pg_atomic_compare_exchange_u64() and pg_atomic_write_membarrier_u64().
 *
 * Full barrier semantics.
 */
static inline uint64
pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
{
	/* alignment can only be asserted for real (non-simulated) 64-bit atomics */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_read_membarrier_u64_impl(ptr);
}
static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
@ -436,6 +485,15 @@ pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
pg_atomic_write_u64_impl(ptr, val);
}
/*
 * pg_atomic_write_membarrier_u64 - write with barrier semantics.
 *
 * 64-bit variant of pg_atomic_write_membarrier_u32(): the write is performed
 * as a whole (no reader can observe a partial write) and interacts correctly
 * with pg_atomic_compare_exchange_u64() and pg_atomic_read_membarrier_u64().
 *
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/* alignment can only be asserted for real (non-simulated) 64-bit atomics */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	pg_atomic_write_membarrier_u64_impl(ptr, val);
}
static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{

View File

@ -243,6 +243,24 @@ pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
}
#endif
#if !defined(PG_HAVE_ATOMIC_READ_MEMBARRIER_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
#define PG_HAVE_ATOMIC_READ_MEMBARRIER_U32
/*
 * Generic fallback: fetch-and-add of zero leaves the variable untouched
 * while returning its value as a full-barrier atomic operation.
 */
static inline uint32
pg_atomic_read_membarrier_u32_impl(volatile pg_atomic_uint32 *ptr)
{
	uint32		cur;

	cur = pg_atomic_fetch_add_u32_impl(ptr, 0);
	return cur;
}
#endif
#if !defined(PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U32) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U32
/*
 * Generic fallback: an atomic exchange stores the new value with full
 * barrier semantics; the previous value is simply thrown away.
 */
static inline void
pg_atomic_write_membarrier_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	uint32		discarded;

	discarded = pg_atomic_exchange_u32_impl(ptr, val);
	(void) discarded;
}
#endif
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
@ -399,3 +417,21 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
}
#endif
#if !defined(PG_HAVE_ATOMIC_READ_MEMBARRIER_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
#define PG_HAVE_ATOMIC_READ_MEMBARRIER_U64
/*
 * Generic fallback: fetch-and-add of zero leaves the variable untouched
 * while returning its value as a full-barrier atomic operation.
 */
static inline uint64
pg_atomic_read_membarrier_u64_impl(volatile pg_atomic_uint64 *ptr)
{
	uint64		cur;

	cur = pg_atomic_fetch_add_u64_impl(ptr, 0);
	return cur;
}
#endif
#if !defined(PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U64) && defined(PG_HAVE_ATOMIC_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U64
/*
 * Generic fallback: an atomic exchange stores the new value with full
 * barrier semantics; the previous value is simply thrown away.
 */
static inline void
pg_atomic_write_membarrier_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	uint64		discarded;

	discarded = pg_atomic_exchange_u64_impl(ptr, val);
	(void) discarded;
}
#endif