/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 * NOTE: none of the macros in this file are intended to be called directly.
 * Call them through the hardware-independent macros in spin.h.
 *
 * The following hardware-dependent macros must be provided for each
 * supported platform:
 *
 *	void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 *	int S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *		Should return number of "delays"; see s_lock.c
 *
 *	void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 *	bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free.  Returns TRUE if free, FALSE if locked.
 *		This does *not* change the state of the lock.
 *
 *	void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
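 * For illustration, a typical critical section built from these macros
 * might look like the sketch below ("mylock" and "counter" are made-up
 * names, and real callers should go through the spin.h macros instead):
 *
 *	volatile slock_t mylock;
 *	...
 *	S_INIT_LOCK(&mylock);
 *	...
 *	S_LOCK(&mylock);
 *	counter++;			(only short, straight-line work in here)
 *	S_UNLOCK(&mylock);
 *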
 * Note to implementors: there are default implementations for all these
 * macros at the bottom of the file.  Check if your platform can use
 * these or needs to override them.
 *
 * Usually, S_LOCK() is implemented in terms of even lower-level macros
 * TAS() and TAS_SPIN():
 *
 *	int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 *	int TAS_SPIN(slock_t *lock)
 *		Like TAS(), but this version is used when waiting for a lock
 *		previously found to be contended.  By default, this is the
 *		same as TAS(), but on some architectures it's better to poll a
 *		contended lock using an unlocked instruction and retry the
 *		atomic test-and-set only when it appears free.
 *
 * TAS() and TAS_SPIN() are NOT part of the API, and should never be called
 * directly.
 *
 * CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
 * failure to acquire a lock even when the lock is not locked.  For example,
 * on Alpha TAS() will "fail" if interrupted.  Therefore a retry loop must
 * always be used, even if you are certain the lock is free.
 *
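 * That is, even a caller that "knows" the lock is free must retry, along
 * these lines:
 *
 *	while (TAS(lock))
 *		;				(retry: TAS may fail spuriously)
 *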
 * Another caution for users of these macros is that it is the caller's
 * responsibility to ensure that the compiler doesn't re-order accesses
 * to shared memory to precede the actual lock acquisition, or follow the
 * lock release.  Typically we handle this by using volatile-qualified
 * pointers to refer to both the spinlock itself and the shared data
 * structure being accessed within the spinlocked critical section.
 * That fixes it because compilers are not allowed to re-order accesses
 * to volatile objects relative to other such accesses.
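 *
 * For example (a hedged sketch; "MyStruct" and its fields are invented):
 *
 *	volatile MyStruct *shared = ...;
 *
 *	S_LOCK(&shared->mutex);
 *	shared->counter++;		(a volatile access, so not re-ordered)
 *	S_UNLOCK(&shared->mutex);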
 *
 * On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
 * S_UNLOCK() macros must further include hardware-level memory fence
 * instructions to prevent similar re-ordering at the hardware level.
 * TAS() and TAS_SPIN() must guarantee that loads and stores issued after
 * the macro are not executed until the lock has been obtained.  Conversely,
 * S_UNLOCK() must guarantee that loads and stores issued before the macro
 * have been executed before the lock is released.
 *
 * On most supported platforms, TAS() uses a tas() function written
 * in assembly language to execute a hardware atomic-test-and-set
 * instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 * If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 * defined), then we fall back on an emulation that uses SysV semaphores
 * (see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 * implementation, because of the cost of a kernel call per lock or unlock.
 * An old report is that Postgres spends around 40% of its time in semop(2)
 * when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  src/include/storage/s_lock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */


#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */
/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

	__asm__ __volatile__(
		"	instruction	\n"
		"	instruction	\n"
		"	instruction	\n"
:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
:		"r"(lock)					// lock pointer, in input register
:		"memory", "cc");			// show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */
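
/*
 * For comparison with the hand-rolled asm below: where gcc's generic
 * atomic builtins are available, a whole tas() reduces to this minimal
 * sketch (the ARM ports below do exactly this when HAVE_GCC_INT_ATOMICS
 * is defined; shown here for illustration only, not compiled):
 *
 *	typedef int slock_t;
 *
 *	static __inline__ int
 *	tas(volatile slock_t *lock)
 *	{
 *		return __sync_lock_test_and_set(lock, 1);
 *	}
 */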


#ifdef __i386__		/* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock.  Note that the
	 * extra test appears to be a small loss on some x86 platforms and a small
	 * win on others; it's by no means clear that we should keep it.
	 *
	 * When this was last tested, we didn't have separate TAS() and TAS_SPIN()
	 * macros.  Nowadays it probably would be better to do a non-locking test
	 * in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
	 * testing to verify that.  Without some empirical evidence, better to
	 * leave it alone.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}
#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *     The PAUSE instruction improves the performance of IA-32
	 *     processors supporting Hyper-Threading Technology when
	 *     executing spin-wait loops and other routines where one
	 *     thread is accessing a shared lock or semaphore in a tight
	 *     polling loop.  When executing a spin-wait loop, the
	 *     processor can suffer a severe performance penalty when
	 *     exiting the loop because it detects a possible memory order
	 *     violation and flushes the core processor's pipeline.  The
	 *     PAUSE instruction provides a hint to the processor that the
	 *     code sequence is a spin-wait loop.  The processor uses this
	 *     hint to avoid the memory order violation and prevent the
	 *     pipeline flush.  In addition, the PAUSE instruction
	 *     de-pipelines the spin-wait loop to prevent it from
	 *     consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __i386__ */


#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

/*
 * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
 * but only when spinning.
 *
 * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
 * and IA32, by Michael Chynoweth and Mary R. Lee.  As of this writing, it is
 * available at:
 * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
 */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	lock			\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __x86_64__ */


#if defined(__ia64__) || defined(__ia64)
/*
 * Intel Itanium, gcc or Intel's compiler.
 *
 * Itanium has weak memory ordering, but we rely on the compiler to enforce
 * strict ordering of accesses to volatile data.  In particular, while the
 * xchg instruction implicitly acts as a memory barrier with 'acquire'
 * semantics, we do not have an explicit memory fence instruction in the
 * S_UNLOCK macro.  We use a regular assignment to clear the spinlock, and
 * trust that the compiler marks the generated store instruction with the
 * ".rel" opcode.
 *
 * Testing shows that assumption to hold on gcc, although I could not find
 * any explicit statement on that in the gcc manual.  In Intel's compiler,
 * the -m[no-]serialize-volatile option controls that, and testing shows that
 * it is enabled by default.
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

#ifndef __INTEL_COMPILER

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#else /* __INTEL_COMPILER */

static __inline__ int
tas(volatile slock_t *lock)
{
	int			ret;

	ret = _InterlockedExchange(lock, 1);	/* this is a xchg asm macro */

	return ret;
}

#endif /* __INTEL_COMPILER */
#endif	 /* __ia64__ || __ia64 */


/*
 * On ARM, we use __sync_lock_test_and_set(int *, int) if available, and if
 * not fall back on the SWPB instruction.  SWPB does not work on ARMv6 or
 * later, so the compiler builtin is preferred if available.  Note also that
 * the int-width variant of the builtin works on more chips than other widths.
 */
#if defined(__arm__) || defined(__arm)
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

#ifdef HAVE_GCC_INT_ATOMICS

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#else /* !HAVE_GCC_INT_ATOMICS */

typedef unsigned char slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	swpb 	%0, %0, [%2]	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* HAVE_GCC_INT_ATOMICS */
#endif	 /* __arm__ */


/*
 * On ARM64, we use __sync_lock_test_and_set(int *, int) if available.
 */
#if defined(__aarch64__) || defined(__aarch64)
#ifdef HAVE_GCC_INT_ATOMICS
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#endif	 /* HAVE_GCC_INT_ATOMICS */
#endif	 /* __aarch64__ */


/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__	__volatile__(
		"	cs 	%0,%3,0(%2)		\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */


#if defined(__sparc__)		/* Sparc */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	/*
	 *	See comment in /pg/backend/port/tas/solaris_sparc.s for why this
	 *	uses "ldstub", and that file uses "cas".  gcc currently generates
	 *	sparcv7-targeted binaries, so "cas" use isn't possible.
	 */
	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __sparc__ */


/* PowerPC */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On PPC, it's a win to use a non-locking test before the lwarx */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 * On newer machines, we can use lwsync instead for better performance.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t _t;
	int _res;

	__asm__ __volatile__(
#ifdef USE_PPC_LWARX_MUTEX_HINT
"	lwarx   %0,0,%3,1	\n"
#else
"	lwarx   %0,0,%3		\n"
#endif
"	cmpwi   %0,0		\n"
"	bne     1f			\n"
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     2f         	\n"
"1:	li      %1,1		\n"
"	b		3f			\n"
"2:						\n"
#ifdef USE_PPC_LWSYNC
"	lwsync				\n"
#else
"	isync				\n"
#endif
"	li      %1,0		\n"
"3:						\n"

:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/*
 * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
 * On newer machines, we can use lwsync instead for better performance.
 */
#ifdef USE_PPC_LWSYNC
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	lwsync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif /* USE_PPC_LWSYNC */

#endif /* powerpc */


/* Linux Motorola 68k */
#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__	__volatile__(
		"	clrl	%0		\n"
		"	tas		%1		\n"
		"	sne		%0		\n"
:		"=d"(rv), "+m"(*lock)
:
:		"memory", "cc");
	return rv;
}

#endif	 /* (__mc68000__ || __m68k__) && __linux__ */


/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#if defined(__vax__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	movl 	$1, %0			\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0				\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */


#if defined(__mips__) && !defined(__sgi)	/* non-SGI MIPS */
/* Note: on SGI we use the OS' mutex ABI, see below */
/* Note: R10000 processors require a separate SYNC */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register volatile slock_t *_l = lock;
	register int _res;
	register int _tmp;

	__asm__ __volatile__(
		"       .set push           \n"
		"       .set mips2          \n"
		"       .set noreorder      \n"
		"       .set nomacro        \n"
		"       ll      %0, %2      \n"
		"       or      %1, %0, 1   \n"
		"       sc      %1, %2      \n"
		"       xori    %1, 1       \n"
		"       or      %0, %0, %1  \n"
		"       sync                \n"
		"       .set pop              "
:		"=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:		/* no inputs */
:		"memory");
	return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__( \
		"       .set push           \n" \
		"       .set mips2          \n" \
		"       .set noreorder      \n" \
		"       .set nomacro        \n" \
		"       sync                \n" \
		"       .set pop              "); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */


#if defined(__m32r__) && defined(HAVE_SYS_TAS_H)	/* Renesas' M32R */
#define HAS_TEST_AND_SET

#include <sys/tas.h>

typedef int slock_t;

#define TAS(lock) tas(lock)

#endif /* __m32r__ */


#if defined(__sh__)				/* Renesas' SuperH */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int _res;

	/*
	 * This asm is coded as if %0 could be any register, but actually SuperH
	 * restricts the target of xor-immediate to be R0.  That's handled by
	 * the "z" constraint on _res.
	 */
	__asm__ __volatile__(
		"	tas.b @%2    \n"
		"	movt  %0     \n"
		"	xor   #1,%0  \n"
:		"=z"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory", "t");
	return _res;
}

#endif	 /* __sh__ */


/* These live in s_lock.c, but only for gcc: */
#if defined(__m68k__) && !defined(__linux__)	/* non-Linux Motorola 68k */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif

#endif	/* defined(__GNUC__) || defined(__INTEL_COMPILER) */



/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */


#if defined(USE_UNIVEL_CC)		/* Unixware compiler */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock)	tas(lock)

asm int
tas(volatile slock_t *s_lock)
{
/* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
%mem s_lock
	pushl %ebx
	movl s_lock, %ebx
	movl $255, %eax
	lock
	xchgb %al, (%ebx)
	popl %ebx
}

#endif	 /* defined(USE_UNIVEL_CC) */


#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX.  Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct.  The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
	int			sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)	((volatile int *) (((uintptr_t) (lock) + 15) & ~15))

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile int *lockword = TAS_ACTIVE_WORD(lock);
	register int lockval;

	__asm__ __volatile__(
		"	ldcwx	0(0,%2),%0	\n"
:		"=r"(lockval), "+m"(*lockword)
:		"r"(lockword)
:		"memory");
	return (lockval == 0);
}

#endif /* __GNUC__ */

#define S_UNLOCK(lock)	(*TAS_ACTIVE_WORD(lock) = -1)

#define S_INIT_LOCK(lock) \
	do { \
		volatile slock_t *lock_ = (lock); \
		lock_->sema[0] = -1; \
		lock_->sema[1] = -1; \
		lock_->sema[2] = -1; \
		lock_->sema[3] = -1; \
	} while (0)

#define S_LOCK_FREE(lock)	(*TAS_ACTIVE_WORD(lock) != 0)

#endif	 /* __hppa || __hppa__ */


#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
/*
 * HP-UX on Itanium, non-gcc compiler
 *
 * We assume that the compiler enforces strict ordering of loads/stores on
 * volatile data (see comments on the gcc-version earlier in this file).
 * Note that this assumption does *not* hold if you use the
 * +Ovolatile=__unordered option on the HP-UX compiler, so don't do that.
 *
 * See also Implementing Spinlocks on the Intel Itanium Architecture and
 * PA-RISC, by Tor Ekqvist and David Graves, for more information.  As of
 * this writing, version 1.0 of the manual is available at:
 * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

#endif	/* HPUX on IA64, non gcc */


#if defined(_AIX)	/* AIX */
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

#include <sys/atomic_op.h>

typedef int slock_t;

#define TAS(lock)			_check_lock((slock_t *) (lock), 0, 1)
#define S_UNLOCK(lock)		_clear_lock((slock_t *) (lock), 0)

#endif	 /* _AIX */


/* These are in s_lock.c */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
							 slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif


#ifdef WIN32_ONLY_COMPILER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
	_mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
	/* See comment for gcc code. Same code, MASM syntax */
	__asm rep nop;
}
#endif

#endif


#endif	/* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@postgresql.org.
#endif


#else	/* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef int slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)	 s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock)
#define TAS(lock)	tas_sema(lock)


#endif	/* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	(TAS(lock) ? s_lock((lock), __FILE__, __LINE__) : 0)
#endif	 /* S_LOCK */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)		(*((volatile slock_t *) (lock)) = 0)
#endif	 /* S_UNLOCK */

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */

#if !defined(TAS)
extern int	tas(volatile slock_t *lock);		/* in port/.../tas.s, or
												 * s_lock.c */

#define TAS(lock)		tas(lock)
#endif	 /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)	TAS(lock)
#endif	 /* TAS_SPIN */
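
/*
 * Taken together, these defaults compose into a wait loop along the lines
 * of this sketch (the real loop, with sleep-based backoff and a timeout,
 * lives in s_lock() in s_lock.c; shown here for illustration only, not
 * compiled):
 *
 *	while (TAS_SPIN(lock))
 *	{
 *		SPIN_DELAY();
 *		... count delays, back off, abort() on timeout ...
 *	}
 */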


/*
 * Platform-independent out-of-line support routines
 */
extern int	s_lock(volatile slock_t *lock, const char *file, int line);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int	update_spins_per_delay(int shared_spins_per_delay);

#endif	 /* S_LOCK_H */