From 6a6082c27c148eb452d804af306e8745f6e49b36 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Fri, 7 Oct 2011 23:32:30 -0400 Subject: [PATCH] Try to fix memory barriers on x86_64. %esp is no good; must use %rsp there. --- src/include/storage/barrier.h | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/include/storage/barrier.h b/src/include/storage/barrier.h index 0286817a38..b5cc60735e 100644 --- a/src/include/storage/barrier.h +++ b/src/include/storage/barrier.h @@ -62,14 +62,24 @@ extern slock_t dummy_spinlock; /* This works on any architecture, since it's only talking to GCC itself. */ #define pg_compiler_barrier() __asm__ __volatile__("" : : : "memory") -#if defined(__i386__) || defined(__x86_64__) /* 32 or 64 bit x86 */ +#if defined(__i386__) /* - * x86 and x86_64 do not allow loads to be reorded with other loads, or - * stores to be reordered with other stores, but a load can be performed - * before a subsequent store. + * i386 does not allow loads to be reordered with other loads, or stores to be + * reordered with other stores, but a load can be performed before a subsequent + * store. * * "lock; addl" has worked for longer than "mfence". + */ +#define pg_memory_barrier() \ + __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory") +#define pg_read_barrier() pg_compiler_barrier() +#define pg_write_barrier() pg_compiler_barrier() + +#elif defined(__x86_64__) /* 64 bit x86 */ + +/* + * x86_64 has similar ordering characteristics to i386. * * Technically, some x86-ish chips support uncached memory access and/or * special instructions that are weakly ordered. In those cases we'd need * do those things, a compiler barrier should be enough. 
*/ #define pg_memory_barrier() \ - __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory") + __asm__ __volatile__ ("lock; addl $0,0(%%rsp)" : : : "memory") #define pg_read_barrier() pg_compiler_barrier() #define pg_write_barrier() pg_compiler_barrier()