mirror of
https://git.postgresql.org/git/postgresql.git
synced 2024-09-30 14:01:27 +02:00
Fix assorted defects in 09adc9a8c0
.
That commit increased all shared memory allocations to the next higher multiple of PG_CACHE_LINE_SIZE, but it didn't ensure that allocation started on a cache line boundary. It also failed to remove a couple other pieces of now-useless code. BUFFERALIGN() is perhaps obsolete at this point, and likely should be removed at some point, too, but that seems like it can be left to a future cleanup. Mistakes all pointed out by Andres Freund. The patch is mine, with a few extra assertions which I adopted from his version of this fix.
This commit is contained in:
parent
7cb1db1d95
commit
9f84280ae9
@@ -76,11 +76,9 @@ InitBufferPool(void)
 
 	/* Align descriptors to a cacheline boundary. */
 	BufferDescriptors = (BufferDescPadded *)
-		CACHELINEALIGN(
-					   ShmemInitStruct("Buffer Descriptors",
-									   NBuffers * sizeof(BufferDescPadded)
-									   + PG_CACHE_LINE_SIZE,
-									   &foundDescs));
+		ShmemInitStruct("Buffer Descriptors",
+						NBuffers * sizeof(BufferDescPadded),
+						&foundDescs);
 
 	BufferBlocks = (char *)
 		ShmemInitStruct("Buffer Blocks",
@@ -88,10 +86,9 @@ InitBufferPool(void)
 
 	/* Align lwlocks to cacheline boundary */
 	BufferIOLWLockArray = (LWLockMinimallyPadded *)
-		CACHELINEALIGN(ShmemInitStruct("Buffer IO Locks",
-									   NBuffers * (Size) sizeof(LWLockMinimallyPadded)
-									   + PG_CACHE_LINE_SIZE,
-									   &foundIOLocks));
+		ShmemInitStruct("Buffer IO Locks",
+						NBuffers * (Size) sizeof(LWLockMinimallyPadded),
+						&foundIOLocks);
 
 	BufferIOLWLockTranche.name = "buffer_io";
 	BufferIOLWLockTranche.array_base = BufferIOLWLockArray;
@@ -112,6 +112,7 @@ void
 InitShmemAllocation(void)
 {
 	PGShmemHeader *shmhdr = ShmemSegHdr;
+	char	   *aligned;
 
 	Assert(shmhdr != NULL);
 
@@ -139,6 +140,11 @@ InitShmemAllocation(void)
 	shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
 	Assert(shmhdr->freeoffset <= shmhdr->totalsize);
 
+	/* Make sure the first allocation begins on a cache line boundary. */
+	aligned = (char *)
+		(CACHELINEALIGN((((char *) shmhdr) + shmhdr->freeoffset)));
+	shmhdr->freeoffset = aligned - (char *) shmhdr;
+
 	SpinLockInit(ShmemLock);
 
 	/* ShmemIndex can't be set up yet (need LWLocks first) */
@@ -189,10 +195,6 @@ ShmemAlloc(Size size)
 
 	newStart = ShmemSegHdr->freeoffset;
 
-	/* extra alignment for large requests, since they are probably buffers */
-	if (size >= BLCKSZ)
-		newStart = BUFFERALIGN(newStart);
-
 	newFree = newStart + size;
 	if (newFree <= ShmemSegHdr->totalsize)
 	{
@@ -209,6 +211,8 @@ ShmemAlloc(Size size)
 				(errcode(ERRCODE_OUT_OF_MEMORY),
 				 errmsg("out of shared memory")));
 
+	Assert(newSpace == (void *) CACHELINEALIGN(newSpace));
+
 	return newSpace;
 }
 
@@ -425,6 +429,9 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 	LWLockRelease(ShmemIndexLock);
 
 	Assert(ShmemAddrIsValid(structPtr));
+
+	Assert(structPtr == (void *) CACHELINEALIGN(structPtr));
+
 	return structPtr;
 }
 
Loading…
Reference in New Issue
Block a user