Abstract some more architecture-specific details away from SIMD functionality

Add a typedef to represent vectors containing four 32-bit integers,
and add functions operating on them. Also separate out saturating
subtraction into its own function. The motivation for this is to
prepare for a future commit to add ARM NEON support.

Nathan Bossart

Reviewed by John Naylor and Tom Lane
Discussion: https://www.postgresql.org/message-id/flat/CAFBsxsEyR9JkfbPcDXBRYEfdfC__OkwVGdwEAgY4Rv0cvw35EA%40mail.gmail.com#aba7a64b11503494ffd8dd27067626a9
John Naylor committed 2022-08-29 13:40:53 +07:00
parent c6e0fe1f2a
commit f8f19f7086
2 changed files with 122 additions and 44 deletions
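
The commit message says this refactoring prepares for ARM NEON support, which is not part of this commit. As a rough sketch only (the intrinsic choices below are assumptions for illustration, not code from this commit), a NEON backend for the new Vector32 operations could look roughly like this:

    #include <arm_neon.h>

    typedef uint32x4_t Vector32;      /* hypothetical NEON mapping */

    static inline void
    vector32_load(Vector32 *v, const uint32_t *s)
    {
        *v = vld1q_u32(s);            /* load four 32-bit integers */
    }

    static inline Vector32
    vector32_broadcast(const uint32_t c)
    {
        return vdupq_n_u32(c);        /* fill all four lanes with c */
    }

    static inline Vector32
    vector32_eq(const Vector32 v1, const Vector32 v2)
    {
        return vceqq_u32(v1, v2);     /* matching lanes become all-ones */
    }

    static inline Vector32
    vector32_or(const Vector32 v1, const Vector32 v2)
    {
        return vorrq_u32(v1, v2);
    }

The point of the abstraction is that pg_lfind32() below would need no changes for such a port; only the per-architecture helpers in simd.h would.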

src/include/port/pg_lfind.h

@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 {
 	uint32		i = 0;
 
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
 
 	/*
-	 * A 16-byte register only has four 4-byte lanes. For better
-	 * instruction-level parallelism, each loop iteration operates on a block
-	 * of four registers. Testing has showed this is ~40% faster than using a
-	 * block of two registers.
+	 * For better instruction-level parallelism, each loop iteration operates
+	 * on a block of four registers. Testing for SSE2 has showed this is ~40%
+	 * faster than using a block of two registers.
 	 */
-	const __m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
-	uint32		iterations = nelem & ~0xF;	/* round down to multiple of 16 */
+	const Vector32 keys = vector32_broadcast(key);	/* load copies of key */
+	const uint32 nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+	const uint32 nelem_per_iteration = 4 * nelem_per_vector;
+
+	/* round down to multiple of elements per iteration */
+	const uint32 tail_idx = nelem & ~(nelem_per_iteration - 1);
 
 #if defined(USE_ASSERT_CHECKING)
 	bool		assert_result = false;
@@ -116,49 +119,59 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	}
 #endif
 
-	for (i = 0; i < iterations; i += 16)
+	for (i = 0; i < tail_idx; i += nelem_per_iteration)
 	{
-		/* load the next block into 4 registers holding 4 values each */
-		const __m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
-		const __m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
-		const __m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
-		const __m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+		Vector32	vals1,
+					vals2,
+					vals3,
+					vals4,
+					result1,
+					result2,
+					result3,
+					result4,
+					tmp1,
+					tmp2,
+					result;
+
+		/* load the next block into 4 registers */
+		vector32_load(&vals1, &base[i]);
+		vector32_load(&vals2, &base[i + nelem_per_vector]);
+		vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+		vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
 
 		/* compare each value to the key */
-		const __m128i result1 = _mm_cmpeq_epi32(keys, vals1);
-		const __m128i result2 = _mm_cmpeq_epi32(keys, vals2);
-		const __m128i result3 = _mm_cmpeq_epi32(keys, vals3);
-		const __m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+		result1 = vector32_eq(keys, vals1);
+		result2 = vector32_eq(keys, vals2);
+		result3 = vector32_eq(keys, vals3);
+		result4 = vector32_eq(keys, vals4);
 
 		/* combine the results into a single variable */
-		const __m128i tmp1 = _mm_or_si128(result1, result2);
-		const __m128i tmp2 = _mm_or_si128(result3, result4);
-		const __m128i result = _mm_or_si128(tmp1, tmp2);
+		tmp1 = vector32_or(result1, result2);
+		tmp2 = vector32_or(result3, result4);
+		result = vector32_or(tmp1, tmp2);
 
 		/* see if there was a match */
-		if (_mm_movemask_epi8(result) != 0)
+		if (vector8_is_highbit_set((Vector8) result))
 		{
-#if defined(USE_ASSERT_CHECKING)
 			Assert(assert_result == true);
-#endif
 			return true;
 		}
 	}
-#endif							/* USE_SSE2 */
+#endif							/* ! USE_NO_SIMD */
 
 	/* Process the remaining elements one at a time. */
 	for (; i < nelem; i++)
 	{
 		if (key == base[i])
 		{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 			Assert(assert_result == true);
 #endif
 			return true;
 		}
 	}
 
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 	Assert(assert_result == false);
 #endif
 
 	return false;
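
For reference, a minimal sketch of how a caller uses pg_lfind32() (the caller name and data are illustrative, not taken from this commit):

    #include "postgres.h"
    #include "port/pg_lfind.h"

    /* hypothetical caller: is xid present anywhere in the array? */
    static bool
    xid_is_known(uint32 xid, uint32 *known_xids, uint32 nknown)
    {
        /* element order does not matter; all nknown elements are scanned */
        return pg_lfind32(xid, known_xids, nknown);
    }

On SSE2, nelem_per_vector is 4 and nelem_per_iteration is 16, so tail_idx = nelem & ~(nelem_per_iteration - 1) rounds nelem down to a multiple of 16; for example, nelem = 70 gives tail_idx = 64, leaving 6 elements for the scalar tail loop. The bitmask trick relies on nelem_per_iteration being a power of two.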

src/include/port/simd.h

@@ -31,22 +31,32 @@
 #include <emmintrin.h>
 #define USE_SSE2
 typedef __m128i Vector8;
+typedef __m128i Vector32;
 
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
- * operations using bitwise operations on unsigned integers.
+ * operations using bitwise operations on unsigned integers. Note that many
+ * of the functions in this file presently do not have non-SIMD
+ * implementations. In particular, none of the functions involving Vector32
+ * are implemented without SIMD since it's likely not worthwhile to represent
+ * two 32-bit integers using a uint64.
  */
 #define USE_NO_SIMD
 typedef uint64 Vector8;
 #endif
 
 /* load/store operations */
 static inline void vector8_load(Vector8 *v, const uint8 *s);
+#ifndef USE_NO_SIMD
+static inline void vector32_load(Vector32 *v, const uint32 *s);
+#endif
 
 /* assignment operations */
 static inline Vector8 vector8_broadcast(const uint8 c);
+#ifndef USE_NO_SIMD
+static inline Vector32 vector32_broadcast(const uint32 c);
+#endif
 
 /* element-wise comparisons to a scalar */
 static inline bool vector8_has(const Vector8 v, const uint8 c);
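
To illustrate the kind of uint64 emulation the comment refers to (a sketch of the general technique, not the exact code in simd.h), broadcasting one byte value across a 64-bit "vector" needs only plain integer arithmetic:

    #include <stdint.h>

    typedef uint64_t Vector8_emulated;   /* stand-in for the non-SIMD Vector8 */

    static inline Vector8_emulated
    vector8_broadcast_emulated(const uint8_t c)
    {
        /* 0x0101010101010101 * c places c into each of the eight byte lanes */
        return ~UINT64_C(0) / 0xFF * c;
    }

Doing the same for only two 32-bit lanes in a uint64 would buy little over plain scalar code, which is the rationale given above for leaving the Vector32 functions SIMD-only.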
@@ -56,14 +66,21 @@ static inline bool vector8_is_highbit_set(const Vector8 v);
 
 /* arithmetic operations */
 static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
 
-/* Different semantics for SIMD architectures. */
 #ifndef USE_NO_SIMD
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
+static inline Vector8 vector8_ssub(const Vector8 v1, const Vector8 v2);
+#endif
 
-/* comparisons between vectors */
+/*
+ * comparisons between vectors
+ *
+ * Note: These return a vector rather than boolean, which is why we don't
+ * have non-SIMD implementations.
+ */
+#ifndef USE_NO_SIMD
 static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
-
-#endif							/* ! USE_NO_SIMD */
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+#endif
 
 /*
  * Load a chunk of memory into the given vector.
@@ -78,6 +95,15 @@ vector8_load(Vector8 *v, const uint8 *s)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+	*v = _mm_loadu_si128((const __m128i *) s);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
  * Create a vector with all elements set to the same value.
@@ -92,6 +118,16 @@ vector8_broadcast(const uint8 c)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+	return _mm_set1_epi32(c);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 /*
  * Return true if any elements in the vector are equal to the given scalar.
  */
@@ -118,7 +154,7 @@ vector8_has(const Vector8 v, const uint8 c)
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
 #elif defined(USE_SSE2)
-	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
@@ -133,8 +169,8 @@ vector8_has_zero(const Vector8 v)
 {
 #if defined(USE_NO_SIMD)
 	/*
-	 * We cannot call vector8_has() here, because that would lead to a circular
-	 * definition.
+	 * We cannot call vector8_has() here, because that would lead to a
+	 * circular definition.
 	 */
 	return vector8_has_le(v, 0);
 #elif defined(USE_SSE2)
@@ -150,9 +186,6 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#if defined(USE_SSE2)
-	__m128i		sub;
-#endif
 
 	/* pre-compute the result for assert checking */
 #ifdef USE_ASSERT_CHECKING
@@ -194,10 +227,10 @@ vector8_has_le(const Vector8 v, const uint8 c)
 	/*
 	 * Use saturating subtraction to find bytes <= c, which will present as
-	 * NUL bytes in 'sub'.
+	 * NUL bytes. This approach is a workaround for the lack of unsigned
+	 * comparison instructions on some architectures.
 	 */
-	sub = _mm_subs_epu8(v, vector8_broadcast(c));
-	result = vector8_has_zero(sub);
+	result = vector8_has_zero(vector8_ssub(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
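
To make the saturating-subtraction trick concrete, here is a small self-contained SSE2 sketch written directly against the intrinsics (it is not part of simd.h): bytes less than or equal to c become zero after unsigned saturating subtraction, and the zero bytes are then detected with an ordinary equality comparison.

    #include <stdbool.h>
    #include <stdint.h>
    #include <emmintrin.h>

    static bool
    chunk_has_byte_le(const uint8_t *chunk, uint8_t c)
    {
        __m128i     v = _mm_loadu_si128((const __m128i *) chunk);

        /* v[i] <= c  <=>  max(v[i] - c, 0) == 0 */
        __m128i     sub = _mm_subs_epu8(v, _mm_set1_epi8((char) c));
        __m128i     zeros = _mm_cmpeq_epi8(sub, _mm_setzero_si128());

        /* nonzero byte mask means at least one byte was <= c */
        return _mm_movemask_epi8(zeros) != 0;
    }
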
@@ -230,14 +263,37 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
-/* Different semantics for SIMD architectures. */
 #ifndef USE_NO_SIMD
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_or_si128(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
+/*
+ * Return the result of subtracting the respective elements of the input
+ * vectors using saturation (i.e., if the operation would yield a value less
+ * than zero, zero is returned instead). For more information on saturation
+ * arithmetic, see https://en.wikipedia.org/wiki/Saturation_arithmetic
+ */
+#ifndef USE_NO_SIMD
+static inline Vector8
+vector8_ssub(const Vector8 v1, const Vector8 v2)
+{
+#ifdef USE_SSE2
+	return _mm_subs_epu8(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
  * Return a vector with all bits set in each lane where the corresponding
  * lanes in the inputs are equal.
  */
+#ifndef USE_NO_SIMD
 static inline Vector8
 vector8_eq(const Vector8 v1, const Vector8 v2)
 {
@@ -245,7 +301,16 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 	return _mm_cmpeq_epi8(v1, v2);
 #endif
 }
+#endif							/* ! USE_NO_SIMD */
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
 #endif							/* ! USE_NO_SIMD */
 
 #endif							/* SIMD_H */
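
Finally, a standalone SSE2 sketch (with assumed test values) of why the (Vector8) cast in pg_lfind32() is safe: vector32_eq() sets every byte of a matching 32-bit lane to 0xFF, so the byte-wise movemask used by vector8_is_highbit_set() is nonzero exactly when some lane matched.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <emmintrin.h>

    int
    main(void)
    {
        uint32_t    block[4] = {7, 42, 99, 1234};   /* assumed test data */
        uint32_t    key = 99;

        __m128i     vals = _mm_loadu_si128((const __m128i *) block);
        __m128i     keys = _mm_set1_epi32((int) key);

        /* matching 32-bit lanes become 0xFFFFFFFF, others 0x00000000 */
        __m128i     eq = _mm_cmpeq_epi32(keys, vals);

        /* any high bit set in any byte means at least one lane matched */
        bool        found = (_mm_movemask_epi8(eq) != 0);

        printf("found = %d\n", found);  /* prints found = 1 */
        return 0;
    }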