Abstract some more architecture-specific details away from SIMD functionality
Add a typedef to represent vectors containing four 32-bit integers, and add
functions operating on them. Also separate out saturating subtraction into
its own function. The motivation for this is to prepare for a future commit
to add ARM NEON support.

Nathan Bossart

Reviewed by John Naylor and Tom Lane
Discussion: https://www.postgresql.org/message-id/flat/CAFBsxsEyR9JkfbPcDXBRYEfdfC__OkwVGdwEAgY4Rv0cvw35EA%40mail.gmail.com#aba7a64b11503494ffd8dd27067626a9
This commit is contained in:
parent c6e0fe1f2a
commit f8f19f7086
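Since the stated motivation is ARM NEON support, a rough sketch of what NEON
definitions of the new Vector32 helpers might look like is shown below. This
is illustrative only: the NEON port itself is left to the future commit
mentioned above, and the arm_neon.h types and intrinsics used here
(uint32x4_t, vld1q_u32, vdupq_n_u32, vceqq_u32, vorrq_u32, vqsubq_u8) are
assumptions about how it could be written, not part of this patch.

    #include <arm_neon.h>

    typedef uint8x16_t Vector8;
    typedef uint32x4_t Vector32;

    /* hypothetical NEON equivalents of the helpers added below */
    static inline void
    vector32_load(Vector32 *v, const uint32_t *s)
    {
        *v = vld1q_u32(s);          /* 128-bit load, no alignment required */
    }

    static inline Vector32
    vector32_broadcast(const uint32_t c)
    {
        return vdupq_n_u32(c);      /* copy c into all four lanes */
    }

    static inline Vector32
    vector32_eq(const Vector32 v1, const Vector32 v2)
    {
        return vceqq_u32(v1, v2);   /* all-ones lanes where equal */
    }

    static inline Vector32
    vector32_or(const Vector32 v1, const Vector32 v2)
    {
        return vorrq_u32(v1, v2);
    }

    static inline Vector8
    vector8_ssub(const Vector8 v1, const Vector8 v2)
    {
        return vqsubq_u8(v1, v2);   /* unsigned saturating subtract */
    }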
src/include/port/pg_lfind.h

@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 {
 	uint32		i = 0;
 
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
 
 	/*
-	 * A 16-byte register only has four 4-byte lanes. For better
-	 * instruction-level parallelism, each loop iteration operates on a block
-	 * of four registers. Testing has showed this is ~40% faster than using a
-	 * block of two registers.
+	 * For better instruction-level parallelism, each loop iteration operates
+	 * on a block of four registers. Testing for SSE2 has shown this is ~40%
+	 * faster than using a block of two registers.
 	 */
-	const __m128i keys = _mm_set1_epi32(key);	/* load 4 copies of key */
-	uint32		iterations = nelem & ~0xF;	/* round down to multiple of 16 */
+	const Vector32 keys = vector32_broadcast(key);	/* load copies of key */
+	const uint32 nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+	const uint32 nelem_per_iteration = 4 * nelem_per_vector;
+
+	/* round down to multiple of elements per iteration */
+	const uint32 tail_idx = nelem & ~(nelem_per_iteration - 1);
 
 #if defined(USE_ASSERT_CHECKING)
 	bool		assert_result = false;
 
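The mask arithmetic generalizes the old nelem & ~0xF: with 16-byte SSE2
registers, nelem_per_vector is 4 and nelem_per_iteration is 16, so tail_idx
rounds down exactly as before. This relies on nelem_per_iteration being a
power of two. A standalone check of that equivalence (not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        const uint32_t nelem_per_vector = 16 / sizeof(uint32_t);   /* 4 lanes */
        const uint32_t nelem_per_iteration = 4 * nelem_per_vector; /* 16 */
        uint32_t    nelem = 100;
        uint32_t    tail_idx = nelem & ~(nelem_per_iteration - 1);

        /* 96 elements go through the vector loop, 4 through the scalar tail */
        assert(tail_idx == 96);
        assert(tail_idx == (nelem & ~0xF));     /* matches the old expression */
        return 0;
    }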
@@ -116,49 +119,59 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	}
 #endif
 
-	for (i = 0; i < iterations; i += 16)
+	for (i = 0; i < tail_idx; i += nelem_per_iteration)
 	{
-		/* load the next block into 4 registers holding 4 values each */
-		const __m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
-		const __m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
-		const __m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
-		const __m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+		Vector32	vals1,
+					vals2,
+					vals3,
+					vals4,
+					result1,
+					result2,
+					result3,
+					result4,
+					tmp1,
+					tmp2,
+					result;
+
+		/* load the next block into 4 registers */
+		vector32_load(&vals1, &base[i]);
+		vector32_load(&vals2, &base[i + nelem_per_vector]);
+		vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+		vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
 
 		/* compare each value to the key */
-		const __m128i result1 = _mm_cmpeq_epi32(keys, vals1);
-		const __m128i result2 = _mm_cmpeq_epi32(keys, vals2);
-		const __m128i result3 = _mm_cmpeq_epi32(keys, vals3);
-		const __m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+		result1 = vector32_eq(keys, vals1);
+		result2 = vector32_eq(keys, vals2);
+		result3 = vector32_eq(keys, vals3);
+		result4 = vector32_eq(keys, vals4);
 
 		/* combine the results into a single variable */
-		const __m128i tmp1 = _mm_or_si128(result1, result2);
-		const __m128i tmp2 = _mm_or_si128(result3, result4);
-		const __m128i result = _mm_or_si128(tmp1, tmp2);
+		tmp1 = vector32_or(result1, result2);
+		tmp2 = vector32_or(result3, result4);
+		result = vector32_or(tmp1, tmp2);
 
 		/* see if there was a match */
-		if (_mm_movemask_epi8(result) != 0)
+		if (vector8_is_highbit_set((Vector8) result))
 		{
-#if defined(USE_ASSERT_CHECKING)
 			Assert(assert_result == true);
-#endif
 			return true;
 		}
 	}
-#endif							/* USE_SSE2 */
+#endif							/* ! USE_NO_SIMD */
 
 	/* Process the remaining elements one at a time. */
 	for (; i < nelem; i++)
 	{
 		if (key == base[i])
 		{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 			Assert(assert_result == true);
 #endif
 			return true;
 		}
 	}
 
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 	Assert(assert_result == false);
 #endif
 	return false;
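The refactoring does not change pg_lfind32's contract: it still reports
whether key occurs in the first nelem elements of base, with the vector loop
covering everything below tail_idx and the scalar loop the rest. A minimal
caller sketch follows; the extern declaration stands in for including
"port/pg_lfind.h" (where the function is actually static inline), and the
xid-array search in XidInMVCCSnapshot() is an example of a real in-tree
caller.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t uint32;        /* normally from c.h */

    extern bool pg_lfind32(uint32 key, uint32 *base, uint32 nelem);

    int
    main(void)
    {
        uint32      xids[] = {7, 42, 300, 301, 999};

        printf("%d\n", (int) pg_lfind32(300, xids, 5));  /* 1: found */
        printf("%d\n", (int) pg_lfind32(8, xids, 5));    /* 0: not found */
        return 0;
    }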
src/include/port/simd.h

@@ -31,22 +31,32 @@
 #include <emmintrin.h>
 #define USE_SSE2
 typedef __m128i Vector8;
+typedef __m128i Vector32;
 
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
- * operations using bitwise operations on unsigned integers.
+ * operations using bitwise operations on unsigned integers. Note that many
+ * of the functions in this file presently do not have non-SIMD
+ * implementations. In particular, none of the functions involving Vector32
+ * are implemented without SIMD since it's likely not worthwhile to represent
+ * two 32-bit integers using a uint64.
 */
 #define USE_NO_SIMD
 typedef uint64 Vector8;
 #endif
 
 
 /* load/store operations */
 static inline void vector8_load(Vector8 *v, const uint8 *s);
+#ifndef USE_NO_SIMD
+static inline void vector32_load(Vector32 *v, const uint32 *s);
+#endif
 
 /* assignment operations */
 static inline Vector8 vector8_broadcast(const uint8 c);
+#ifndef USE_NO_SIMD
+static inline Vector32 vector32_broadcast(const uint32 c);
+#endif
 
 /* element-wise comparisons to a scalar */
 static inline bool vector8_has(const Vector8 v, const uint8 c);
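The expanded comment is worth a concrete illustration: the uint64 fallback
treats one 8-byte integer as eight 1-byte lanes, so, for example, the
non-SIMD branch of vector8_broadcast can build eight copies of a byte with a
single multiply. A uint64 "Vector32" would hold only two lanes, which is why
the commit skips non-SIMD Vector32 support. A standalone sketch of the lane
trick (assumed to mirror the fallback's approach):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* ~0 / 0xFF == 0x0101010101010101, so this broadcasts 0x2c */
        uint64_t    v = ~UINT64_C(0) / 0xFF * 0x2c;

        assert(v == UINT64_C(0x2c2c2c2c2c2c2c2c));

        /* two 32-bit lanes would offer little parallelism by comparison */
        return 0;
    }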
@@ -56,14 +66,21 @@ static inline bool vector8_is_highbit_set(const Vector8 v);
 
 /* arithmetic operations */
 static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
+
+/* Different semantics for SIMD architectures. */
+#ifndef USE_NO_SIMD
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
+static inline Vector8 vector8_ssub(const Vector8 v1, const Vector8 v2);
+#endif
 
-/* comparisons between vectors */
+/*
+ * comparisons between vectors
+ *
+ * Note: These return a vector rather than boolean, which is why we don't
+ * have non-SIMD implementations.
+ */
 #ifndef USE_NO_SIMD
 static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
-#endif							/* ! USE_NO_SIMD */
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+#endif
 
 /*
  * Load a chunk of memory into the given vector.

@@ -78,6 +95,15 @@ vector8_load(Vector8 *v, const uint8 *s)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+	*v = _mm_loadu_si128((const __m128i *) s);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
  * Create a vector with all elements set to the same value.

@@ -92,6 +118,16 @@ vector8_broadcast(const uint8 c)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+	return _mm_set1_epi32(c);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 /*
  * Return true if any elements in the vector are equal to the given scalar.
  */
@@ -118,7 +154,7 @@ vector8_has(const Vector8 v, const uint8 c)
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
 #elif defined(USE_SSE2)
-	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
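The rewritten line composes two of the new primitives: vector8_eq produces a
lane mask (all bits set in each matching byte), after which
vector8_is_highbit_set only has to test each byte's top bit, which is what
_mm_movemask_epi8 summarized before. A scalar model of the high-bit test on
the uint64 fallback representation (a sketch, not the patch's code):

    #include <assert.h>
    #include <stdint.h>

    /* true if any 8-bit lane of v has its high bit set */
    static int
    is_highbit_set(uint64_t v)
    {
        return (v & UINT64_C(0x8080808080808080)) != 0;
    }

    int
    main(void)
    {
        /* a matching lane from a byte-wise compare is all-ones (0xFF) */
        assert(is_highbit_set(UINT64_C(0x00000000FF000000)));  /* one match */
        assert(!is_highbit_set(0));                            /* no match */
        return 0;
    }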
@@ -133,8 +169,8 @@ vector8_has_zero(const Vector8 v)
 {
 #if defined(USE_NO_SIMD)
 	/*
-	 * We cannot call vector8_has() here, because that would lead to a circular
-	 * definition.
+	 * We cannot call vector8_has() here, because that would lead to a
+	 * circular definition.
 	 */
 	return vector8_has_le(v, 0);
 #elif defined(USE_SSE2)

@@ -150,9 +186,6 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#if defined(USE_SSE2)
-	__m128i		sub;
-#endif
 
 	/* pre-compute the result for assert checking */
 #ifdef USE_ASSERT_CHECKING
@@ -194,10 +227,10 @@ vector8_has_le(const Vector8 v, const uint8 c)
 
 	/*
 	 * Use saturating subtraction to find bytes <= c, which will present as
-	 * NUL bytes in 'sub'.
+	 * NUL bytes. This approach is a workaround for the lack of unsigned
+	 * comparison instructions on some architectures.
 	 */
-	sub = _mm_subs_epu8(v, vector8_broadcast(c));
-	result = vector8_has_zero(sub);
+	result = vector8_has_zero(vector8_ssub(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
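To see why saturating subtraction finds bytes <= c: each lane computes v - c
clamped at zero, so exactly the lanes that were <= c become zero and
vector8_has_zero can take over. A scalar model of one lane (matching the
semantics of _mm_subs_epu8, which vector8_ssub wraps on SSE2):

    #include <assert.h>
    #include <stdint.h>

    /* one lane of an unsigned saturating subtract */
    static uint8_t
    ssub_u8(uint8_t a, uint8_t b)
    {
        return (a > b) ? (uint8_t) (a - b) : 0;
    }

    int
    main(void)
    {
        /* looking for bytes <= 0x1f, e.g. control characters */
        assert(ssub_u8(0x0a, 0x1f) == 0);   /* 0x0a <= 0x1f: lane zeroes out */
        assert(ssub_u8(0x1f, 0x1f) == 0);   /* boundary value is included */
        assert(ssub_u8(0x20, 0x1f) == 1);   /* 0x20 > 0x1f: stays nonzero */
        return 0;
    }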
@@ -230,14 +263,37 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
+/* Different semantics for SIMD architectures. */
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_or_si128(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
+/*
+ * Return the result of subtracting the respective elements of the input
+ * vectors using saturation (i.e., if the operation would yield a value less
+ * than zero, zero is returned instead). For more information on saturation
+ * arithmetic, see https://en.wikipedia.org/wiki/Saturation_arithmetic
+ */
+#ifndef USE_NO_SIMD
+static inline Vector8
+vector8_ssub(const Vector8 v1, const Vector8 v2)
+{
+#ifdef USE_SSE2
+	return _mm_subs_epu8(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 /*
- * Return a vector with all bits set in each lane where the the corresponding
+ * Return a vector with all bits set in each lane where the corresponding
  * lanes in the inputs are equal.
  */
 #ifndef USE_NO_SIMD
 static inline Vector8
 vector8_eq(const Vector8 v1, const Vector8 v2)
 {

@@ -245,7 +301,16 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 	return _mm_cmpeq_epi8(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 #endif							/* SIMD_H */