/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Hidayat Khan <huk2209@gmail.com>
 */

#if !defined(SIMDE_X86_AVX512_ADD_H)
#define SIMDE_X86_AVX512_ADD_H

#include "types.h"
#include "../avx2.h"
#include "mov.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

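/* Every function below follows the same pattern: when the compiler
 * targets the required AVX-512 subset natively, the call forwards to
 * the hardware intrinsic; otherwise the masked forms are composed from
 * the unmasked add plus simde_*_mask_mov_* / simde_*_maskz_mov_* from
 * mov.h.  For a mask k, lane i of the result is (a + b) when bit i of
 * k is set, and otherwise lane i of src (mask) or zero (maskz). */
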
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi8(simde__m128i src, simde__mmask16 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm_mask_add_epi8(src, k, a, b);
  #else
    return simde_mm_mask_mov_epi8(src, k, simde_mm_add_epi8(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_mask_add_epi8
  #define _mm_mask_add_epi8(src, k, a, b) simde_mm_mask_add_epi8(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi8(simde__mmask16 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm_maskz_add_epi8(k, a, b);
  #else
    return simde_mm_maskz_mov_epi8(k, simde_mm_add_epi8(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_maskz_add_epi8
  #define _mm_maskz_add_epi8(k, a, b) simde_mm_maskz_add_epi8(k, a, b)
#endif

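/* Example (illustrative sketch, not part of the header): with
 * k = 0x00FF the low eight byte lanes receive a + b, while the high
 * eight keep src (merge-masking) or become zero (zero-masking):
 *
 *   simde__m128i merged = simde_mm_mask_add_epi8(src, UINT16_C(0x00FF), a, b);
 *   simde__m128i zeroed = simde_mm_maskz_add_epi8(UINT16_C(0x00FF), a, b);
 */
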
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi16(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm_mask_add_epi16(src, k, a, b);
  #else
    return simde_mm_mask_mov_epi16(src, k, simde_mm_add_epi16(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_mask_add_epi16
  #define _mm_mask_add_epi16(src, k, a, b) simde_mm_mask_add_epi16(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi16(simde__mmask8 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm_maskz_add_epi16(k, a, b);
  #else
    return simde_mm_maskz_mov_epi16(k, simde_mm_add_epi16(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_maskz_add_epi16
  #define _mm_maskz_add_epi16(k, a, b) simde_mm_maskz_add_epi16(k, a, b)
#endif

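/* The 8- and 16-bit masked forms above require both AVX-512VL and
 * AVX-512BW, since byte/word mask operations were introduced by the BW
 * subset.  The 32- and 64-bit forms below only require AVX-512VL. */
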
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi32(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm_mask_add_epi32(src, k, a, b);
  #else
    return simde_mm_mask_mov_epi32(src, k, simde_mm_add_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_mask_add_epi32
  #define _mm_mask_add_epi32(src, k, a, b) simde_mm_mask_add_epi32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi32(simde__mmask8 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm_maskz_add_epi32(k, a, b);
  #else
    return simde_mm_maskz_mov_epi32(k, simde_mm_add_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_maskz_add_epi32
  #define _mm_maskz_add_epi32(k, a, b) simde_mm_maskz_add_epi32(k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mask_add_epi64(simde__m128i src, simde__mmask8 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm_mask_add_epi64(src, k, a, b);
  #else
    return simde_mm_mask_mov_epi64(src, k, simde_mm_add_epi64(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_mask_add_epi64
  #define _mm_mask_add_epi64(src, k, a, b) simde_mm_mask_add_epi64(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maskz_add_epi64(simde__mmask8 k, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm_maskz_add_epi64(k, a, b);
  #else
    return simde_mm_maskz_mov_epi64(k, simde_mm_add_epi64(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm_maskz_add_epi64
  #define _mm_maskz_add_epi64(k, a, b) simde_mm_maskz_add_epi64(k, a, b)
#endif

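/* The scalar (_ss) variants operate on element 0 only; elements 1
 * through 3 of the result are taken from a unchanged.  GCC did not
 * provide _mm_mask_add_ss/_mm_maskz_add_ss before 8.1, hence the
 * HEDLEY_GCC_VERSION_CHECK guard.  The `#elif 1` keeps the simple
 * scalar path preferred; the simde_mm_move_ss composition is retained
 * below it as an alternative. */
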
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mask_add_ss(simde__m128 src, simde__mmask8 k, simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return _mm_mask_add_ss(src, k, a, b);
  #elif 1
    simde__m128_private
      src_ = simde__m128_to_private(src),
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b),
      r_ = simde__m128_to_private(a);

    r_.f32[0] = (k & 1) ? (a_.f32[0] + b_.f32[0]) : src_.f32[0];

    return simde__m128_from_private(r_);
  #else
    return simde_mm_move_ss(a, simde_mm_mask_mov_ps(src, k, simde_mm_add_ps(a, b)));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm_mask_add_ss
  #define _mm_mask_add_ss(src, k, a, b) simde_mm_mask_add_ss(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_maskz_add_ss(simde__mmask8 k, simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return _mm_maskz_add_ss(k, a, b);
  #elif 1
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b),
      r_ = simde__m128_to_private(a);

    r_.f32[0] = (k & 1) ? (a_.f32[0] + b_.f32[0]) : 0.0f;

    return simde__m128_from_private(r_);
  #else
    return simde_mm_move_ss(a, simde_mm_maskz_mov_ps(k, simde_mm_add_ps(a, b)));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm_maskz_add_ss
  #define _mm_maskz_add_ss(k, a, b) simde_mm_maskz_add_ss(k, a, b)
#endif

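/* Example (illustrative sketch): with the low mask bit clear, the
 * zero-masked form zeroes element 0 while elements 1-3 still come
 * from a:
 *
 *   simde__m128 r = simde_mm_maskz_add_ss(UINT8_C(0), a, b);  // r[0] == 0.0f
 */
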
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_add_epi16(simde__m256i src, simde__mmask16 k, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm256_mask_add_epi16(src, k, a, b);
  #else
    return simde_mm256_mask_mov_epi16(src, k, simde_mm256_add_epi16(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_mask_add_epi16
  #define _mm256_mask_add_epi16(src, k, a, b) simde_mm256_mask_add_epi16(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_add_epi16(simde__mmask16 k, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm256_maskz_add_epi16(k, a, b);
  #else
    return simde_mm256_maskz_mov_epi16(k, simde_mm256_add_epi16(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_maskz_add_epi16
  #define _mm256_maskz_add_epi16(k, a, b) simde_mm256_maskz_add_epi16(k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_add_epi32(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm256_mask_add_epi32(src, k, a, b);
  #else
    return simde_mm256_mask_mov_epi32(src, k, simde_mm256_add_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_mask_add_epi32
  #define _mm256_mask_add_epi32(src, k, a, b) simde_mm256_mask_add_epi32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_add_epi32(simde__mmask8 k, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm256_maskz_add_epi32(k, a, b);
  #else
    return simde_mm256_maskz_mov_epi32(k, simde_mm256_add_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_maskz_add_epi32
  #define _mm256_maskz_add_epi32(k, a, b) simde_mm256_maskz_add_epi32(k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_mask_add_epi64(simde__m256i src, simde__mmask8 k, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm256_mask_add_epi64(src, k, a, b);
  #else
    return simde_mm256_mask_mov_epi64(src, k, simde_mm256_add_epi64(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_mask_add_epi64
  #define _mm256_mask_add_epi64(src, k, a, b) simde_mm256_mask_add_epi64(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_maskz_add_epi64(simde__mmask8 k, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm256_maskz_add_epi64(k, a, b);
  #else
    return simde_mm256_maskz_mov_epi64(k, simde_mm256_add_epi64(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_maskz_add_epi64
  #define _mm256_maskz_add_epi64(k, a, b) simde_mm256_maskz_add_epi64(k, a, b)
#endif

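/* The 512-bit operations below are implemented directly rather than as
 * thin wrappers.  Without native AVX-512 support, the addition either
 * uses GCC/Clang vector extensions (SIMDE_VECTOR_SUBSCRIPT_OPS), which
 * permit whole-vector arithmetic such as r_.i8 = a_.i8 + b_.i8, or
 * falls back to processing the vector as two 256-bit halves via the
 * AVX2 implementations. */
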
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi8 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_add_epi8(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = a_.i8 + b_.i8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_add_epi8(a_.m256i[i], b_.m256i[i]);
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_add_epi8
  #define _mm512_add_epi8(a, b) simde_mm512_add_epi8(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_mask_add_epi8(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi8(src, k, simde_mm512_add_epi8(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_add_epi8
  #define _mm512_mask_add_epi8(src, k, a, b) simde_mm512_mask_add_epi8(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_maskz_add_epi8(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi8(k, simde_mm512_add_epi8(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_add_epi8
  #define _mm512_maskz_add_epi8(k, a, b) simde_mm512_maskz_add_epi8(k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi16 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_add_epi16(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = a_.i16 + b_.i16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_add_epi16(a_.m256i[i], b_.m256i[i]);
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_add_epi16
  #define _mm512_add_epi16(a, b) simde_mm512_add_epi16(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi16 (simde__m512i src, simde__mmask32 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_mask_add_epi16(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi16(src, k, simde_mm512_add_epi16(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_add_epi16
  #define _mm512_mask_add_epi16(src, k, a, b) simde_mm512_mask_add_epi16(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi16 (simde__mmask32 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_maskz_add_epi16(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi16(k, simde_mm512_add_epi16(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_add_epi16
  #define _mm512_maskz_add_epi16(k, a, b) simde_mm512_maskz_add_epi16(k, a, b)
#endif

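/* simde_mm512_add_epi32 additionally has an Arm SVE path: a predicated
 * loop that builds its predicate with svwhilelt_b32(i, n), adds
 * svcntw() 32-bit lanes per iteration, and stops once the
 * while-less-than predicate has no active elements.  This works for
 * any SVE vector length without knowing it at compile time. */
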
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi32 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_add_epi32(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_ARM_SVE_NATIVE)
      const size_t n = sizeof(a_.i32) / sizeof(a_.i32[0]);
      size_t i = 0;
      svbool_t pg = svwhilelt_b32(i, n);
      do {
        svint32_t
          va = svld1_s32(pg, &(a_.i32[i])),
          vb = svld1_s32(pg, &(b_.i32[i]));
        svst1_s32(pg, &(r_.i32[i]), svadd_s32_x(pg, va, vb));
        i += svcntw();
        pg = svwhilelt_b32(i, n);
      } while (svptest_any(svptrue_b32(), pg));
    #elif SIMDE_NATURAL_VECTOR_SIZE_LE(256)
      for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_add_epi32(a_.m256i[i], b_.m256i[i]);
      }
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = a_.i32 + b_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_add_epi32(a_.m256i[i], b_.m256i[i]);
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_add_epi32
  #define _mm512_add_epi32(a, b) simde_mm512_add_epi32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_add_epi32(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi32(src, k, simde_mm512_add_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_add_epi32
  #define _mm512_mask_add_epi32(src, k, a, b) simde_mm512_mask_add_epi32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi32(simde__mmask16 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_add_epi32(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi32(k, simde_mm512_add_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_add_epi32
  #define _mm512_maskz_add_epi32(k, a, b) simde_mm512_maskz_add_epi32(k, a, b)
#endif

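/* For 64-bit lanes the vector-extension path is additionally gated on
 * !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS), SIMDe's workaround macro for
 * clang versions with known-bad code generation on 64-bit integer
 * vector operations; affected builds take the 256-bit-halves loop
 * instead. */
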
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_add_epi64 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_add_epi64(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
      for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_add_epi64(a_.m256i[i], b_.m256i[i]);
      }
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_CLANG_BAD_VI64_OPS)
      r_.i64 = a_.i64 + b_.i64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_add_epi64(a_.m256i[i], b_.m256i[i]);
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_add_epi64
  #define _mm512_add_epi64(a, b) simde_mm512_add_epi64(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_add_epi64(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_add_epi64(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi64(src, k, simde_mm512_add_epi64(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_add_epi64
  #define _mm512_mask_add_epi64(src, k, a, b) simde_mm512_mask_add_epi64(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_add_epi64(simde__mmask8 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_add_epi64(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi64(k, simde_mm512_add_epi64(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_add_epi64
  #define _mm512_maskz_add_epi64(k, a, b) simde_mm512_maskz_add_epi64(k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_add_ps (simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_add_ps(a, b);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a),
      b_ = simde__m512_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.f32 = a_.f32 + b_.f32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) {
        r_.m256[i] = simde_mm256_add_ps(a_.m256[i], b_.m256[i]);
      }
    #endif

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_add_ps
  #define _mm512_add_ps(a, b) simde_mm512_add_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_add_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_add_ps(src, k, a, b);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_add_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_add_ps
  #define _mm512_mask_add_ps(src, k, a, b) simde_mm512_mask_add_ps(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_add_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_add_ps(k, a, b);
  #else
    return simde_mm512_maskz_mov_ps(k, simde_mm512_add_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_add_ps
  #define _mm512_maskz_add_ps(k, a, b) simde_mm512_maskz_add_ps(k, a, b)
#endif

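/* Example (illustrative sketch): merge-masked single-precision add,
 * updating only the low eight lanes and keeping the rest of src:
 *
 *   simde__m512 r = simde_mm512_mask_add_ps(src, UINT16_C(0x00FF), a, b);
 */
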
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_add_pd (simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_add_pd(a, b);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a),
      b_ = simde__m512d_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.f64 = a_.f64 + b_.f64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) {
        r_.m256d[i] = simde_mm256_add_pd(a_.m256d[i], b_.m256d[i]);
      }
    #endif

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_add_pd
  #define _mm512_add_pd(a, b) simde_mm512_add_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_add_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_add_pd(src, k, a, b);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_add_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_add_pd
  #define _mm512_mask_add_pd(src, k, a, b) simde_mm512_mask_add_pd(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_add_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_add_pd(k, a, b);
  #else
    return simde_mm512_maskz_mov_pd(k, simde_mm512_add_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_add_pd
  #define _mm512_maskz_add_pd(k, a, b) simde_mm512_maskz_add_pd(k, a, b)
#endif

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_AVX512_ADD_H) */