/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
 */

#if !defined(SIMDE_ARM_NEON_MUL_H)
#define SIMDE_ARM_NEON_MUL_H

#include "types.h"

#include "reinterpret.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vmul_f32(simde_float32x2_t a, simde_float32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_f32(a, b);
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a),
      b_ = simde_float32x2_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_f32
  #define vmul_f32(a, b) simde_vmul_f32((a), (b))
#endif
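
/* simde_vmul_f32 above shows the pattern used throughout this file: call the
 * native NEON intrinsic when it is available, otherwise try a platform-specific
 * SIMD path (MMX/SSE/AVX-512, WebAssembly SIMD128, AltiVec), then the GCC/Clang
 * vector extensions, and finally a portable scalar loop.  The unsigned
 * quad-width variants near the end of the file delegate to their signed
 * counterparts instead.
 *
 * Illustrative usage (a minimal sketch; simde_vld1_f32 and simde_vst1_f32 are
 * provided by other SIMDe NEON headers, not this file):
 *
 *   float a[2] = { 1.0f, 2.0f }, b[2] = { 3.0f, 4.0f }, r[2];
 *   simde_vst1_f32(r, simde_vmul_f32(simde_vld1_f32(a), simde_vld1_f32(b)));
 *   // r is now { 3.0f, 8.0f }
 */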

SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vmul_f64(simde_float64x1_t a, simde_float64x1_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmul_f64(a, b);
  #else
    simde_float64x1_private
      r_,
      a_ = simde_float64x1_to_private(a),
      b_ = simde_float64x1_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_float64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmul_f64
  #define vmul_f64(a, b) simde_vmul_f64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmul_s8(simde_int8x8_t a, simde_int8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_s8(a, b);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a),
      b_ = simde_int8x8_to_private(b);
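
    /* Where SIMDE_BUG_GCC_100762 is defined (a workaround for a GCC bug), the
     * vector-extension path below is disabled and the scalar loop is used. */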
    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_s8
  #define vmul_s8(a, b) simde_vmul_s8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmul_s16(simde_int16x4_t a, simde_int16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_s16(a, b);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a),
      b_ = simde_int16x4_to_private(b);

    #if defined(SIMDE_X86_MMX_NATIVE)
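      /* MMX PMULLW keeps only the low 16 bits of each 16x16-bit product, which
       * matches NEON's modular (wrapping) multiply semantics. */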
      r_.m64 = _m_pmullw(a_.m64, b_.m64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_s16
  #define vmul_s16(a, b) simde_vmul_s16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmul_s32(simde_int32x2_t a, simde_int32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_s32(a, b);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a),
      b_ = simde_int32x2_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_s32
  #define vmul_s32(a, b) simde_vmul_s32((a), (b))
#endif
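
/* NEON provides no 64-bit vmul; the simde_x_-prefixed functions here and below
 * are SIMDe-internal extensions with no corresponding NEON intrinsic. */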
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_x_vmul_s64(simde_int64x1_t a, simde_int64x1_t b) {
  simde_int64x1_private
    r_,
    a_ = simde_int64x1_to_private(a),
    b_ = simde_int64x1_to_private(b);

  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.values = a_.values * b_.values;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] * b_.values[i];
    }
  #endif

  return simde_int64x1_from_private(r_);
}

SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmul_u8(simde_uint8x8_t a, simde_uint8x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_u8(a, b);
  #else
    simde_uint8x8_private
      r_,
      a_ = simde_uint8x8_to_private(a),
      b_ = simde_uint8x8_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_u8
  #define vmul_u8(a, b) simde_vmul_u8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmul_u16(simde_uint16x4_t a, simde_uint16x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_u16(a, b);
  #else
    simde_uint16x4_private
      r_,
      a_ = simde_uint16x4_to_private(a),
      b_ = simde_uint16x4_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_u16
  #define vmul_u16(a, b) simde_vmul_u16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmul_u32(simde_uint32x2_t a, simde_uint32x2_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmul_u32(a, b);
  #else
    simde_uint32x2_private
      r_,
      a_ = simde_uint32x2_to_private(a),
      b_ = simde_uint32x2_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_GCC_100762)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmul_u32
  #define vmul_u32(a, b) simde_vmul_u32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x1_t
simde_x_vmul_u64(simde_uint64x1_t a, simde_uint64x1_t b) {
  simde_uint64x1_private
    r_,
    a_ = simde_uint64x1_to_private(a),
    b_ = simde_uint64x1_to_private(b);

  #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.values = a_.values * b_.values;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] * b_.values[i];
    }
  #endif

  return simde_uint64x1_from_private(r_);
}

SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vmulq_f32(simde_float32x4_t a, simde_float32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_f32(a, b);
  #else
    simde_float32x4_private
      r_,
      a_ = simde_float32x4_to_private(a),
      b_ = simde_float32x4_to_private(b);

    #if defined(SIMDE_X86_SSE_NATIVE)
      r_.m128 = _mm_mul_ps(a_.m128, b_.m128);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_f32x4_mul(a_.v128, b_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_f32
  #define vmulq_f32(a, b) simde_vmulq_f32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vmulq_f64(simde_float64x2_t a, simde_float64x2_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vmulq_f64(a, b);
  #else
    simde_float64x2_private
      r_,
      a_ = simde_float64x2_to_private(a),
      b_ = simde_float64x2_to_private(b);

    #if defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128d = _mm_mul_pd(a_.m128d, b_.m128d);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_f64x2_mul(a_.v128, b_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vmulq_f64
  #define vmulq_f64(a, b) simde_vmulq_f64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vmulq_s8(simde_int8x16_t a, simde_int8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_s8(a, b);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_mul(a, b);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a),
      b_ = simde_int8x16_to_private(b);

    #if defined(SIMDE_X86_SSE2_NATIVE)
      /* https://stackoverflow.com/a/29155682/501126 */
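      /* SSE2 has no packed 8-bit multiply, so the even-indexed bytes are
       * multiplied in 16-bit lanes (keeping only the low byte of each product)
       * while the odd-indexed bytes are shifted down 8 bits, multiplied, and
       * shifted back up; the two halves are then OR'd together. */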
      const __m128i dst_even = _mm_mullo_epi16(a_.m128i, b_.m128i);
      r_.m128i =
        _mm_or_si128(
          _mm_slli_epi16(
            _mm_mullo_epi16(
              _mm_srli_epi16(a_.m128i, 8),
              _mm_srli_epi16(b_.m128i, 8)
            ),
            8
          ),
          #if defined(SIMDE_X86_AVX2_NATIVE)
            _mm_and_si128(dst_even, _mm_set1_epi16(0xFF))
          #else
            _mm_srli_epi16(
              _mm_slli_epi16(dst_even, 8),
              8
            )
          #endif
        );
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_s8
  #define vmulq_s8(a, b) simde_vmulq_s8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmulq_s16(simde_int16x8_t a, simde_int16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_s16(a, b);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a),
      b_ = simde_int16x8_to_private(b);

    #if defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i = _mm_mullo_epi16(a_.m128i, b_.m128i);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_s16
  #define vmulq_s16(a, b) simde_vmulq_s16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmulq_s32(simde_int32x4_t a, simde_int32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_s32(a, b);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a),
      b_ = simde_int32x4_to_private(b);

    #if defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.v128 = wasm_i32x4_mul(a_.v128, b_.v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = a_.values * b_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = a_.values[i] * b_.values[i];
      }
    #endif

    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_s32
  #define vmulq_s32(a, b) simde_vmulq_s32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_x_vmulq_s64(simde_int64x2_t a, simde_int64x2_t b) {
  simde_int64x2_private
    r_,
    a_ = simde_int64x2_to_private(a),
    b_ = simde_int64x2_to_private(b);

  #if defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.v128 = wasm_i64x2_mul(a_.v128, b_.v128);
  #elif defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512DQ_NATIVE)
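    /* The 128-bit form of _mm_mullo_epi64 requires both AVX-512DQ and AVX-512VL. */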
    r_.m128i = _mm_mullo_epi64(a_.m128i, b_.m128i);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.values = a_.values * b_.values;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
      r_.values[i] = a_.values[i] * b_.values[i];
    }
  #endif

  return simde_int64x2_from_private(r_);
}

SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x16_t
simde_vmulq_u8(simde_uint8x16_t a, simde_uint8x16_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_u8(a, b);
  #else
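    /* A wrapping multiply yields the same bit pattern for signed and unsigned
     * inputs, so the unsigned variants reinterpret to signed, reuse the signed
     * implementation, and reinterpret the result back. */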
    return
      simde_vreinterpretq_u8_s8(
        simde_vmulq_s8(
          simde_vreinterpretq_s8_u8(a),
          simde_vreinterpretq_s8_u8(b)
        )
      );
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_u8
  #define vmulq_u8(a, b) simde_vmulq_u8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmulq_u16(simde_uint16x8_t a, simde_uint16x8_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_u16(a, b);
  #else
    return
      simde_vreinterpretq_u16_s16(
        simde_vmulq_s16(
          simde_vreinterpretq_s16_u16(a),
          simde_vreinterpretq_s16_u16(b)
        )
      );
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_u16
  #define vmulq_u16(a, b) simde_vmulq_u16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmulq_u32(simde_uint32x4_t a, simde_uint32x4_t b) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmulq_u32(a, b);
  #else
    return
      simde_vreinterpretq_u32_s32(
        simde_vmulq_s32(
          simde_vreinterpretq_s32_u32(a),
          simde_vreinterpretq_s32_u32(b)
        )
      );
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmulq_u32
  #define vmulq_u32(a, b) simde_vmulq_u32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_x_vmulq_u64(simde_uint64x2_t a, simde_uint64x2_t b) {
  return
    simde_vreinterpretq_u64_s64(
      simde_x_vmulq_s64(
        simde_vreinterpretq_s64_u64(a),
        simde_vreinterpretq_s64_u64(b)
      )
    );
}

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_ARM_NEON_MUL_H) */