author    Stephen Hines <srhines@google.com>    2014-02-19 18:30:17 -0800
committer Stephen Hines <srhines@google.com>    2014-02-19 18:30:17 -0800
commit    803c468d0423f799a67ab2a98c5cadea90fd1665 (patch)
tree      856b980af9f30f76c9ddd8f877a00d0d2bd612b1 /renderscript
parent    74fad5f719b208e79dc377f466080b4094cb4aae (diff)
download  prebuilts_sdk-803c468d0423f799a67ab2a98c5cadea90fd1665.zip
          prebuilts_sdk-803c468d0423f799a67ab2a98c5cadea90fd1665.tar.gz
          prebuilts_sdk-803c468d0423f799a67ab2a98c5cadea90fd1665.tar.bz2
Update Linux RS prebuilts for LLVM 3.4 rebase.
Change-Id: I82679f065d999719d402e20252de9c167fa22e22
Diffstat (limited to 'renderscript')
-rw-r--r--  renderscript/clang-include/Intrin.h        784
-rw-r--r--  renderscript/clang-include/arm_neon.h     9595
-rw-r--r--  renderscript/clang-include/avx2intrin.h     20
-rw-r--r--  renderscript/clang-include/avxintrin.h       8
-rw-r--r--  renderscript/clang-include/emmintrin.h      24
-rw-r--r--  renderscript/clang-include/f16cintrin.h      4
-rw-r--r--  renderscript/clang-include/immintrin.h       4
-rw-r--r--  renderscript/clang-include/limits.h          6
-rw-r--r--  renderscript/clang-include/shaintrin.h      74
-rw-r--r--  renderscript/clang-include/smmintrin.h      15
-rw-r--r--  renderscript/clang-include/tbmintrin.h     158
-rw-r--r--  renderscript/clang-include/unwind.h        159
-rw-r--r--  renderscript/clang-include/x86intrin.h       4
-rw-r--r--  renderscript/include/rs_core_math.rsh     5428
-rwxr-xr-x  renderscript/lib/arm/libRSSupport.so      bin 338204 -> 338220 bytes
-rwxr-xr-x  renderscript/lib/arm/libc.so              bin 310140 -> 310140 bytes
-rw-r--r--  renderscript/lib/arm/libclcore.bc         bin 214836 -> 216916 bytes
-rwxr-xr-x  renderscript/lib/arm/libm.so              bin 103692 -> 103692 bytes
-rwxr-xr-x  renderscript/lib/arm/librsjni.so          bin 22620 -> 22640 bytes
-rw-r--r--  renderscript/lib/arm/librsrt_arm.bc       bin 214836 -> 216916 bytes
-rw-r--r--  renderscript/lib/javalib.jar              bin 137754 -> 137754 bytes
-rwxr-xr-x  renderscript/lib/mips/libRSSupport.so     bin 536820 -> 536836 bytes
-rwxr-xr-x  renderscript/lib/mips/libc.so             bin 611248 -> 611248 bytes
-rw-r--r--  renderscript/lib/mips/libclcore.bc        bin 265796 -> 266808 bytes
-rwxr-xr-x  renderscript/lib/mips/libm.so             bin 136880 -> 136880 bytes
-rwxr-xr-x  renderscript/lib/mips/librsjni.so         bin 71976 -> 71996 bytes
-rw-r--r--  renderscript/lib/mips/librsrt_mips.bc     bin 265796 -> 266808 bytes
-rwxr-xr-x  renderscript/lib/x86/libRSSupport.so      bin 636905 -> 641386 bytes
-rwxr-xr-x  renderscript/lib/x86/libc.so              bin 746497 -> 746830 bytes
-rw-r--r--  renderscript/lib/x86/libclcore.bc         bin 211456 -> 212408 bytes
-rwxr-xr-x  renderscript/lib/x86/libm.so              bin 187085 -> 187085 bytes
-rwxr-xr-x  renderscript/lib/x86/librsjni.so          bin 44547 -> 44567 bytes
-rw-r--r--  renderscript/lib/x86/librsrt_x86.bc       bin 213176 -> 214172 bytes
33 files changed, 13537 insertions, 2746 deletions
diff --git a/renderscript/clang-include/Intrin.h b/renderscript/clang-include/Intrin.h
new file mode 100644
index 0000000..4376464
--- /dev/null
+++ b/renderscript/clang-include/Intrin.h
@@ -0,0 +1,784 @@
+/* ===-------- Intrin.h ---------------------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <Intrin.h>
+#else
+
+#ifndef __INTRIN_H
+#define __INTRIN_H
+
+/* First include the standard intrinsics. */
+#include <x86intrin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* And the random ones that aren't in those files. */
+__m64 _m_from_float(float);
+__m64 _m_from_int(int _l);
+void _m_prefetch(void *);
+float _m_to_float(__m64);
+int _m_to_int(__m64 _M);
+
+/* Other assorted instruction intrinsics. */
+void __addfsbyte(unsigned long, unsigned char);
+void __addfsdword(unsigned long, unsigned long);
+void __addfsword(unsigned long, unsigned short);
+void __code_seg(const char *);
+void __cpuid(int[4], int);
+void __cpuidex(int[4], int, int);
+void __debugbreak(void);
+__int64 __emul(int, int);
+unsigned __int64 __emulu(unsigned int, unsigned int);
+void __cdecl __fastfail(unsigned int);
+unsigned int __getcallerseflags(void);
+void __halt(void);
+unsigned char __inbyte(unsigned short);
+void __inbytestring(unsigned short, unsigned char *, unsigned long);
+void __incfsbyte(unsigned long);
+void __incfsdword(unsigned long);
+void __incfsword(unsigned long);
+unsigned long __indword(unsigned short);
+void __indwordstring(unsigned short, unsigned long *, unsigned long);
+void __int2c(void);
+void __invlpg(void *);
+unsigned short __inword(unsigned short);
+void __inwordstring(unsigned short, unsigned short *, unsigned long);
+void __lidt(void *);
+unsigned __int64 __ll_lshift(unsigned __int64, int);
+__int64 __ll_rshift(__int64, int);
+void __llwpcb(void *);
+unsigned char __lwpins32(unsigned int, unsigned int, unsigned int);
+void __lwpval32(unsigned int, unsigned int, unsigned int);
+unsigned int __lzcnt(unsigned int);
+unsigned short __lzcnt16(unsigned short);
+void __movsb(unsigned char *, unsigned char const *, size_t);
+void __movsd(unsigned long *, unsigned long const *, size_t);
+void __movsw(unsigned short *, unsigned short const *, size_t);
+void __nop(void);
+void __nvreg_restore_fence(void);
+void __nvreg_save_fence(void);
+void __outbyte(unsigned short, unsigned char);
+void __outbytestring(unsigned short, unsigned char *, unsigned long);
+void __outdword(unsigned short, unsigned long);
+void __outdwordstring(unsigned short, unsigned long *, unsigned long);
+void __outword(unsigned short, unsigned short);
+void __outwordstring(unsigned short, unsigned short *, unsigned long);
+static __inline__
+unsigned int __popcnt(unsigned int);
+static __inline__
+unsigned short __popcnt16(unsigned short);
+unsigned __int64 __rdtsc(void);
+unsigned __int64 __rdtscp(unsigned int *);
+unsigned long __readcr0(void);
+unsigned long __readcr2(void);
+unsigned long __readcr3(void);
+unsigned long __readcr5(void);
+unsigned long __readcr8(void);
+unsigned int __readdr(unsigned int);
+unsigned int __readeflags(void);
+unsigned char __readfsbyte(unsigned long);
+unsigned long __readfsdword(unsigned long);
+unsigned __int64 __readfsqword(unsigned long);
+unsigned short __readfsword(unsigned long);
+unsigned __int64 __readmsr(unsigned long);
+unsigned __int64 __readpmc(unsigned long);
+unsigned long __segmentlimit(unsigned long);
+void __sidt(void *);
+void *__slwpcb(void);
+void __stosb(unsigned char *, unsigned char, size_t);
+void __stosd(unsigned long *, unsigned long, size_t);
+void __stosw(unsigned short *, unsigned short, size_t);
+void __svm_clgi(void);
+void __svm_invlpga(void *, int);
+void __svm_skinit(int);
+void __svm_stgi(void);
+void __svm_vmload(size_t);
+void __svm_vmrun(size_t);
+void __svm_vmsave(size_t);
+void __ud2(void);
+unsigned __int64 __ull_rshift(unsigned __int64, int);
+void __vmx_off(void);
+void __vmx_vmptrst(unsigned __int64 *);
+void __wbinvd(void);
+void __writecr0(unsigned int);
+void __writecr3(unsigned int);
+void __writecr4(unsigned int);
+void __writecr8(unsigned int);
+void __writedr(unsigned int, unsigned int);
+void __writeeflags(unsigned int);
+void __writefsbyte(unsigned long, unsigned char);
+void __writefsdword(unsigned long, unsigned long);
+void __writefsqword(unsigned long, unsigned __int64);
+void __writefsword(unsigned long, unsigned short);
+void __writemsr(unsigned long, unsigned __int64);
+static __inline__
+void *_AddressOfReturnAddress(void);
+unsigned int _andn_u32(unsigned int, unsigned int);
+unsigned int _bextr_u32(unsigned int, unsigned int, unsigned int);
+unsigned int _bextri_u32(unsigned int, unsigned int);
+static __inline__
+unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
+static __inline__
+unsigned char _bittest(long const *, long);
+static __inline__
+unsigned char _bittestandcomplement(long *, long);
+static __inline__
+unsigned char _bittestandreset(long *, long);
+static __inline__
+unsigned char _bittestandset(long *, long);
+unsigned int _blcfill_u32(unsigned int);
+unsigned int _blci_u32(unsigned int);
+unsigned int _blcic_u32(unsigned int);
+unsigned int _blcmsk_u32(unsigned int);
+unsigned int _blcs_u32(unsigned int);
+unsigned int _blsfill_u32(unsigned int);
+unsigned int _blsi_u32(unsigned int);
+unsigned int _blsic_u32(unsigned int);
+unsigned int _blsmsk_u32(unsigned int);
+unsigned int _blsr_u32(unsigned int);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+unsigned long __cdecl _byteswap_ulong(unsigned long);
+unsigned short __cdecl _byteswap_ushort(unsigned short);
+unsigned _bzhi_u32(unsigned int, unsigned int);
+void __cdecl _disable(void);
+void __cdecl _enable(void);
+void __cdecl _fxrstor(void const *);
+void __cdecl _fxsave(void *);
+long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
+static __inline__
+long _InterlockedAnd(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedAnd16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedAnd8(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset(long volatile *, long);
+unsigned char _interlockedbittestandset(long volatile *, long);
+static __inline__
+long __cdecl _InterlockedCompareExchange(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
+long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
+static __inline__
+short _InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+static __inline__
+__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
+ __int64);
+__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
+ __int64);
+static __inline__
+char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange,
+ char _Comparand);
+void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
+ void *);
+void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
+ void *);
+static __inline__
+long __cdecl _InterlockedDecrement(long volatile *_Addend);
+static __inline__
+short _InterlockedDecrement16(short volatile *_Addend);
+static __inline__
+long __cdecl _InterlockedExchange(long volatile *_Target, long _Value);
+static __inline__
+short _InterlockedExchange16(short volatile *_Target, short _Value);
+static __inline__
+char _InterlockedExchange8(char volatile *_Target, char _Value);
+static __inline__
+long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
+long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
+static __inline__
+char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value);
+static __inline__
+long __cdecl _InterlockedIncrement(long volatile *_Addend);
+static __inline__
+short _InterlockedIncrement16(short volatile *_Addend);
+static __inline__
+long _InterlockedOr(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedOr16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedOr8(char volatile *_Value, char _Mask);
+static __inline__
+long _InterlockedXor(long volatile *_Value, long _Mask);
+static __inline__
+short _InterlockedXor16(short volatile *_Value, short _Mask);
+static __inline__
+char _InterlockedXor8(char volatile *_Value, char _Mask);
+void __cdecl _invpcid(unsigned int, void *);
+static __inline__
+unsigned long __cdecl _lrotl(unsigned long, int);
+static __inline__
+unsigned long __cdecl _lrotr(unsigned long, int);
+static __inline__
+unsigned int _lzcnt_u32(unsigned int);
+static __inline__
+void _ReadBarrier(void);
+static __inline__
+void _ReadWriteBarrier(void);
+static __inline__
+void *_ReturnAddress(void);
+unsigned int _rorx_u32(unsigned int, const unsigned int);
+int __cdecl _rdrand16_step(unsigned short *);
+int __cdecl _rdrand32_step(unsigned int *);
+static __inline__
+unsigned int __cdecl _rotl(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotl16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotl8(unsigned char _Value, unsigned char _Shift);
+static __inline__
+unsigned int __cdecl _rotr(unsigned int _Value, int _Shift);
+static __inline__
+unsigned short _rotr16(unsigned short _Value, unsigned char _Shift);
+static __inline__
+unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
+static __inline__
+unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
+int _sarx_i32(int, unsigned int);
+
+/* FIXME: Need definition for jmp_buf.
+ int __cdecl _setjmp(jmp_buf); */
+
+unsigned int _shlx_u32(unsigned int, unsigned int);
+unsigned int _shrx_u32(unsigned int, unsigned int);
+void _Store_HLERelease(long volatile *, long);
+void _Store64_HLERelease(__int64 volatile *, __int64);
+void _StorePointer_HLERelease(void *volatile *, void *);
+unsigned int _t1mskc_u32(unsigned int);
+unsigned int _tzcnt_u32(unsigned int);
+unsigned int _tzmsk_u32(unsigned int);
+static __inline__
+void _WriteBarrier(void);
+void _xabort(const unsigned int imm);
+unsigned __int32 _xbegin(void);
+void _xend(void);
+unsigned __int64 __cdecl _xgetbv(unsigned int);
+void __cdecl _xrstor(void const *, unsigned __int64);
+void __cdecl _xsave(void *, unsigned __int64);
+void __cdecl _xsaveopt(void *, unsigned __int64);
+void __cdecl _xsetbv(unsigned int, unsigned __int64);
+unsigned char _xtest(void);
+
+/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
+#ifdef __x86_64__
+void __addgsbyte(unsigned long, unsigned char);
+void __addgsdword(unsigned long, unsigned long);
+void __addgsqword(unsigned long, unsigned __int64);
+void __addgsword(unsigned long, unsigned short);
+void __faststorefence(void);
+void __incgsbyte(unsigned long);
+void __incgsdword(unsigned long);
+void __incgsqword(unsigned long);
+void __incgsword(unsigned long);
+unsigned __int64 __popcnt64(unsigned __int64);
+unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+void __stosq(unsigned __int64 *, unsigned __int64, size_t);
+unsigned __int64 _andn_u64(unsigned __int64, unsigned __int64);
+unsigned __int64 _bextr_u64(unsigned __int64, unsigned int, unsigned int);
+unsigned __int64 _bextri_u64(unsigned __int64, unsigned int);
+static __inline__
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _bittest64(__int64 const *, __int64);
+static __inline__
+unsigned char _bittestandcomplement64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandreset64(__int64 *, __int64);
+static __inline__
+unsigned char _bittestandset64(__int64 *, __int64);
+unsigned __int64 _blcfill_u64(unsigned __int64);
+unsigned __int64 _blci_u64(unsigned __int64);
+unsigned __int64 _blcic_u64(unsigned __int64);
+unsigned __int64 _blcmsk_u64(unsigned __int64);
+unsigned __int64 _blcs_u64(unsigned __int64);
+unsigned __int64 _blsfill_u64(unsigned __int64);
+unsigned __int64 _blsi_u64(unsigned __int64);
+unsigned __int64 _blsic_u64(unsigned __int64);
+unsigned __int64 _blsmsk_u64(unsigned __int64);
+unsigned __int64 _blsr_u64(unsigned __int64);
+unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
+unsigned __int64 _bzhi_u64(unsigned __int64, unsigned int);
+void __cdecl _fxrstor64(void const *);
+void __cdecl _fxsave64(void *);
+long _InterlockedAnd_np(long volatile *_Value, long _Mask);
+short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
+unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
+unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
+long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
+ long _Comparand);
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+short _InterlockedCompareExchange16_np(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
+ void *_Exchange, void *_Comparand);
+long _InterlockedOr_np(long volatile *_Value, long _Mask);
+short _InterlockedOr16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedOr8_np(char volatile *_Value, char _Mask);
+long _InterlockedXor_np(long volatile *_Value, long _Mask);
+short _InterlockedXor16_np(short volatile *_Value, short _Mask);
+__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
+char _InterlockedXor8_np(char volatile *_Value, char _Mask);
+unsigned __int64 _lzcnt_u64(unsigned __int64);
+__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
+ __int64 *_HighProduct);
+unsigned int __cdecl _readfsbase_u32(void);
+unsigned __int64 __cdecl _readfsbase_u64(void);
+unsigned int __cdecl _readgsbase_u32(void);
+unsigned __int64 __cdecl _readgsbase_u64(void);
+unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
+unsigned __int64 _tzcnt_u64(unsigned __int64);
+unsigned __int64 _tzmsk_u64(unsigned __int64);
+unsigned __int64 _umul128(unsigned __int64 _Multiplier,
+ unsigned __int64 _Multiplicand,
+ unsigned __int64 *_HighProduct);
+void __cdecl _writefsbase_u32(unsigned int);
+void __cdecl _writefsbase_u64(unsigned __int64);
+void __cdecl _writegsbase_u32(unsigned int);
+void __cdecl _writegsbase_u64(unsigned __int64);
+void __cdecl _xrstor64(void const *, unsigned __int64);
+void __cdecl _xsave64(void *, unsigned __int64);
+void __cdecl _xsaveopt64(void *, unsigned __int64);
+
+#endif /* __x86_64__ */
+
+/*----------------------------------------------------------------------------*\
+|* Bit Twiddling
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_rotl8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_rotr8(unsigned char _Value, unsigned char _Shift) {
+ _Shift &= 0x7;
+ return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+_rotl16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+_rotr16(unsigned short _Value, unsigned char _Shift) {
+ _Shift &= 0xf;
+ return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_rotl(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_rotr(unsigned int _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+_lrotl(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
+}
+static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+_lrotr(unsigned long _Value, int _Shift) {
+ _Shift &= 0x1f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
+}
+static
+__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+_rotl64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
+}
+static
+__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+_rotr64(unsigned __int64 _Value, int _Shift) {
+ _Shift &= 0x3f;
+ return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
+}
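+/*
+ * Usage sketch (editor's illustration, kept under #if 0 so the header is
+ * unchanged for compilation): the rotate helpers above mask the shift
+ * count to the operand width, so bits shifted out one end re-enter the
+ * other.
+ */
+#if 0
+static void rotate_examples(void) {
+  unsigned char b = 0x81;                   /* 1000 0001 */
+  unsigned char l = _rotl8(b, 1);           /* 0x03: bit 7 wraps to bit 0 */
+  unsigned char r = _rotr8(b, 1);           /* 0xc0: bit 0 wraps to bit 7 */
+  unsigned int  w = _rotl(0x80000000u, 4);  /* 0x00000008 */
+  (void)l; (void)r; (void)w;
+}
+#endif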
+/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzl(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 31 - __builtin_clzl(_Mask);
+ return 1;
+}
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_lzcnt_u32(unsigned int a) {
+ if (!a)
+ return 32;
+ return __builtin_clzl(a);
+}
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__popcnt16(unsigned short value) {
+ return __builtin_popcount((int)value);
+}
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__popcnt(unsigned int value) {
+ return __builtin_popcount(value);
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittest(long const *a, long b) {
+ return (*a >> b) & 1;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittestandcomplement(long *a, long b) {
+ unsigned char x = (*a >> b) & 1;
+ *a = *a ^ (1 << b);
+ return x;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittestandreset(long *a, long b) {
+ unsigned char x = (*a >> b) & 1;
+ *a = *a & ~(1 << b);
+ return x;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittestandset(long *a, long b) {
+ unsigned char x = (*a >> b) & 1;
+ *a = *a | (1 << b);
+ return x;
+}
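+/*
+ * Usage sketch (editor's illustration, under #if 0): the scan intrinsics
+ * report an empty mask through the return value, so the index written
+ * through *_Index is only meaningful when they return non-zero.
+ */
+#if 0
+static unsigned long lowest_set_bit(unsigned long mask) {
+  unsigned long index;
+  if (_BitScanForward(&index, mask))
+    return index;   /* position of the least-significant set bit */
+  return 32;        /* caller-chosen sentinel for mask == 0 */
+}
+#endif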
+#ifdef __x86_64__
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = __builtin_ctzll(_Mask);
+ return 1;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
+ if (!_Mask)
+ return 0;
+ *_Index = 63 - __builtin_clzll(_Mask);
+ return 1;
+}
+static
+__inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+_lzcnt_u64(unsigned __int64 a) {
+ if (!a)
+ return 64;
+ return __builtin_clzll(a);
+}
+static __inline__
+unsigned __int64 __attribute__((__always_inline__, __nodebug__))
+ __popcnt64(unsigned __int64 value) {
+ return __builtin_popcountll(value);
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittest64(__int64 const *a, __int64 b) {
+ return (*a >> b) & 1;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittestandcomplement64(__int64 *a, __int64 b) {
+ unsigned char x = (*a >> b) & 1;
+ *a = *a ^ (1ll << b);
+ return x;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittestandreset64(__int64 *a, __int64 b) {
+ unsigned char x = (*a >> b) & 1;
+ *a = *a & ~(1ll << b);
+ return x;
+}
+static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+_bittestandset64(__int64 *a, __int64 b) {
+ unsigned char x = (*a >> b) & 1;
+ *a = *a | (1ll << b);
+ return x;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
+ return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
+ return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeAdd(long volatile *_Addend, long _Value) {
+ return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
+}
+#endif
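+/*
+ * Usage sketch (editor's illustration, under #if 0):
+ * _InterlockedExchangeAdd returns the value the addend held before the
+ * addition; the wrappers above recover it by subtracting _Value from the
+ * __atomic_add_fetch result.
+ */
+#if 0
+static long fetch_and_add5(long volatile *counter) {
+  return _InterlockedExchangeAdd(counter, 5); /* pre-increment value */
+}
+#endif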
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
+ return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
+ return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
+ return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+ return __atomic_sub_fetch(_Subend, _Value, 0) + _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedIncrement16(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, 0);
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedIncrement(long volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, 0);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedIncrement64(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, 0);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedDecrement16(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, 0);
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedDecrement(long volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, 0);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedDecrement64(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, 0);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedAnd8(char volatile *_Value, char _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, 0);
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedAnd16(short volatile *_Value, short _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, 0);
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedAnd(long volatile *_Value, long _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, 0);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_and_fetch(_Value, _Mask, 0);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedOr8(char volatile *_Value, char _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, 0);
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedOr16(short volatile *_Value, short _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, 0);
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedOr(long volatile *_Value, long _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, 0);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_or_fetch(_Value, _Mask, 0);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedXor8(char volatile *_Value, char _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, 0);
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedXor16(short volatile *_Value, short _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, 0);
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedXor(long volatile *_Value, long _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, 0);
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_xor_fetch(_Value, _Mask, 0);
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
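+/* Note: the generic __atomic_exchange below is passed &_Value as both the
+ * value to store and the result slot, so the previous contents of *_Target
+ * land in _Value and returning it yields the old value, matching the MSVC
+ * semantics (memory order 0 is __ATOMIC_RELAXED). */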
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchange8(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, 0);
+ return _Value;
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchange16(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, 0);
+ return _Value;
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchange(long volatile *_Target, long _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, 0);
+ return _Value;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, 0);
+ return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ char __attribute__((__always_inline__, __nodebug__))
+_InterlockedCompareExchange8(char volatile *_Destination,
+ char _Exchange, char _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
+ return _Comparand;
+}
+static __inline__ short __attribute__((__always_inline__, __nodebug__))
+_InterlockedCompareExchange16(short volatile *_Destination,
+ short _Exchange, short _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
+ return _Comparand;
+}
+static __inline__ long __attribute__((__always_inline__, __nodebug__))
+_InterlockedCompareExchange(long volatile *_Destination,
+ long _Exchange, long _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
+ return _Comparand;
+}
+#ifdef __x86_64__
+static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
+_InterlockedCompareExchange64(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
+ return _Comparand;
+}
+#endif
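+/*
+ * Usage sketch (editor's illustration, under #if 0): like the MSVC
+ * intrinsic, the wrappers above return the value actually observed at
+ * *_Destination, so a compare-exchange loop retries until the observed
+ * value matches the expected one.
+ */
+#if 0
+static void atomic_max(long volatile *dest, long candidate) {
+  long seen = *dest;
+  while (candidate > seen) {
+    long prev = _InterlockedCompareExchange(dest, candidate, seen);
+    if (prev == seen)
+      break;       /* our value was installed */
+    seen = prev;   /* lost the race; re-check against the newer value */
+  }
+}
+#endif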
+/*----------------------------------------------------------------------------*\
+|* Barriers
+\*----------------------------------------------------------------------------*/
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
+_ReadWriteBarrier(void) {
+ __asm__ volatile ("" : : : "memory");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
+_ReadBarrier(void) {
+ __asm__ volatile ("" : : : "memory");
+}
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
+_WriteBarrier(void) {
+ __asm__ volatile ("" : : : "memory");
+}
+/*----------------------------------------------------------------------------*\
+|* Misc
+\*----------------------------------------------------------------------------*/
+static __inline__ void * __attribute__((__always_inline__, __nodebug__))
+_AddressOfReturnAddress(void) {
+ return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
+}
+static __inline__ void * __attribute__((__always_inline__, __nodebug__))
+_ReturnAddress(void) {
+ return __builtin_return_address(0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INTRIN_H */
+#endif /* _MSC_VER */
diff --git a/renderscript/clang-include/arm_neon.h b/renderscript/clang-include/arm_neon.h
new file mode 100644
index 0000000..c297518
--- /dev/null
+++ b/renderscript/clang-include/arm_neon.h
@@ -0,0 +1,9595 @@
+/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_NEON_H
+#define __ARM_NEON_H
+
+#if !defined(__ARM_NEON__) && !defined(__ARM_NEON)
+#error "NEON support not enabled"
+#endif
+
+#include <stdint.h>
+
+typedef float float32_t;
+typedef __fp16 float16_t;
+#ifdef __aarch64__
+typedef double float64_t;
+#endif
+
+#ifdef __aarch64__
+typedef uint8_t poly8_t;
+typedef uint16_t poly16_t;
+typedef uint64_t poly64_t;
+#else
+typedef int8_t poly8_t;
+typedef int16_t poly16_t;
+#endif
+typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
+typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
+typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
+typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
+typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
+typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
+typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
+typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
+typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
+typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
+typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
+typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
+typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
+typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
+typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
+typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
+typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
+typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
+typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
+typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
+#ifdef __aarch64__
+typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
+typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
+#endif
+typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
+typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
+typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
+typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
+#ifdef __aarch64__
+typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
+typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
+#endif
+
+typedef struct int8x8x2_t {
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t {
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t {
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t {
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t {
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t {
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t {
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t {
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t {
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t {
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t {
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t {
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t {
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t {
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t {
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t {
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float16x4x2_t {
+ float16x4_t val[2];
+} float16x4x2_t;
+
+typedef struct float16x8x2_t {
+ float16x8_t val[2];
+} float16x8x2_t;
+
+typedef struct float32x2x2_t {
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t {
+ float32x4_t val[2];
+} float32x4x2_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x2_t {
+ float64x1_t val[2];
+} float64x1x2_t;
+
+typedef struct float64x2x2_t {
+ float64x2_t val[2];
+} float64x2x2_t;
+
+#endif
+typedef struct poly8x8x2_t {
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t {
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t {
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t {
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x2_t {
+ poly64x1_t val[2];
+} poly64x1x2_t;
+
+typedef struct poly64x2x2_t {
+ poly64x2_t val[2];
+} poly64x2x2_t;
+
+#endif
+typedef struct int8x8x3_t {
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t {
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t {
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t {
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t {
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t {
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t {
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t {
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t {
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t {
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t {
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t {
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t {
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t {
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t {
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t {
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float16x4x3_t {
+ float16x4_t val[3];
+} float16x4x3_t;
+
+typedef struct float16x8x3_t {
+ float16x8_t val[3];
+} float16x8x3_t;
+
+typedef struct float32x2x3_t {
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t {
+ float32x4_t val[3];
+} float32x4x3_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x3_t {
+ float64x1_t val[3];
+} float64x1x3_t;
+
+typedef struct float64x2x3_t {
+ float64x2_t val[3];
+} float64x2x3_t;
+
+#endif
+typedef struct poly8x8x3_t {
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t {
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t {
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t {
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x3_t {
+ poly64x1_t val[3];
+} poly64x1x3_t;
+
+typedef struct poly64x2x3_t {
+ poly64x2_t val[3];
+} poly64x2x3_t;
+
+#endif
+typedef struct int8x8x4_t {
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t {
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t {
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t {
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t {
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t {
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t {
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t {
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t {
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t {
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t {
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t {
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t {
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t {
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t {
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t {
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float16x4x4_t {
+ float16x4_t val[4];
+} float16x4x4_t;
+
+typedef struct float16x8x4_t {
+ float16x8_t val[4];
+} float16x8x4_t;
+
+typedef struct float32x2x4_t {
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t {
+ float32x4_t val[4];
+} float32x4x4_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x4_t {
+ float64x1_t val[4];
+} float64x1x4_t;
+
+typedef struct float64x2x4_t {
+ float64x2_t val[4];
+} float64x2x4_t;
+
+#endif
+typedef struct poly8x8x4_t {
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t {
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t {
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t {
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x4_t {
+ poly64x1_t val[4];
+} poly64x1x4_t;
+
+typedef struct poly64x2x4_t {
+ poly64x2_t val[4];
+} poly64x2x4_t;
+
+#endif
+
+#define __ai static inline __attribute__((__always_inline__, __nodebug__))
+
+__ai int16x8_t vmovl_s8(int8x8_t __a) {
+ return (int16x8_t)__builtin_neon_vmovl_v(__a, 33); }
+__ai int32x4_t vmovl_s16(int16x4_t __a) {
+ return (int32x4_t)__builtin_neon_vmovl_v((int8x8_t)__a, 34); }
+__ai int64x2_t vmovl_s32(int32x2_t __a) {
+ return (int64x2_t)__builtin_neon_vmovl_v((int8x8_t)__a, 35); }
+__ai uint16x8_t vmovl_u8(uint8x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vmovl_v((int8x8_t)__a, 49); }
+__ai uint32x4_t vmovl_u16(uint16x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vmovl_v((int8x8_t)__a, 50); }
+__ai uint64x2_t vmovl_u32(uint32x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vmovl_v((int8x8_t)__a, 51); }
+
+__ai int16x8_t vmull_s8(int8x8_t __a, int8x8_t __b) {
+ return (int16x8_t)__builtin_neon_vmull_v(__a, __b, 33); }
+__ai int32x4_t vmull_s16(int16x4_t __a, int16x4_t __b) {
+ return (int32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 34); }
+__ai int64x2_t vmull_s32(int32x2_t __a, int32x2_t __b) {
+ return (int64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 35); }
+__ai uint16x8_t vmull_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 49); }
+__ai uint32x4_t vmull_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 50); }
+__ai uint64x2_t vmull_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 51); }
+__ai poly16x8_t vmull_p8(poly8x8_t __a, poly8x8_t __b) {
+ return (poly16x8_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)__b, 37); }
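+/*
+ * Usage sketch (editor's illustration, under #if 0): vmull_* multiplies
+ * into lanes twice as wide, so 8x8-bit products are represented exactly
+ * with no overflow or truncation.
+ */
+#if 0
+static uint16x8_t scale_u8(uint8x8_t pix, uint8x8_t gain) {
+  return vmull_u8(pix, gain); /* eight exact 16-bit products */
+}
+#endif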
+
+__ai int8x8_t vabd_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vabd_v(__a, __b, 0); }
+__ai int16x4_t vabd_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vabd_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vabd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vabd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vabd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai float32x2_t vabd_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai int8x16_t vabdq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vabdq_v(__a, __b, 32); }
+__ai int16x8_t vabdq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vabdq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vabdq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vabdq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vabdq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai float32x4_t vabdq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+
+__ai int16x8_t vabdl_s8(int8x8_t __a, int8x8_t __b) {
+ return (int16x8_t)vmovl_u8((uint8x8_t)vabd_s8(__a, __b)); }
+__ai int32x4_t vabdl_s16(int16x4_t __a, int16x4_t __b) {
+ return (int32x4_t)vmovl_u16((uint16x4_t)vabd_s16(__a, __b)); }
+__ai int64x2_t vabdl_s32(int32x2_t __a, int32x2_t __b) {
+ return (int64x2_t)vmovl_u32((uint32x2_t)vabd_s32(__a, __b)); }
+__ai uint16x8_t vabdl_u8(uint8x8_t __a, uint8x8_t __b) {
+ return vmovl_u8(vabd_u8(__a, __b)); }
+__ai uint32x4_t vabdl_u16(uint16x4_t __a, uint16x4_t __b) {
+ return vmovl_u16(vabd_u16(__a, __b)); }
+__ai uint64x2_t vabdl_u32(uint32x2_t __a, uint32x2_t __b) {
+ return vmovl_u32(vabd_u32(__a, __b)); }
+
+__ai int8x8_t vaba_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return __a + vabd_s8(__b, __c); }
+__ai int16x4_t vaba_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return __a + vabd_s16(__b, __c); }
+__ai int32x2_t vaba_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return __a + vabd_s32(__b, __c); }
+__ai uint8x8_t vaba_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return __a + vabd_u8(__b, __c); }
+__ai uint16x4_t vaba_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return __a + vabd_u16(__b, __c); }
+__ai uint32x2_t vaba_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return __a + vabd_u32(__b, __c); }
+__ai int8x16_t vabaq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
+ return __a + vabdq_s8(__b, __c); }
+__ai int16x8_t vabaq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return __a + vabdq_s16(__b, __c); }
+__ai int32x4_t vabaq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return __a + vabdq_s32(__b, __c); }
+__ai uint8x16_t vabaq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return __a + vabdq_u8(__b, __c); }
+__ai uint16x8_t vabaq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return __a + vabdq_u16(__b, __c); }
+__ai uint32x4_t vabaq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return __a + vabdq_u32(__b, __c); }
+
+__ai int16x8_t vabal_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return __a + vabdl_s8(__b, __c); }
+__ai int32x4_t vabal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return __a + vabdl_s16(__b, __c); }
+__ai int64x2_t vabal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return __a + vabdl_s32(__b, __c); }
+__ai uint16x8_t vabal_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return __a + vabdl_u8(__b, __c); }
+__ai uint32x4_t vabal_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return __a + vabdl_u16(__b, __c); }
+__ai uint64x2_t vabal_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return __a + vabdl_u32(__b, __c); }
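+/*
+ * Usage sketch (editor's illustration, under #if 0): vabal_u8 widens the
+ * absolute differences to 16 bits before accumulating, the per-step
+ * building block of an overflow-safe sum-of-absolute-differences loop.
+ */
+#if 0
+static uint16x8_t sad_step(uint16x8_t acc, uint8x8_t ref, uint8x8_t cur) {
+  return vabal_u8(acc, ref, cur); /* acc[i] += |ref[i] - cur[i]| */
+}
+#endif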
+
+
+__ai int8x8_t vabs_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vabs_v(__a, 0); }
+__ai int16x4_t vabs_s16(int16x4_t __a) {
+ return (int16x4_t)__builtin_neon_vabs_v((int8x8_t)__a, 1); }
+__ai int32x2_t vabs_s32(int32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vabs_v((int8x8_t)__a, 2); }
+__ai float32x2_t vabs_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vabs_v((int8x8_t)__a, 8); }
+__ai int8x16_t vabsq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vabsq_v(__a, 32); }
+__ai int16x8_t vabsq_s16(int16x8_t __a) {
+ return (int16x8_t)__builtin_neon_vabsq_v((int8x16_t)__a, 33); }
+__ai int32x4_t vabsq_s32(int32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vabsq_v((int8x16_t)__a, 34); }
+__ai float32x4_t vabsq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vabsq_v((int8x16_t)__a, 40); }
+
+__ai int8x8_t vadd_s8(int8x8_t __a, int8x8_t __b) {
+ return __a + __b; }
+__ai int16x4_t vadd_s16(int16x4_t __a, int16x4_t __b) {
+ return __a + __b; }
+__ai int32x2_t vadd_s32(int32x2_t __a, int32x2_t __b) {
+ return __a + __b; }
+__ai int64x1_t vadd_s64(int64x1_t __a, int64x1_t __b) {
+ return __a + __b; }
+__ai float32x2_t vadd_f32(float32x2_t __a, float32x2_t __b) {
+ return __a + __b; }
+__ai uint8x8_t vadd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a + __b; }
+__ai uint16x4_t vadd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a + __b; }
+__ai uint32x2_t vadd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a + __b; }
+__ai uint64x1_t vadd_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a + __b; }
+__ai int8x16_t vaddq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a + __b; }
+__ai int16x8_t vaddq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a + __b; }
+__ai int32x4_t vaddq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a + __b; }
+__ai int64x2_t vaddq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a + __b; }
+__ai float32x4_t vaddq_f32(float32x4_t __a, float32x4_t __b) {
+ return __a + __b; }
+__ai uint8x16_t vaddq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a + __b; }
+__ai uint16x8_t vaddq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a + __b; }
+__ai uint32x4_t vaddq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a + __b; }
+__ai uint64x2_t vaddq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a + __b; }
+
+__ai int8x8_t vaddhn_s16(int16x8_t __a, int16x8_t __b) {
+ return (int8x8_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
+__ai int16x4_t vaddhn_s32(int32x4_t __a, int32x4_t __b) {
+ return (int16x4_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
+__ai int32x2_t vaddhn_s64(int64x2_t __a, int64x2_t __b) {
+ return (int32x2_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
+__ai uint8x8_t vaddhn_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
+__ai uint16x4_t vaddhn_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
+__ai uint32x2_t vaddhn_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vaddhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
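+/*
+ * Usage sketch (editor's illustration, under #if 0): vaddhn_* adds and
+ * keeps only the high half of each lane, e.g. (a[i] + b[i]) >> 8 for
+ * 16-bit lanes, narrowing the result type in one step.
+ */
+#if 0
+static uint8x8_t add_high_halves(uint16x8_t a, uint16x8_t b) {
+  return vaddhn_u16(a, b); /* truncating narrow of the lane sums */
+}
+#endif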
+
+__ai int16x8_t vaddl_s8(int8x8_t __a, int8x8_t __b) {
+ return vmovl_s8(__a) + vmovl_s8(__b); }
+__ai int32x4_t vaddl_s16(int16x4_t __a, int16x4_t __b) {
+ return vmovl_s16(__a) + vmovl_s16(__b); }
+__ai int64x2_t vaddl_s32(int32x2_t __a, int32x2_t __b) {
+ return vmovl_s32(__a) + vmovl_s32(__b); }
+__ai uint16x8_t vaddl_u8(uint8x8_t __a, uint8x8_t __b) {
+ return vmovl_u8(__a) + vmovl_u8(__b); }
+__ai uint32x4_t vaddl_u16(uint16x4_t __a, uint16x4_t __b) {
+ return vmovl_u16(__a) + vmovl_u16(__b); }
+__ai uint64x2_t vaddl_u32(uint32x2_t __a, uint32x2_t __b) {
+ return vmovl_u32(__a) + vmovl_u32(__b); }
+
+__ai int16x8_t vaddw_s8(int16x8_t __a, int8x8_t __b) {
+ return __a + vmovl_s8(__b); }
+__ai int32x4_t vaddw_s16(int32x4_t __a, int16x4_t __b) {
+ return __a + vmovl_s16(__b); }
+__ai int64x2_t vaddw_s32(int64x2_t __a, int32x2_t __b) {
+ return __a + vmovl_s32(__b); }
+__ai uint16x8_t vaddw_u8(uint16x8_t __a, uint8x8_t __b) {
+ return __a + vmovl_u8(__b); }
+__ai uint32x4_t vaddw_u16(uint32x4_t __a, uint16x4_t __b) {
+ return __a + vmovl_u16(__b); }
+__ai uint64x2_t vaddw_u32(uint64x2_t __a, uint32x2_t __b) {
+ return __a + vmovl_u32(__b); }
+
+__ai int8x8_t vand_s8(int8x8_t __a, int8x8_t __b) {
+ return __a & __b; }
+__ai int16x4_t vand_s16(int16x4_t __a, int16x4_t __b) {
+ return __a & __b; }
+__ai int32x2_t vand_s32(int32x2_t __a, int32x2_t __b) {
+ return __a & __b; }
+__ai int64x1_t vand_s64(int64x1_t __a, int64x1_t __b) {
+ return __a & __b; }
+__ai uint8x8_t vand_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a & __b; }
+__ai uint16x4_t vand_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a & __b; }
+__ai uint32x2_t vand_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a & __b; }
+__ai uint64x1_t vand_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a & __b; }
+__ai int8x16_t vandq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a & __b; }
+__ai int16x8_t vandq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a & __b; }
+__ai int32x4_t vandq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a & __b; }
+__ai int64x2_t vandq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a & __b; }
+__ai uint8x16_t vandq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a & __b; }
+__ai uint16x8_t vandq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a & __b; }
+__ai uint32x4_t vandq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a & __b; }
+__ai uint64x2_t vandq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a & __b; }
+
+__ai int8x8_t vbic_s8(int8x8_t __a, int8x8_t __b) {
+ return __a & ~__b; }
+__ai int16x4_t vbic_s16(int16x4_t __a, int16x4_t __b) {
+ return __a & ~__b; }
+__ai int32x2_t vbic_s32(int32x2_t __a, int32x2_t __b) {
+ return __a & ~__b; }
+__ai int64x1_t vbic_s64(int64x1_t __a, int64x1_t __b) {
+ return __a & ~__b; }
+__ai uint8x8_t vbic_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a & ~__b; }
+__ai uint16x4_t vbic_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a & ~__b; }
+__ai uint32x2_t vbic_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a & ~__b; }
+__ai uint64x1_t vbic_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a & ~__b; }
+__ai int8x16_t vbicq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a & ~__b; }
+__ai int16x8_t vbicq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a & ~__b; }
+__ai int32x4_t vbicq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a & ~__b; }
+__ai int64x2_t vbicq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a & ~__b; }
+__ai uint8x16_t vbicq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a & ~__b; }
+__ai uint16x8_t vbicq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a & ~__b; }
+__ai uint32x4_t vbicq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a & ~__b; }
+__ai uint64x2_t vbicq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a & ~__b; }
+
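+/* vbsl_*: bitwise select. For each bit the result takes __b where the mask
+ * __a is 1 and __c where it is 0, i.e. (__a & __b) | (~__a & __c).
+ * Example: vbsl_u8(m, x, y) merges x and y under the per-bit mask m. */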
+__ai int8x8_t vbsl_s8(uint8x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vbsl_v((int8x8_t)__a, __b, __c, 0); }
+__ai int16x4_t vbsl_s16(uint16x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return (int16x4_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 1); }
+__ai int32x2_t vbsl_s32(uint32x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return (int32x2_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 2); }
+__ai int64x1_t vbsl_s64(uint64x1_t __a, int64x1_t __b, int64x1_t __c) {
+ return (int64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 3); }
+__ai uint8x8_t vbsl_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 16); }
+__ai uint16x4_t vbsl_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return (uint16x4_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 17); }
+__ai uint32x2_t vbsl_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return (uint32x2_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 18); }
+__ai uint64x1_t vbsl_u64(uint64x1_t __a, uint64x1_t __b, uint64x1_t __c) {
+ return (uint64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 19); }
+__ai float32x2_t vbsl_f32(uint32x2_t __a, float32x2_t __b, float32x2_t __c) {
+ return (float32x2_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 8); }
+__ai poly8x8_t vbsl_p8(uint8x8_t __a, poly8x8_t __b, poly8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 4); }
+__ai poly16x4_t vbsl_p16(uint16x4_t __a, poly16x4_t __b, poly16x4_t __c) {
+ return (poly16x4_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 5); }
+__ai int8x16_t vbslq_s8(uint8x16_t __a, int8x16_t __b, int8x16_t __c) {
+ return (int8x16_t)__builtin_neon_vbslq_v((int8x16_t)__a, __b, __c, 32); }
+__ai int16x8_t vbslq_s16(uint16x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return (int16x8_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 33); }
+__ai int32x4_t vbslq_s32(uint32x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return (int32x4_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 34); }
+__ai int64x2_t vbslq_s64(uint64x2_t __a, int64x2_t __b, int64x2_t __c) {
+ return (int64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 35); }
+__ai uint8x16_t vbslq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return (uint8x16_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 48); }
+__ai uint16x8_t vbslq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return (uint16x8_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 49); }
+__ai uint32x4_t vbslq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
+__ai uint64x2_t vbslq_u64(uint64x2_t __a, uint64x2_t __b, uint64x2_t __c) {
+ return (uint64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 51); }
+__ai float32x4_t vbslq_f32(uint32x4_t __a, float32x4_t __b, float32x4_t __c) {
+ return (float32x4_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 40); }
+__ai poly8x16_t vbslq_p8(uint8x16_t __a, poly8x16_t __b, poly8x16_t __c) {
+ return (poly8x16_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 36); }
+__ai poly16x8_t vbslq_p16(uint16x8_t __a, poly16x8_t __b, poly16x8_t __c) {
+ return (poly16x8_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 37); }
+
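+/* vcage, vcagt, vcale, vcalt: absolute compares -- |__a| >= |__b| and so
+ * on, yielding all-ones lanes for true and all-zeros for false. */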
+__ai uint32x2_t vcage_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vcage_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint32x4_t vcageq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vcageq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
+__ai uint32x2_t vcagt_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vcagt_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint32x4_t vcagtq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vcagtq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
+__ai uint32x2_t vcale_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vcale_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint32x4_t vcaleq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vcaleq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
+__ai uint32x2_t vcalt_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vcalt_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint32x4_t vcaltq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vcaltq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
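+/* vceq, vcge, vcgt, vcle, vclt: lane-wise compares mapped onto C vector
+ * comparison operators; each lane yields all ones (true) or all zeros. */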
+__ai uint8x8_t vceq_s8(int8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)(__a == __b); }
+__ai uint16x4_t vceq_s16(int16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)(__a == __b); }
+__ai uint32x2_t vceq_s32(int32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)(__a == __b); }
+__ai uint32x2_t vceq_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)(__a == __b); }
+__ai uint8x8_t vceq_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)(__a == __b); }
+__ai uint16x4_t vceq_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)(__a == __b); }
+__ai uint32x2_t vceq_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)(__a == __b); }
+__ai uint8x8_t vceq_p8(poly8x8_t __a, poly8x8_t __b) {
+ return (uint8x8_t)(__a == __b); }
+__ai uint8x16_t vceqq_s8(int8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)(__a == __b); }
+__ai uint16x8_t vceqq_s16(int16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)(__a == __b); }
+__ai uint32x4_t vceqq_s32(int32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)(__a == __b); }
+__ai uint32x4_t vceqq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)(__a == __b); }
+__ai uint8x16_t vceqq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)(__a == __b); }
+__ai uint16x8_t vceqq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)(__a == __b); }
+__ai uint32x4_t vceqq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)(__a == __b); }
+__ai uint8x16_t vceqq_p8(poly8x16_t __a, poly8x16_t __b) {
+ return (uint8x16_t)(__a == __b); }
+
+__ai uint8x8_t vcge_s8(int8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)(__a >= __b); }
+__ai uint16x4_t vcge_s16(int16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)(__a >= __b); }
+__ai uint32x2_t vcge_s32(int32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)(__a >= __b); }
+__ai uint32x2_t vcge_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)(__a >= __b); }
+__ai uint8x8_t vcge_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)(__a >= __b); }
+__ai uint16x4_t vcge_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)(__a >= __b); }
+__ai uint32x2_t vcge_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)(__a >= __b); }
+__ai uint8x16_t vcgeq_s8(int8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)(__a >= __b); }
+__ai uint16x8_t vcgeq_s16(int16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)(__a >= __b); }
+__ai uint32x4_t vcgeq_s32(int32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)(__a >= __b); }
+__ai uint32x4_t vcgeq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)(__a >= __b); }
+__ai uint8x16_t vcgeq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)(__a >= __b); }
+__ai uint16x8_t vcgeq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)(__a >= __b); }
+__ai uint32x4_t vcgeq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)(__a >= __b); }
+
+__ai uint8x8_t vcgt_s8(int8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)(__a > __b); }
+__ai uint16x4_t vcgt_s16(int16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)(__a > __b); }
+__ai uint32x2_t vcgt_s32(int32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)(__a > __b); }
+__ai uint32x2_t vcgt_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)(__a > __b); }
+__ai uint8x8_t vcgt_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)(__a > __b); }
+__ai uint16x4_t vcgt_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)(__a > __b); }
+__ai uint32x2_t vcgt_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)(__a > __b); }
+__ai uint8x16_t vcgtq_s8(int8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)(__a > __b); }
+__ai uint16x8_t vcgtq_s16(int16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)(__a > __b); }
+__ai uint32x4_t vcgtq_s32(int32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)(__a > __b); }
+__ai uint32x4_t vcgtq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)(__a > __b); }
+__ai uint8x16_t vcgtq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)(__a > __b); }
+__ai uint16x8_t vcgtq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)(__a > __b); }
+__ai uint32x4_t vcgtq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)(__a > __b); }
+
+__ai uint8x8_t vcle_s8(int8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)(__a <= __b); }
+__ai uint16x4_t vcle_s16(int16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)(__a <= __b); }
+__ai uint32x2_t vcle_s32(int32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)(__a <= __b); }
+__ai uint32x2_t vcle_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)(__a <= __b); }
+__ai uint8x8_t vcle_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)(__a <= __b); }
+__ai uint16x4_t vcle_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)(__a <= __b); }
+__ai uint32x2_t vcle_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)(__a <= __b); }
+__ai uint8x16_t vcleq_s8(int8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)(__a <= __b); }
+__ai uint16x8_t vcleq_s16(int16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)(__a <= __b); }
+__ai uint32x4_t vcleq_s32(int32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)(__a <= __b); }
+__ai uint32x4_t vcleq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)(__a <= __b); }
+__ai uint8x16_t vcleq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)(__a <= __b); }
+__ai uint16x8_t vcleq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)(__a <= __b); }
+__ai uint32x4_t vcleq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)(__a <= __b); }
+
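+/* vcls_*: count leading sign bits -- consecutive bits after the sign bit
+ * that match it, excluding the sign bit itself. */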
+__ai int8x8_t vcls_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vcls_v(__a, 0); }
+__ai int16x4_t vcls_s16(int16x4_t __a) {
+ return (int16x4_t)__builtin_neon_vcls_v((int8x8_t)__a, 1); }
+__ai int32x2_t vcls_s32(int32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vcls_v((int8x8_t)__a, 2); }
+__ai int8x16_t vclsq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vclsq_v(__a, 32); }
+__ai int16x8_t vclsq_s16(int16x8_t __a) {
+ return (int16x8_t)__builtin_neon_vclsq_v((int8x16_t)__a, 33); }
+__ai int32x4_t vclsq_s32(int32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vclsq_v((int8x16_t)__a, 34); }
+
+__ai uint8x8_t vclt_s8(int8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)(__a < __b); }
+__ai uint16x4_t vclt_s16(int16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)(__a < __b); }
+__ai uint32x2_t vclt_s32(int32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)(__a < __b); }
+__ai uint32x2_t vclt_f32(float32x2_t __a, float32x2_t __b) {
+ return (uint32x2_t)(__a < __b); }
+__ai uint8x8_t vclt_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)(__a < __b); }
+__ai uint16x4_t vclt_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)(__a < __b); }
+__ai uint32x2_t vclt_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)(__a < __b); }
+__ai uint8x16_t vcltq_s8(int8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)(__a < __b); }
+__ai uint16x8_t vcltq_s16(int16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)(__a < __b); }
+__ai uint32x4_t vcltq_s32(int32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)(__a < __b); }
+__ai uint32x4_t vcltq_f32(float32x4_t __a, float32x4_t __b) {
+ return (uint32x4_t)(__a < __b); }
+__ai uint8x16_t vcltq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)(__a < __b); }
+__ai uint16x8_t vcltq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)(__a < __b); }
+__ai uint32x4_t vcltq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)(__a < __b); }
+
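+/* vclz_* and vcnt_*: count leading zeros per lane, and population count
+ * per byte. */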
+__ai int8x8_t vclz_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vclz_v(__a, 0); }
+__ai int16x4_t vclz_s16(int16x4_t __a) {
+ return (int16x4_t)__builtin_neon_vclz_v((int8x8_t)__a, 1); }
+__ai int32x2_t vclz_s32(int32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vclz_v((int8x8_t)__a, 2); }
+__ai uint8x8_t vclz_u8(uint8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vclz_v((int8x8_t)__a, 16); }
+__ai uint16x4_t vclz_u16(uint16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vclz_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vclz_u32(uint32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vclz_v((int8x8_t)__a, 18); }
+__ai int8x16_t vclzq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vclzq_v(__a, 32); }
+__ai int16x8_t vclzq_s16(int16x8_t __a) {
+ return (int16x8_t)__builtin_neon_vclzq_v((int8x16_t)__a, 33); }
+__ai int32x4_t vclzq_s32(int32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vclzq_v((int8x16_t)__a, 34); }
+__ai uint8x16_t vclzq_u8(uint8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vclzq_v((int8x16_t)__a, 48); }
+__ai uint16x8_t vclzq_u16(uint16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vclzq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vclzq_u32(uint32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vclzq_v((int8x16_t)__a, 50); }
+
+__ai uint8x8_t vcnt_u8(uint8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vcnt_v((int8x8_t)__a, 16); }
+__ai int8x8_t vcnt_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vcnt_v(__a, 0); }
+__ai poly8x8_t vcnt_p8(poly8x8_t __a) {
+ return (poly8x8_t)__builtin_neon_vcnt_v((int8x8_t)__a, 4); }
+__ai uint8x16_t vcntq_u8(uint8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vcntq_v((int8x16_t)__a, 48); }
+__ai int8x16_t vcntq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vcntq_v(__a, 32); }
+__ai poly8x16_t vcntq_p8(poly8x16_t __a) {
+ return (poly8x16_t)__builtin_neon_vcntq_v((int8x16_t)__a, 36); }
+
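+/* vcombine_*: concatenate two 64-bit vectors into one 128-bit vector; the
+ * shuffle is done on int64x1_t so a single pattern covers every type. */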
+__ai int8x16_t vcombine_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai int16x8_t vcombine_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai int32x4_t vcombine_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x4_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai int64x2_t vcombine_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai float16x8_t vcombine_f16(float16x4_t __a, float16x4_t __b) {
+ return (float16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai float32x4_t vcombine_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x4_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai uint8x16_t vcombine_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai uint16x8_t vcombine_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai uint32x4_t vcombine_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x4_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai uint64x2_t vcombine_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai poly8x16_t vcombine_p8(poly8x8_t __a, poly8x8_t __b) {
+ return (poly8x16_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai poly16x8_t vcombine_p16(poly16x4_t __a, poly16x4_t __b) {
+ return (poly16x8_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+
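+/* vcreate_*: reinterpret the bits of a uint64_t scalar as a 64-bit vector. */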
+__ai int8x8_t vcreate_s8(uint64_t __a) {
+ return (int8x8_t)__a; }
+__ai int16x4_t vcreate_s16(uint64_t __a) {
+ return (int16x4_t)__a; }
+__ai int32x2_t vcreate_s32(uint64_t __a) {
+ return (int32x2_t)__a; }
+__ai float16x4_t vcreate_f16(uint64_t __a) {
+ return (float16x4_t)__a; }
+__ai float32x2_t vcreate_f32(uint64_t __a) {
+ return (float32x2_t)__a; }
+__ai uint8x8_t vcreate_u8(uint64_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint16x4_t vcreate_u16(uint64_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint32x2_t vcreate_u32(uint64_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint64x1_t vcreate_u64(uint64_t __a) {
+ return (uint64x1_t)__a; }
+__ai poly8x8_t vcreate_p8(uint64_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly16x4_t vcreate_p16(uint64_t __a) {
+ return (poly16x4_t)__a; }
+__ai int64x1_t vcreate_s64(uint64_t __a) {
+ return (int64x1_t)__a; }
+
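+/* vcvt_*: conversions between float and integer vectors, plus f16<->f32.
+ * The _n_ fixed-point forms are macros because the fraction-bit count must
+ * be an integer constant expression. */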
+__ai float16x4_t vcvt_f16_f32(float32x4_t __a) {
+ return (float16x4_t)__builtin_neon_vcvt_f16_v((int8x16_t)__a, 7); }
+
+__ai float32x2_t vcvt_f32_s32(int32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vcvt_f32_v((int8x8_t)__a, 2); }
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vcvt_f32_v((int8x8_t)__a, 18); }
+__ai float32x4_t vcvtq_f32_s32(int32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vcvtq_f32_v((int8x16_t)__a, 34); }
+__ai float32x4_t vcvtq_f32_u32(uint32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vcvtq_f32_v((int8x16_t)__a, 50); }
+
+__ai float32x4_t vcvt_f32_f16(float16x4_t __a) {
+ return (float32x4_t)__builtin_neon_vcvt_f32_f16((int8x8_t)__a, 7); }
+
+#define vcvt_n_f32_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (float32x2_t)__builtin_neon_vcvt_n_f32_v((int8x8_t)__a, __b, 2); })
+#define vcvt_n_f32_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (float32x2_t)__builtin_neon_vcvt_n_f32_v((int8x8_t)__a, __b, 18); })
+#define vcvtq_n_f32_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (float32x4_t)__builtin_neon_vcvtq_n_f32_v((int8x16_t)__a, __b, 34); })
+#define vcvtq_n_f32_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (float32x4_t)__builtin_neon_vcvtq_n_f32_v((int8x16_t)__a, __b, 50); })
+
+#define vcvt_n_s32_f32(a, __b) __extension__ ({ \
+ float32x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vcvt_n_s32_v((int8x8_t)__a, __b, 2); })
+#define vcvtq_n_s32_f32(a, __b) __extension__ ({ \
+ float32x4_t __a = (a); \
+ (int32x4_t)__builtin_neon_vcvtq_n_s32_v((int8x16_t)__a, __b, 34); })
+
+#define vcvt_n_u32_f32(a, __b) __extension__ ({ \
+ float32x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vcvt_n_u32_v((int8x8_t)__a, __b, 18); })
+#define vcvtq_n_u32_f32(a, __b) __extension__ ({ \
+ float32x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vcvtq_n_u32_v((int8x16_t)__a, __b, 50); })
+
+__ai int32x2_t vcvt_s32_f32(float32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vcvt_s32_v((int8x8_t)__a, 2); }
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vcvtq_s32_v((int8x16_t)__a, 34); }
+
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcvt_u32_v((int8x8_t)__a, 18); }
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcvtq_u32_v((int8x16_t)__a, 50); }
+
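+/* vdup_lane_*: broadcast one lane across the whole vector. These are macros
+ * built on __builtin_shufflevector since the lane index must be constant. */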
+#define vdup_lane_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdup_lane_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_lane_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_lane_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdup_lane_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_lane_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_lane_p8(a, __b) __extension__ ({ \
+ poly8x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdup_lane_p16(a, __b) __extension__ ({ \
+ poly16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_lane_f32(a, __b) __extension__ ({ \
+ float32x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdupq_lane_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdupq_lane_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdupq_lane_p8(a, __b) __extension__ ({ \
+ poly8x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_p16(a, __b) __extension__ ({ \
+ poly16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_f32(a, __b) __extension__ ({ \
+ float32x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_lane_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdup_lane_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdupq_lane_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdupq_lane_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+
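+/* vdup_n_* and vdupq_n_*: broadcast a scalar into every lane via a vector
+ * initializer list. */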
+__ai uint8x8_t vdup_n_u8(uint8_t __a) {
+ return (uint8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai uint16x4_t vdup_n_u16(uint16_t __a) {
+ return (uint16x4_t){ __a, __a, __a, __a }; }
+__ai uint32x2_t vdup_n_u32(uint32_t __a) {
+ return (uint32x2_t){ __a, __a }; }
+__ai int8x8_t vdup_n_s8(int8_t __a) {
+ return (int8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai int16x4_t vdup_n_s16(int16_t __a) {
+ return (int16x4_t){ __a, __a, __a, __a }; }
+__ai int32x2_t vdup_n_s32(int32_t __a) {
+ return (int32x2_t){ __a, __a }; }
+__ai poly8x8_t vdup_n_p8(poly8_t __a) {
+ return (poly8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai poly16x4_t vdup_n_p16(poly16_t __a) {
+ return (poly16x4_t){ __a, __a, __a, __a }; }
+__ai float32x2_t vdup_n_f32(float32_t __a) {
+ return (float32x2_t){ __a, __a }; }
+__ai uint8x16_t vdupq_n_u8(uint8_t __a) {
+ return (uint8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai uint16x8_t vdupq_n_u16(uint16_t __a) {
+ return (uint16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai uint32x4_t vdupq_n_u32(uint32_t __a) {
+ return (uint32x4_t){ __a, __a, __a, __a }; }
+__ai int8x16_t vdupq_n_s8(int8_t __a) {
+ return (int8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai int16x8_t vdupq_n_s16(int16_t __a) {
+ return (int16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai int32x4_t vdupq_n_s32(int32_t __a) {
+ return (int32x4_t){ __a, __a, __a, __a }; }
+__ai poly8x16_t vdupq_n_p8(poly8_t __a) {
+ return (poly8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai poly16x8_t vdupq_n_p16(poly16_t __a) {
+ return (poly16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai float32x4_t vdupq_n_f32(float32_t __a) {
+ return (float32x4_t){ __a, __a, __a, __a }; }
+__ai int64x1_t vdup_n_s64(int64_t __a) {
+ return (int64x1_t){ __a }; }
+__ai uint64x1_t vdup_n_u64(uint64_t __a) {
+ return (uint64x1_t){ __a }; }
+__ai int64x2_t vdupq_n_s64(int64_t __a) {
+ return (int64x2_t){ __a, __a }; }
+__ai uint64x2_t vdupq_n_u64(uint64_t __a) {
+ return (uint64x2_t){ __a, __a }; }
+
+__ai int8x8_t veor_s8(int8x8_t __a, int8x8_t __b) {
+ return __a ^ __b; }
+__ai int16x4_t veor_s16(int16x4_t __a, int16x4_t __b) {
+ return __a ^ __b; }
+__ai int32x2_t veor_s32(int32x2_t __a, int32x2_t __b) {
+ return __a ^ __b; }
+__ai int64x1_t veor_s64(int64x1_t __a, int64x1_t __b) {
+ return __a ^ __b; }
+__ai uint8x8_t veor_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a ^ __b; }
+__ai uint16x4_t veor_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a ^ __b; }
+__ai uint32x2_t veor_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a ^ __b; }
+__ai uint64x1_t veor_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a ^ __b; }
+__ai int8x16_t veorq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a ^ __b; }
+__ai int16x8_t veorq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a ^ __b; }
+__ai int32x4_t veorq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a ^ __b; }
+__ai int64x2_t veorq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a ^ __b; }
+__ai uint8x16_t veorq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a ^ __b; }
+__ai uint16x8_t veorq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a ^ __b; }
+__ai uint32x4_t veorq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a ^ __b; }
+__ai uint64x2_t veorq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a ^ __b; }
+
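+/* vext_*: extract a vector from the concatenation of __a and __b, starting
+ * at constant element offset __c -- macros for the same constant-argument
+ * reason as vdup_lane. */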
+#define vext_s8(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vext_v(__a, __b, __c, 0); })
+#define vext_u8(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
+#define vext_p8(a, b, __c) __extension__ ({ \
+ poly8x8_t __a = (a); poly8x8_t __b = (b); \
+ (poly8x8_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 4); })
+#define vext_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
+#define vext_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
+#define vext_p16(a, b, __c) __extension__ ({ \
+ poly16x4_t __a = (a); poly16x4_t __b = (b); \
+ (poly16x4_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 5); })
+#define vext_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
+#define vext_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
+#define vext_s64(a, b, __c) __extension__ ({ \
+ int64x1_t __a = (a); int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
+#define vext_u64(a, b, __c) __extension__ ({ \
+ uint64x1_t __a = (a); uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
+#define vext_f32(a, b, __c) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); \
+ (float32x2_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 8); })
+#define vextq_s8(a, b, __c) __extension__ ({ \
+ int8x16_t __a = (a); int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vextq_v(__a, __b, __c, 32); })
+#define vextq_u8(a, b, __c) __extension__ ({ \
+ uint8x16_t __a = (a); uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
+#define vextq_p8(a, b, __c) __extension__ ({ \
+ poly8x16_t __a = (a); poly8x16_t __b = (b); \
+ (poly8x16_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 36); })
+#define vextq_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
+#define vextq_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
+#define vextq_p16(a, b, __c) __extension__ ({ \
+ poly16x8_t __a = (a); poly16x8_t __b = (b); \
+ (poly16x8_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 37); })
+#define vextq_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
+#define vextq_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
+#define vextq_s64(a, b, __c) __extension__ ({ \
+ int64x2_t __a = (a); int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
+#define vextq_u64(a, b, __c) __extension__ ({ \
+ uint64x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
+#define vextq_f32(a, b, __c) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); \
+ (float32x4_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 40); })
+
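+/* vfma_*: fused multiply-add, __a + (__b * __c) with a single rounding. */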
+__ai float32x2_t vfma_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
+ return (float32x2_t)__builtin_neon_vfma_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 8); }
+__ai float32x4_t vfmaq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
+ return (float32x4_t)__builtin_neon_vfmaq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 40); }
+
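+/* vget_high_* and vget_low_*: return the upper or lower 64-bit half of a
+ * 128-bit vector as a D-register vector. */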
+__ai int8x8_t vget_high_s8(int8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai int16x4_t vget_high_s16(int16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
+__ai int32x2_t vget_high_s32(int32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 2, 3); }
+__ai int64x1_t vget_high_s64(int64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1); }
+__ai float16x4_t vget_high_f16(float16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
+__ai float32x2_t vget_high_f32(float32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 2, 3); }
+__ai uint8x8_t vget_high_u8(uint8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai uint16x4_t vget_high_u16(uint16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
+__ai uint32x2_t vget_high_u32(uint32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 2, 3); }
+__ai uint64x1_t vget_high_u64(uint64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1); }
+__ai poly8x8_t vget_high_p8(poly8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai poly16x4_t vget_high_p16(poly16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 4, 5, 6, 7); }
+
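+/* vget_lane_*: read a single lane out to a scalar; the lane index __b must
+ * be a constant, hence macros. */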
+#define vget_lane_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint8_t)__builtin_neon_vget_lane_i8((int8x8_t)__a, __b); })
+#define vget_lane_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint16_t)__builtin_neon_vget_lane_i16((int16x4_t)__a, __b); })
+#define vget_lane_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint32_t)__builtin_neon_vget_lane_i32((int32x2_t)__a, __b); })
+#define vget_lane_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int8_t)__builtin_neon_vget_lane_i8(__a, __b); })
+#define vget_lane_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int16_t)__builtin_neon_vget_lane_i16(__a, __b); })
+#define vget_lane_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int32_t)__builtin_neon_vget_lane_i32(__a, __b); })
+#define vget_lane_p8(a, __b) __extension__ ({ \
+ poly8x8_t __a = (a); \
+ (poly8_t)__builtin_neon_vget_lane_i8((int8x8_t)__a, __b); })
+#define vget_lane_p16(a, __b) __extension__ ({ \
+ poly16x4_t __a = (a); \
+ (poly16_t)__builtin_neon_vget_lane_i16((int16x4_t)__a, __b); })
+#define vget_lane_f32(a, __b) __extension__ ({ \
+ float32x2_t __a = (a); \
+ (float32_t)__builtin_neon_vget_lane_f32(__a, __b); })
+#define vgetq_lane_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ (uint8_t)__builtin_neon_vgetq_lane_i8((int8x16_t)__a, __b); })
+#define vgetq_lane_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint16_t)__builtin_neon_vgetq_lane_i16((int16x8_t)__a, __b); })
+#define vgetq_lane_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint32_t)__builtin_neon_vgetq_lane_i32((int32x4_t)__a, __b); })
+#define vgetq_lane_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (int8_t)__builtin_neon_vgetq_lane_i8(__a, __b); })
+#define vgetq_lane_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int16_t)__builtin_neon_vgetq_lane_i16(__a, __b); })
+#define vgetq_lane_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int32_t)__builtin_neon_vgetq_lane_i32(__a, __b); })
+#define vgetq_lane_p8(a, __b) __extension__ ({ \
+ poly8x16_t __a = (a); \
+ (poly8_t)__builtin_neon_vgetq_lane_i8((int8x16_t)__a, __b); })
+#define vgetq_lane_p16(a, __b) __extension__ ({ \
+ poly16x8_t __a = (a); \
+ (poly16_t)__builtin_neon_vgetq_lane_i16((int16x8_t)__a, __b); })
+#define vgetq_lane_f32(a, __b) __extension__ ({ \
+ float32x4_t __a = (a); \
+ (float32_t)__builtin_neon_vgetq_lane_f32(__a, __b); })
+#define vget_lane_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (int64_t)__builtin_neon_vget_lane_i64(__a, __b); })
+#define vget_lane_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (uint64_t)__builtin_neon_vget_lane_i64((int64x1_t)__a, __b); })
+#define vgetq_lane_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int64_t)__builtin_neon_vgetq_lane_i64(__a, __b); })
+#define vgetq_lane_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint64_t)__builtin_neon_vgetq_lane_i64((int64x2_t)__a, __b); })
+
+__ai int8x8_t vget_low_s8(int8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai int16x4_t vget_low_s16(int16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
+__ai int32x2_t vget_low_s32(int32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1); }
+__ai int64x1_t vget_low_s64(int64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 0); }
+__ai float16x4_t vget_low_f16(float16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
+__ai float32x2_t vget_low_f32(float32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1); }
+__ai uint8x8_t vget_low_u8(uint8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai uint16x4_t vget_low_u16(uint16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
+__ai uint32x2_t vget_low_u32(uint32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1); }
+__ai uint64x1_t vget_low_u64(uint64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 0); }
+__ai poly8x8_t vget_low_p8(poly8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai poly16x4_t vget_low_p16(poly16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); }
+
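+/* vhadd_* and vhsub_*: halving add/subtract -- (__a + __b) >> 1 and
+ * (__a - __b) >> 1, computed without intermediate overflow. */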
+__ai int8x8_t vhadd_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vhadd_v(__a, __b, 0); }
+__ai int16x4_t vhadd_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vhadd_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vhadd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vhadd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vhadd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vhadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai int8x16_t vhaddq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vhaddq_v(__a, __b, 32); }
+__ai int16x8_t vhaddq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vhaddq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vhaddq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vhaddq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vhaddq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vhaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
+__ai int8x8_t vhsub_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vhsub_v(__a, __b, 0); }
+__ai int16x4_t vhsub_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vhsub_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vhsub_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vhsub_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vhsub_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vhsub_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai int8x16_t vhsubq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vhsubq_v(__a, __b, 32); }
+__ai int16x8_t vhsubq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vhsubq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vhsubq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vhsubq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vhsubq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vhsubq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
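+/* vld1_* and vld1q_*: load one 64- or 128-bit vector from memory. */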
+#define vld1q_u8(__a) __extension__ ({ \
+ (uint8x16_t)__builtin_neon_vld1q_v(__a, 48); })
+#define vld1q_u16(__a) __extension__ ({ \
+ (uint16x8_t)__builtin_neon_vld1q_v(__a, 49); })
+#define vld1q_u32(__a) __extension__ ({ \
+ (uint32x4_t)__builtin_neon_vld1q_v(__a, 50); })
+#define vld1q_u64(__a) __extension__ ({ \
+ (uint64x2_t)__builtin_neon_vld1q_v(__a, 51); })
+#define vld1q_s8(__a) __extension__ ({ \
+ (int8x16_t)__builtin_neon_vld1q_v(__a, 32); })
+#define vld1q_s16(__a) __extension__ ({ \
+ (int16x8_t)__builtin_neon_vld1q_v(__a, 33); })
+#define vld1q_s32(__a) __extension__ ({ \
+ (int32x4_t)__builtin_neon_vld1q_v(__a, 34); })
+#define vld1q_s64(__a) __extension__ ({ \
+ (int64x2_t)__builtin_neon_vld1q_v(__a, 35); })
+#define vld1q_f16(__a) __extension__ ({ \
+ (float16x8_t)__builtin_neon_vld1q_v(__a, 39); })
+#define vld1q_f32(__a) __extension__ ({ \
+ (float32x4_t)__builtin_neon_vld1q_v(__a, 40); })
+#define vld1q_p8(__a) __extension__ ({ \
+ (poly8x16_t)__builtin_neon_vld1q_v(__a, 36); })
+#define vld1q_p16(__a) __extension__ ({ \
+ (poly16x8_t)__builtin_neon_vld1q_v(__a, 37); })
+#define vld1_u8(__a) __extension__ ({ \
+ (uint8x8_t)__builtin_neon_vld1_v(__a, 16); })
+#define vld1_u16(__a) __extension__ ({ \
+ (uint16x4_t)__builtin_neon_vld1_v(__a, 17); })
+#define vld1_u32(__a) __extension__ ({ \
+ (uint32x2_t)__builtin_neon_vld1_v(__a, 18); })
+#define vld1_u64(__a) __extension__ ({ \
+ (uint64x1_t)__builtin_neon_vld1_v(__a, 19); })
+#define vld1_s8(__a) __extension__ ({ \
+ (int8x8_t)__builtin_neon_vld1_v(__a, 0); })
+#define vld1_s16(__a) __extension__ ({ \
+ (int16x4_t)__builtin_neon_vld1_v(__a, 1); })
+#define vld1_s32(__a) __extension__ ({ \
+ (int32x2_t)__builtin_neon_vld1_v(__a, 2); })
+#define vld1_s64(__a) __extension__ ({ \
+ (int64x1_t)__builtin_neon_vld1_v(__a, 3); })
+#define vld1_f16(__a) __extension__ ({ \
+ (float16x4_t)__builtin_neon_vld1_v(__a, 7); })
+#define vld1_f32(__a) __extension__ ({ \
+ (float32x2_t)__builtin_neon_vld1_v(__a, 8); })
+#define vld1_p8(__a) __extension__ ({ \
+ (poly8x8_t)__builtin_neon_vld1_v(__a, 4); })
+#define vld1_p16(__a) __extension__ ({ \
+ (poly16x4_t)__builtin_neon_vld1_v(__a, 5); })
+
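+/* vld1_dup_*: load a single element and replicate it into every lane. */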
+#define vld1q_dup_u8(__a) __extension__ ({ \
+ (uint8x16_t)__builtin_neon_vld1q_dup_v(__a, 48); })
+#define vld1q_dup_u16(__a) __extension__ ({ \
+ (uint16x8_t)__builtin_neon_vld1q_dup_v(__a, 49); })
+#define vld1q_dup_u32(__a) __extension__ ({ \
+ (uint32x4_t)__builtin_neon_vld1q_dup_v(__a, 50); })
+#define vld1q_dup_u64(__a) __extension__ ({ \
+ (uint64x2_t)__builtin_neon_vld1q_dup_v(__a, 51); })
+#define vld1q_dup_s8(__a) __extension__ ({ \
+ (int8x16_t)__builtin_neon_vld1q_dup_v(__a, 32); })
+#define vld1q_dup_s16(__a) __extension__ ({ \
+ (int16x8_t)__builtin_neon_vld1q_dup_v(__a, 33); })
+#define vld1q_dup_s32(__a) __extension__ ({ \
+ (int32x4_t)__builtin_neon_vld1q_dup_v(__a, 34); })
+#define vld1q_dup_s64(__a) __extension__ ({ \
+ (int64x2_t)__builtin_neon_vld1q_dup_v(__a, 35); })
+#define vld1q_dup_f16(__a) __extension__ ({ \
+ (float16x8_t)__builtin_neon_vld1q_dup_v(__a, 39); })
+#define vld1q_dup_f32(__a) __extension__ ({ \
+ (float32x4_t)__builtin_neon_vld1q_dup_v(__a, 40); })
+#define vld1q_dup_p8(__a) __extension__ ({ \
+ (poly8x16_t)__builtin_neon_vld1q_dup_v(__a, 36); })
+#define vld1q_dup_p16(__a) __extension__ ({ \
+ (poly16x8_t)__builtin_neon_vld1q_dup_v(__a, 37); })
+#define vld1_dup_u8(__a) __extension__ ({ \
+ (uint8x8_t)__builtin_neon_vld1_dup_v(__a, 16); })
+#define vld1_dup_u16(__a) __extension__ ({ \
+ (uint16x4_t)__builtin_neon_vld1_dup_v(__a, 17); })
+#define vld1_dup_u32(__a) __extension__ ({ \
+ (uint32x2_t)__builtin_neon_vld1_dup_v(__a, 18); })
+#define vld1_dup_u64(__a) __extension__ ({ \
+ (uint64x1_t)__builtin_neon_vld1_dup_v(__a, 19); })
+#define vld1_dup_s8(__a) __extension__ ({ \
+ (int8x8_t)__builtin_neon_vld1_dup_v(__a, 0); })
+#define vld1_dup_s16(__a) __extension__ ({ \
+ (int16x4_t)__builtin_neon_vld1_dup_v(__a, 1); })
+#define vld1_dup_s32(__a) __extension__ ({ \
+ (int32x2_t)__builtin_neon_vld1_dup_v(__a, 2); })
+#define vld1_dup_s64(__a) __extension__ ({ \
+ (int64x1_t)__builtin_neon_vld1_dup_v(__a, 3); })
+#define vld1_dup_f16(__a) __extension__ ({ \
+ (float16x4_t)__builtin_neon_vld1_dup_v(__a, 7); })
+#define vld1_dup_f32(__a) __extension__ ({ \
+ (float32x2_t)__builtin_neon_vld1_dup_v(__a, 8); })
+#define vld1_dup_p8(__a) __extension__ ({ \
+ (poly8x8_t)__builtin_neon_vld1_dup_v(__a, 4); })
+#define vld1_dup_p16(__a) __extension__ ({ \
+ (poly16x4_t)__builtin_neon_vld1_dup_v(__a, 5); })
+
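+/* vld1_lane_*: load one element from memory into lane __c of __b, leaving
+ * the other lanes unchanged. */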
+#define vld1q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 48); })
+#define vld1q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 49); })
+#define vld1q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 50); })
+#define vld1q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 51); })
+#define vld1q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vld1q_lane_v(__a, __b, __c, 32); })
+#define vld1q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 33); })
+#define vld1q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 34); })
+#define vld1q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 35); })
+#define vld1q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8_t __b = (b); \
+ (float16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 39); })
+#define vld1q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4_t __b = (b); \
+ (float32x4_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 40); })
+#define vld1q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16_t __b = (b); \
+ (poly8x16_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 36); })
+#define vld1q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8_t __b = (b); \
+ (poly16x8_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 37); })
+#define vld1_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 16); })
+#define vld1_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 17); })
+#define vld1_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 18); })
+#define vld1_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 19); })
+#define vld1_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vld1_lane_v(__a, __b, __c, 0); })
+#define vld1_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 1); })
+#define vld1_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 2); })
+#define vld1_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 3); })
+#define vld1_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4_t __b = (b); \
+ (float16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 7); })
+#define vld1_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2_t __b = (b); \
+ (float32x2_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 8); })
+#define vld1_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8_t __b = (b); \
+ (poly8x8_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 4); })
+#define vld1_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4_t __b = (b); \
+ (poly16x4_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 5); })
+
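+/* vld2_* and vld3_*: structure loads that deinterleave memory into two or
+ * three vectors (with _dup and _lane variants analogous to vld1's). The
+ * builtins fill the aggregate through a pointer, so each macro is a
+ * statement expression that declares r and returns it. */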
+#define vld2q_u8(__a) __extension__ ({ \
+ uint8x16x2_t r; __builtin_neon_vld2q_v(&r, __a, 48); r; })
+#define vld2q_u16(__a) __extension__ ({ \
+ uint16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 49); r; })
+#define vld2q_u32(__a) __extension__ ({ \
+ uint32x4x2_t r; __builtin_neon_vld2q_v(&r, __a, 50); r; })
+#define vld2q_s8(__a) __extension__ ({ \
+ int8x16x2_t r; __builtin_neon_vld2q_v(&r, __a, 32); r; })
+#define vld2q_s16(__a) __extension__ ({ \
+ int16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 33); r; })
+#define vld2q_s32(__a) __extension__ ({ \
+ int32x4x2_t r; __builtin_neon_vld2q_v(&r, __a, 34); r; })
+#define vld2q_f16(__a) __extension__ ({ \
+ float16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 39); r; })
+#define vld2q_f32(__a) __extension__ ({ \
+ float32x4x2_t r; __builtin_neon_vld2q_v(&r, __a, 40); r; })
+#define vld2q_p8(__a) __extension__ ({ \
+ poly8x16x2_t r; __builtin_neon_vld2q_v(&r, __a, 36); r; })
+#define vld2q_p16(__a) __extension__ ({ \
+ poly16x8x2_t r; __builtin_neon_vld2q_v(&r, __a, 37); r; })
+#define vld2_u8(__a) __extension__ ({ \
+ uint8x8x2_t r; __builtin_neon_vld2_v(&r, __a, 16); r; })
+#define vld2_u16(__a) __extension__ ({ \
+ uint16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 17); r; })
+#define vld2_u32(__a) __extension__ ({ \
+ uint32x2x2_t r; __builtin_neon_vld2_v(&r, __a, 18); r; })
+#define vld2_u64(__a) __extension__ ({ \
+ uint64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 19); r; })
+#define vld2_s8(__a) __extension__ ({ \
+ int8x8x2_t r; __builtin_neon_vld2_v(&r, __a, 0); r; })
+#define vld2_s16(__a) __extension__ ({ \
+ int16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 1); r; })
+#define vld2_s32(__a) __extension__ ({ \
+ int32x2x2_t r; __builtin_neon_vld2_v(&r, __a, 2); r; })
+#define vld2_s64(__a) __extension__ ({ \
+ int64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 3); r; })
+#define vld2_f16(__a) __extension__ ({ \
+ float16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 7); r; })
+#define vld2_f32(__a) __extension__ ({ \
+ float32x2x2_t r; __builtin_neon_vld2_v(&r, __a, 8); r; })
+#define vld2_p8(__a) __extension__ ({ \
+ poly8x8x2_t r; __builtin_neon_vld2_v(&r, __a, 4); r; })
+#define vld2_p16(__a) __extension__ ({ \
+ poly16x4x2_t r; __builtin_neon_vld2_v(&r, __a, 5); r; })
+
+#define vld2_dup_u8(__a) __extension__ ({ \
+ uint8x8x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 16); r; })
+#define vld2_dup_u16(__a) __extension__ ({ \
+ uint16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 17); r; })
+#define vld2_dup_u32(__a) __extension__ ({ \
+ uint32x2x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 18); r; })
+#define vld2_dup_u64(__a) __extension__ ({ \
+ uint64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 19); r; })
+#define vld2_dup_s8(__a) __extension__ ({ \
+ int8x8x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 0); r; })
+#define vld2_dup_s16(__a) __extension__ ({ \
+ int16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 1); r; })
+#define vld2_dup_s32(__a) __extension__ ({ \
+ int32x2x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 2); r; })
+#define vld2_dup_s64(__a) __extension__ ({ \
+ int64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 3); r; })
+#define vld2_dup_f16(__a) __extension__ ({ \
+ float16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 7); r; })
+#define vld2_dup_f32(__a) __extension__ ({ \
+ float32x2x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 8); r; })
+#define vld2_dup_p8(__a) __extension__ ({ \
+ poly8x8x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 4); r; })
+#define vld2_dup_p16(__a) __extension__ ({ \
+ poly16x4x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 5); r; })
+
+#define vld2q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8x2_t __b = (b); \
+ uint16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 49); r; })
+#define vld2q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4x2_t __b = (b); \
+ uint32x4x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 50); r; })
+#define vld2q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8x2_t __b = (b); \
+ int16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 33); r; })
+#define vld2q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4x2_t __b = (b); \
+ int32x4x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 34); r; })
+#define vld2q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8x2_t __b = (b); \
+ float16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 39); r; })
+#define vld2q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4x2_t __b = (b); \
+ float32x4x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 40); r; })
+#define vld2q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8x2_t __b = (b); \
+ poly16x8x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 37); r; })
+#define vld2_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8x2_t __b = (b); \
+ uint8x8x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 16); r; })
+#define vld2_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4x2_t __b = (b); \
+ uint16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 17); r; })
+#define vld2_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2x2_t __b = (b); \
+ uint32x2x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 18); r; })
+#define vld2_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8x2_t __b = (b); \
+ int8x8x2_t r; __builtin_neon_vld2_lane_v(&r, __a, __b.val[0], __b.val[1], __c, 0); r; })
+#define vld2_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4x2_t __b = (b); \
+ int16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 1); r; })
+#define vld2_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2x2_t __b = (b); \
+ int32x2x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 2); r; })
+#define vld2_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4x2_t __b = (b); \
+ float16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 7); r; })
+#define vld2_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2x2_t __b = (b); \
+ float32x2x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 8); r; })
+#define vld2_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8x2_t __b = (b); \
+ poly8x8x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 4); r; })
+#define vld2_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4x2_t __b = (b); \
+ poly16x4x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 5); r; })
+
+#define vld3q_u8(__a) __extension__ ({ \
+ uint8x16x3_t r; __builtin_neon_vld3q_v(&r, __a, 48); r; })
+#define vld3q_u16(__a) __extension__ ({ \
+ uint16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 49); r; })
+#define vld3q_u32(__a) __extension__ ({ \
+ uint32x4x3_t r; __builtin_neon_vld3q_v(&r, __a, 50); r; })
+#define vld3q_s8(__a) __extension__ ({ \
+ int8x16x3_t r; __builtin_neon_vld3q_v(&r, __a, 32); r; })
+#define vld3q_s16(__a) __extension__ ({ \
+ int16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 33); r; })
+#define vld3q_s32(__a) __extension__ ({ \
+ int32x4x3_t r; __builtin_neon_vld3q_v(&r, __a, 34); r; })
+#define vld3q_f16(__a) __extension__ ({ \
+ float16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 39); r; })
+#define vld3q_f32(__a) __extension__ ({ \
+ float32x4x3_t r; __builtin_neon_vld3q_v(&r, __a, 40); r; })
+#define vld3q_p8(__a) __extension__ ({ \
+ poly8x16x3_t r; __builtin_neon_vld3q_v(&r, __a, 36); r; })
+#define vld3q_p16(__a) __extension__ ({ \
+ poly16x8x3_t r; __builtin_neon_vld3q_v(&r, __a, 37); r; })
+#define vld3_u8(__a) __extension__ ({ \
+ uint8x8x3_t r; __builtin_neon_vld3_v(&r, __a, 16); r; })
+#define vld3_u16(__a) __extension__ ({ \
+ uint16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 17); r; })
+#define vld3_u32(__a) __extension__ ({ \
+ uint32x2x3_t r; __builtin_neon_vld3_v(&r, __a, 18); r; })
+#define vld3_u64(__a) __extension__ ({ \
+ uint64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 19); r; })
+#define vld3_s8(__a) __extension__ ({ \
+ int8x8x3_t r; __builtin_neon_vld3_v(&r, __a, 0); r; })
+#define vld3_s16(__a) __extension__ ({ \
+ int16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 1); r; })
+#define vld3_s32(__a) __extension__ ({ \
+ int32x2x3_t r; __builtin_neon_vld3_v(&r, __a, 2); r; })
+#define vld3_s64(__a) __extension__ ({ \
+ int64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 3); r; })
+#define vld3_f16(__a) __extension__ ({ \
+ float16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 7); r; })
+#define vld3_f32(__a) __extension__ ({ \
+ float32x2x3_t r; __builtin_neon_vld3_v(&r, __a, 8); r; })
+#define vld3_p8(__a) __extension__ ({ \
+ poly8x8x3_t r; __builtin_neon_vld3_v(&r, __a, 4); r; })
+#define vld3_p16(__a) __extension__ ({ \
+ poly16x4x3_t r; __builtin_neon_vld3_v(&r, __a, 5); r; })
+
+#define vld3_dup_u8(__a) __extension__ ({ \
+ uint8x8x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 16); r; })
+#define vld3_dup_u16(__a) __extension__ ({ \
+ uint16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 17); r; })
+#define vld3_dup_u32(__a) __extension__ ({ \
+ uint32x2x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 18); r; })
+#define vld3_dup_u64(__a) __extension__ ({ \
+ uint64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 19); r; })
+#define vld3_dup_s8(__a) __extension__ ({ \
+ int8x8x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 0); r; })
+#define vld3_dup_s16(__a) __extension__ ({ \
+ int16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 1); r; })
+#define vld3_dup_s32(__a) __extension__ ({ \
+ int32x2x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 2); r; })
+#define vld3_dup_s64(__a) __extension__ ({ \
+ int64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 3); r; })
+#define vld3_dup_f16(__a) __extension__ ({ \
+ float16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 7); r; })
+#define vld3_dup_f32(__a) __extension__ ({ \
+ float32x2x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 8); r; })
+#define vld3_dup_p8(__a) __extension__ ({ \
+ poly8x8x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 4); r; })
+#define vld3_dup_p16(__a) __extension__ ({ \
+ poly16x4x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 5); r; })
+
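+/* vld3*_lane_*: load one 3-element structure into lane __c of each vector;
+ * all other lanes are taken from the existing aggregate __b.  These are
+ * macros (like the vld2 lane loads above) so the lane index reaches the
+ * builtin as a compile-time constant, as the VLD3 lane form requires. */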
+#define vld3q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8x3_t __b = (b); \
+ uint16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 49); r; })
+#define vld3q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4x3_t __b = (b); \
+ uint32x4x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 50); r; })
+#define vld3q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8x3_t __b = (b); \
+ int16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 33); r; })
+#define vld3q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4x3_t __b = (b); \
+ int32x4x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 34); r; })
+#define vld3q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8x3_t __b = (b); \
+ float16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 39); r; })
+#define vld3q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4x3_t __b = (b); \
+ float32x4x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 40); r; })
+#define vld3q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8x3_t __b = (b); \
+ poly16x8x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 37); r; })
+#define vld3_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8x3_t __b = (b); \
+ uint8x8x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 16); r; })
+#define vld3_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4x3_t __b = (b); \
+ uint16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 17); r; })
+#define vld3_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2x3_t __b = (b); \
+ uint32x2x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 18); r; })
+#define vld3_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8x3_t __b = (b); \
+ int8x8x3_t r; __builtin_neon_vld3_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __c, 0); r; })
+#define vld3_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4x3_t __b = (b); \
+ int16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 1); r; })
+#define vld3_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2x3_t __b = (b); \
+ int32x2x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 2); r; })
+#define vld3_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4x3_t __b = (b); \
+ float16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 7); r; })
+#define vld3_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2x3_t __b = (b); \
+ float32x2x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 8); r; })
+#define vld3_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8x3_t __b = (b); \
+ poly8x8x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 4); r; })
+#define vld3_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4x3_t __b = (b); \
+ poly16x4x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 5); r; })
+
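+/* vld4_*: as vld3 but for 4-element structures, de-interleaving memory into
+ * the four vectors of a *x4_t aggregate.  The _dup and _lane variants that
+ * follow mirror the vld3 versions. */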
+#define vld4q_u8(__a) __extension__ ({ \
+ uint8x16x4_t r; __builtin_neon_vld4q_v(&r, __a, 48); r; })
+#define vld4q_u16(__a) __extension__ ({ \
+ uint16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 49); r; })
+#define vld4q_u32(__a) __extension__ ({ \
+ uint32x4x4_t r; __builtin_neon_vld4q_v(&r, __a, 50); r; })
+#define vld4q_s8(__a) __extension__ ({ \
+ int8x16x4_t r; __builtin_neon_vld4q_v(&r, __a, 32); r; })
+#define vld4q_s16(__a) __extension__ ({ \
+ int16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 33); r; })
+#define vld4q_s32(__a) __extension__ ({ \
+ int32x4x4_t r; __builtin_neon_vld4q_v(&r, __a, 34); r; })
+#define vld4q_f16(__a) __extension__ ({ \
+ float16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 39); r; })
+#define vld4q_f32(__a) __extension__ ({ \
+ float32x4x4_t r; __builtin_neon_vld4q_v(&r, __a, 40); r; })
+#define vld4q_p8(__a) __extension__ ({ \
+ poly8x16x4_t r; __builtin_neon_vld4q_v(&r, __a, 36); r; })
+#define vld4q_p16(__a) __extension__ ({ \
+ poly16x8x4_t r; __builtin_neon_vld4q_v(&r, __a, 37); r; })
+#define vld4_u8(__a) __extension__ ({ \
+ uint8x8x4_t r; __builtin_neon_vld4_v(&r, __a, 16); r; })
+#define vld4_u16(__a) __extension__ ({ \
+ uint16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 17); r; })
+#define vld4_u32(__a) __extension__ ({ \
+ uint32x2x4_t r; __builtin_neon_vld4_v(&r, __a, 18); r; })
+#define vld4_u64(__a) __extension__ ({ \
+ uint64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 19); r; })
+#define vld4_s8(__a) __extension__ ({ \
+ int8x8x4_t r; __builtin_neon_vld4_v(&r, __a, 0); r; })
+#define vld4_s16(__a) __extension__ ({ \
+ int16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 1); r; })
+#define vld4_s32(__a) __extension__ ({ \
+ int32x2x4_t r; __builtin_neon_vld4_v(&r, __a, 2); r; })
+#define vld4_s64(__a) __extension__ ({ \
+ int64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 3); r; })
+#define vld4_f16(__a) __extension__ ({ \
+ float16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 7); r; })
+#define vld4_f32(__a) __extension__ ({ \
+ float32x2x4_t r; __builtin_neon_vld4_v(&r, __a, 8); r; })
+#define vld4_p8(__a) __extension__ ({ \
+ poly8x8x4_t r; __builtin_neon_vld4_v(&r, __a, 4); r; })
+#define vld4_p16(__a) __extension__ ({ \
+ poly16x4x4_t r; __builtin_neon_vld4_v(&r, __a, 5); r; })
+
+#define vld4_dup_u8(__a) __extension__ ({ \
+ uint8x8x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 16); r; })
+#define vld4_dup_u16(__a) __extension__ ({ \
+ uint16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 17); r; })
+#define vld4_dup_u32(__a) __extension__ ({ \
+ uint32x2x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 18); r; })
+#define vld4_dup_u64(__a) __extension__ ({ \
+ uint64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 19); r; })
+#define vld4_dup_s8(__a) __extension__ ({ \
+ int8x8x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 0); r; })
+#define vld4_dup_s16(__a) __extension__ ({ \
+ int16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 1); r; })
+#define vld4_dup_s32(__a) __extension__ ({ \
+ int32x2x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 2); r; })
+#define vld4_dup_s64(__a) __extension__ ({ \
+ int64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 3); r; })
+#define vld4_dup_f16(__a) __extension__ ({ \
+ float16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 7); r; })
+#define vld4_dup_f32(__a) __extension__ ({ \
+ float32x2x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 8); r; })
+#define vld4_dup_p8(__a) __extension__ ({ \
+ poly8x8x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 4); r; })
+#define vld4_dup_p16(__a) __extension__ ({ \
+ poly16x4x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 5); r; })
+
+#define vld4q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8x4_t __b = (b); \
+ uint16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 49); r; })
+#define vld4q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4x4_t __b = (b); \
+ uint32x4x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 50); r; })
+#define vld4q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8x4_t __b = (b); \
+ int16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 33); r; })
+#define vld4q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4x4_t __b = (b); \
+ int32x4x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 34); r; })
+#define vld4q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8x4_t __b = (b); \
+ float16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 39); r; })
+#define vld4q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4x4_t __b = (b); \
+ float32x4x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 40); r; })
+#define vld4q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8x4_t __b = (b); \
+ poly16x8x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 37); r; })
+#define vld4_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8x4_t __b = (b); \
+ uint8x8x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 16); r; })
+#define vld4_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4x4_t __b = (b); \
+ uint16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 17); r; })
+#define vld4_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2x4_t __b = (b); \
+ uint32x2x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 18); r; })
+#define vld4_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8x4_t __b = (b); \
+ int8x8x4_t r; __builtin_neon_vld4_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); r; })
+#define vld4_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4x4_t __b = (b); \
+ int16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 1); r; })
+#define vld4_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2x4_t __b = (b); \
+ int32x2x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 2); r; })
+#define vld4_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4x4_t __b = (b); \
+ float16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 7); r; })
+#define vld4_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2x4_t __b = (b); \
+ float32x2x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 8); r; })
+#define vld4_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8x4_t __b = (b); \
+ poly8x8x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 4); r; })
+#define vld4_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4x4_t __b = (b); \
+ poly16x4x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 5); r; })
+
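+/* vmax_* and vmin_* (below): lane-by-lane maximum and minimum.  These go
+ * through the polymorphic builtins (mapping onto VMAX/VMIN) rather than
+ * plain C, since C has no per-lane max or min operator. */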
+__ai int8x8_t vmax_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vmax_v(__a, __b, 0); }
+__ai int16x4_t vmax_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vmax_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vmax_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vmax_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vmax_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai float32x2_t vmax_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai int8x16_t vmaxq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vmaxq_v(__a, __b, 32); }
+__ai int16x8_t vmaxq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vmaxq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vmaxq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vmaxq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vmaxq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai float32x4_t vmaxq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+
+__ai int8x8_t vmin_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vmin_v(__a, __b, 0); }
+__ai int16x4_t vmin_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vmin_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vmin_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vmin_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vmin_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai float32x2_t vmin_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai int8x16_t vminq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vminq_v(__a, __b, 32); }
+__ai int16x8_t vminq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vminq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vminq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vminq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vminq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai float32x4_t vminq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+
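+/* vmla_*: multiply-accumulate, written as plain C so the compiler is free
+ * to fuse the add and multiply into a VMLA instruction.  Illustrative use
+ * (not part of this header):
+ *   int16x4_t acc2 = vmla_s16(acc, x, y);   // per lane: acc + x*y
+ */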
+__ai int8x8_t vmla_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return __a + (__b * __c); }
+__ai int16x4_t vmla_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return __a + (__b * __c); }
+__ai int32x2_t vmla_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return __a + (__b * __c); }
+__ai float32x2_t vmla_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
+ return __a + (__b * __c); }
+__ai uint8x8_t vmla_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return __a + (__b * __c); }
+__ai uint16x4_t vmla_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return __a + (__b * __c); }
+__ai uint32x2_t vmla_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return __a + (__b * __c); }
+__ai int8x16_t vmlaq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
+ return __a + (__b * __c); }
+__ai int16x8_t vmlaq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return __a + (__b * __c); }
+__ai int32x4_t vmlaq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return __a + (__b * __c); }
+__ai float32x4_t vmlaq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
+ return __a + (__b * __c); }
+__ai uint8x16_t vmlaq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return __a + (__b * __c); }
+__ai uint16x8_t vmlaq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return __a + (__b * __c); }
+__ai uint32x4_t vmlaq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return __a + (__b * __c); }
+
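+/* vmlal_*: widening multiply-accumulate.  The two narrow operands are
+ * multiplied into a double-width product (via vmull_*) and added to the
+ * double-width accumulator __a.  The _lane and _n variants below substitute
+ * a selected lane or a broadcast scalar for the second multiplicand. */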
+__ai int16x8_t vmlal_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return __a + vmull_s8(__b, __c); }
+__ai int32x4_t vmlal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return __a + vmull_s16(__b, __c); }
+__ai int64x2_t vmlal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return __a + vmull_s32(__b, __c); }
+__ai uint16x8_t vmlal_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return __a + vmull_u8(__b, __c); }
+__ai uint32x4_t vmlal_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return __a + vmull_u16(__b, __c); }
+__ai uint64x2_t vmlal_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return __a + vmull_u32(__b, __c); }
+
+#define vmlal_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
+ __a + vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
+ __a + vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlal_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
+ __a + vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
+ __a + vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vmlal_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
+ return __a + vmull_s16(__b, (int16x4_t){ __c, __c, __c, __c }); }
+__ai int64x2_t vmlal_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
+ return __a + vmull_s32(__b, (int32x2_t){ __c, __c }); }
+__ai uint32x4_t vmlal_n_u16(uint32x4_t __a, uint16x4_t __b, uint16_t __c) {
+ return __a + vmull_u16(__b, (uint16x4_t){ __c, __c, __c, __c }); }
+__ai uint64x2_t vmlal_n_u32(uint64x2_t __a, uint32x2_t __b, uint32_t __c) {
+ return __a + vmull_u32(__b, (uint32x2_t){ __c, __c }); }
+
+#define vmla_lane_s16(a, b, c, __d) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmla_lane_s32(a, b, c, __d) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmla_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmla_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmla_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlaq_lane_s16(a, b, c, __d) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlaq_lane_s32(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlaq_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlaq_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlaq_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+
+__ai int16x4_t vmla_n_s16(int16x4_t __a, int16x4_t __b, int16_t __c) {
+ return __a + (__b * (int16x4_t){ __c, __c, __c, __c }); }
+__ai int32x2_t vmla_n_s32(int32x2_t __a, int32x2_t __b, int32_t __c) {
+ return __a + (__b * (int32x2_t){ __c, __c }); }
+__ai uint16x4_t vmla_n_u16(uint16x4_t __a, uint16x4_t __b, uint16_t __c) {
+ return __a + (__b * (uint16x4_t){ __c, __c, __c, __c }); }
+__ai uint32x2_t vmla_n_u32(uint32x2_t __a, uint32x2_t __b, uint32_t __c) {
+ return __a + (__b * (uint32x2_t){ __c, __c }); }
+__ai float32x2_t vmla_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
+ return __a + (__b * (float32x2_t){ __c, __c }); }
+__ai int16x8_t vmlaq_n_s16(int16x8_t __a, int16x8_t __b, int16_t __c) {
+ return __a + (__b * (int16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
+__ai int32x4_t vmlaq_n_s32(int32x4_t __a, int32x4_t __b, int32_t __c) {
+ return __a + (__b * (int32x4_t){ __c, __c, __c, __c }); }
+__ai uint16x8_t vmlaq_n_u16(uint16x8_t __a, uint16x8_t __b, uint16_t __c) {
+ return __a + (__b * (uint16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
+__ai uint32x4_t vmlaq_n_u32(uint32x4_t __a, uint32x4_t __b, uint32_t __c) {
+ return __a + (__b * (uint32x4_t){ __c, __c, __c, __c }); }
+__ai float32x4_t vmlaq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
+ return __a + (__b * (float32x4_t){ __c, __c, __c, __c }); }
+
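+/* vmls_* and vmlsl_*: the subtracting counterparts of vmla_* and vmlal_*
+ * above; the (possibly widened) product is subtracted from the accumulator
+ * instead of added. */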
+__ai int8x8_t vmls_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return __a - (__b * __c); }
+__ai int16x4_t vmls_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return __a - (__b * __c); }
+__ai int32x2_t vmls_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return __a - (__b * __c); }
+__ai float32x2_t vmls_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
+ return __a - (__b * __c); }
+__ai uint8x8_t vmls_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return __a - (__b * __c); }
+__ai uint16x4_t vmls_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return __a - (__b * __c); }
+__ai uint32x2_t vmls_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return __a - (__b * __c); }
+__ai int8x16_t vmlsq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
+ return __a - (__b * __c); }
+__ai int16x8_t vmlsq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return __a - (__b * __c); }
+__ai int32x4_t vmlsq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return __a - (__b * __c); }
+__ai float32x4_t vmlsq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
+ return __a - (__b * __c); }
+__ai uint8x16_t vmlsq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return __a - (__b * __c); }
+__ai uint16x8_t vmlsq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return __a - (__b * __c); }
+__ai uint32x4_t vmlsq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return __a - (__b * __c); }
+
+__ai int16x8_t vmlsl_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return __a - vmull_s8(__b, __c); }
+__ai int32x4_t vmlsl_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return __a - vmull_s16(__b, __c); }
+__ai int64x2_t vmlsl_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return __a - vmull_s32(__b, __c); }
+__ai uint16x8_t vmlsl_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return __a - vmull_u8(__b, __c); }
+__ai uint32x4_t vmlsl_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) {
+ return __a - vmull_u16(__b, __c); }
+__ai uint64x2_t vmlsl_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) {
+ return __a - vmull_u32(__b, __c); }
+
+#define vmlsl_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
+ __a - vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
+ __a - vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlsl_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
+ __a - vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
+ __a - vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vmlsl_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
+ return __a - vmull_s16(__b, (int16x4_t){ __c, __c, __c, __c }); }
+__ai int64x2_t vmlsl_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
+ return __a - vmull_s32(__b, (int32x2_t){ __c, __c }); }
+__ai uint32x4_t vmlsl_n_u16(uint32x4_t __a, uint16x4_t __b, uint16_t __c) {
+ return __a - vmull_u16(__b, (uint16x4_t){ __c, __c, __c, __c }); }
+__ai uint64x2_t vmlsl_n_u32(uint64x2_t __a, uint32x2_t __b, uint32_t __c) {
+ return __a - vmull_u32(__b, (uint32x2_t){ __c, __c }); }
+
+#define vmls_lane_s16(a, b, c, __d) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmls_lane_s32(a, b, c, __d) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmls_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmls_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x2_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmls_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlsq_lane_s16(a, b, c, __d) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlsq_lane_s32(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsq_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlsq_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsq_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+
+__ai int16x4_t vmls_n_s16(int16x4_t __a, int16x4_t __b, int16_t __c) {
+ return __a - (__b * (int16x4_t){ __c, __c, __c, __c }); }
+__ai int32x2_t vmls_n_s32(int32x2_t __a, int32x2_t __b, int32_t __c) {
+ return __a - (__b * (int32x2_t){ __c, __c }); }
+__ai uint16x4_t vmls_n_u16(uint16x4_t __a, uint16x4_t __b, uint16_t __c) {
+ return __a - (__b * (uint16x4_t){ __c, __c, __c, __c }); }
+__ai uint32x2_t vmls_n_u32(uint32x2_t __a, uint32x2_t __b, uint32_t __c) {
+ return __a - (__b * (uint32x2_t){ __c, __c }); }
+__ai float32x2_t vmls_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
+ return __a - (__b * (float32x2_t){ __c, __c }); }
+__ai int16x8_t vmlsq_n_s16(int16x8_t __a, int16x8_t __b, int16_t __c) {
+ return __a - (__b * (int16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
+__ai int32x4_t vmlsq_n_s32(int32x4_t __a, int32x4_t __b, int32_t __c) {
+ return __a - (__b * (int32x4_t){ __c, __c, __c, __c }); }
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __a, uint16x8_t __b, uint16_t __c) {
+ return __a - (__b * (uint16x8_t){ __c, __c, __c, __c, __c, __c, __c, __c }); }
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __a, uint32x4_t __b, uint32_t __c) {
+ return __a - (__b * (uint32x4_t){ __c, __c, __c, __c }); }
+__ai float32x4_t vmlsq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
+ return __a - (__b * (float32x4_t){ __c, __c, __c, __c }); }
+
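+/* vmovn_*: narrowing move.  Each double-width element is truncated to its
+ * low half, with no saturation (compare vqmovn_* further down). */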
+__ai int8x8_t vmovn_s16(int16x8_t __a) {
+ return (int8x8_t)__builtin_neon_vmovn_v((int8x16_t)__a, 0); }
+__ai int16x4_t vmovn_s32(int32x4_t __a) {
+ return (int16x4_t)__builtin_neon_vmovn_v((int8x16_t)__a, 1); }
+__ai int32x2_t vmovn_s64(int64x2_t __a) {
+ return (int32x2_t)__builtin_neon_vmovn_v((int8x16_t)__a, 2); }
+__ai uint8x8_t vmovn_u16(uint16x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vmovn_v((int8x16_t)__a, 16); }
+__ai uint16x4_t vmovn_u32(uint32x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vmovn_v((int8x16_t)__a, 17); }
+__ai uint32x2_t vmovn_u64(uint64x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vmovn_v((int8x16_t)__a, 18); }
+
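+/* vmov_n_* and vmovq_n_*: broadcast a scalar into every lane, written as a
+ * compound literal.  Illustrative use (not part of this header):
+ *   uint8x16_t all_ones = vmovq_n_u8(0xff);
+ */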
+__ai uint8x8_t vmov_n_u8(uint8_t __a) {
+ return (uint8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai uint16x4_t vmov_n_u16(uint16_t __a) {
+ return (uint16x4_t){ __a, __a, __a, __a }; }
+__ai uint32x2_t vmov_n_u32(uint32_t __a) {
+ return (uint32x2_t){ __a, __a }; }
+__ai int8x8_t vmov_n_s8(int8_t __a) {
+ return (int8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai int16x4_t vmov_n_s16(int16_t __a) {
+ return (int16x4_t){ __a, __a, __a, __a }; }
+__ai int32x2_t vmov_n_s32(int32_t __a) {
+ return (int32x2_t){ __a, __a }; }
+__ai poly8x8_t vmov_n_p8(poly8_t __a) {
+ return (poly8x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai poly16x4_t vmov_n_p16(poly16_t __a) {
+ return (poly16x4_t){ __a, __a, __a, __a }; }
+__ai float32x2_t vmov_n_f32(float32_t __a) {
+ return (float32x2_t){ __a, __a }; }
+__ai uint8x16_t vmovq_n_u8(uint8_t __a) {
+ return (uint8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai uint16x8_t vmovq_n_u16(uint16_t __a) {
+ return (uint16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai uint32x4_t vmovq_n_u32(uint32_t __a) {
+ return (uint32x4_t){ __a, __a, __a, __a }; }
+__ai int8x16_t vmovq_n_s8(int8_t __a) {
+ return (int8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai int16x8_t vmovq_n_s16(int16_t __a) {
+ return (int16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai int32x4_t vmovq_n_s32(int32_t __a) {
+ return (int32x4_t){ __a, __a, __a, __a }; }
+__ai poly8x16_t vmovq_n_p8(poly8_t __a) {
+ return (poly8x16_t){ __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai poly16x8_t vmovq_n_p16(poly16_t __a) {
+ return (poly16x8_t){ __a, __a, __a, __a, __a, __a, __a, __a }; }
+__ai float32x4_t vmovq_n_f32(float32_t __a) {
+ return (float32x4_t){ __a, __a, __a, __a }; }
+__ai int64x1_t vmov_n_s64(int64_t __a) {
+ return (int64x1_t){ __a }; }
+__ai uint64x1_t vmov_n_u64(uint64_t __a) {
+ return (uint64x1_t){ __a }; }
+__ai int64x2_t vmovq_n_s64(int64_t __a) {
+ return (int64x2_t){ __a, __a }; }
+__ai uint64x2_t vmovq_n_u64(uint64_t __a) {
+ return (uint64x2_t){ __a, __a }; }
+
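+/* vmul_*: lane-by-lane multiply, expressed directly in C. */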
+__ai int8x8_t vmul_s8(int8x8_t __a, int8x8_t __b) {
+ return __a * __b; }
+__ai int16x4_t vmul_s16(int16x4_t __a, int16x4_t __b) {
+ return __a * __b; }
+__ai int32x2_t vmul_s32(int32x2_t __a, int32x2_t __b) {
+ return __a * __b; }
+__ai float32x2_t vmul_f32(float32x2_t __a, float32x2_t __b) {
+ return __a * __b; }
+__ai uint8x8_t vmul_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a * __b; }
+__ai uint16x4_t vmul_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a * __b; }
+__ai uint32x2_t vmul_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a * __b; }
+__ai int8x16_t vmulq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a * __b; }
+__ai int16x8_t vmulq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a * __b; }
+__ai int32x4_t vmulq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a * __b; }
+__ai float32x4_t vmulq_f32(float32x4_t __a, float32x4_t __b) {
+ return __a * __b; }
+__ai uint8x16_t vmulq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a * __b; }
+__ai uint16x8_t vmulq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a * __b; }
+__ai uint32x4_t vmulq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a * __b; }
+
+#define vmull_lane_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ vmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_lane_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ vmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vmull_lane_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ vmull_u16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_lane_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ vmull_u32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+
+__ai int32x4_t vmull_n_s16(int16x4_t __a, int16_t __b) {
+ return (int32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 34); }
+__ai int64x2_t vmull_n_s32(int32x2_t __a, int32_t __b) {
+ return (int64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 35); }
+__ai uint32x4_t vmull_n_u16(uint16x4_t __a, uint16_t __b) {
+ return (uint32x4_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(uint16x4_t){ __b, __b, __b, __b }, 50); }
+__ai uint64x2_t vmull_n_u32(uint32x2_t __a, uint32_t __b) {
+ return (uint64x2_t)__builtin_neon_vmull_v((int8x8_t)__a, (int8x8_t)(uint32x2_t){ __b, __b }, 51); }
+
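+/* vmul_p8 and vmulq_p8: polynomial (carry-less) multiplication over GF(2);
+ * only the low 8 bits of each product are kept.  This has no C operator, so
+ * it goes through the builtin. */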
+__ai poly8x8_t vmul_p8(poly8x8_t __a, poly8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vmul_v((int8x8_t)__a, (int8x8_t)__b, 4); }
+__ai poly8x16_t vmulq_p8(poly8x16_t __a, poly8x16_t __b) {
+ return (poly8x16_t)__builtin_neon_vmulq_v((int8x16_t)__a, (int8x16_t)__b, 36); }
+
+#define vmul_lane_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmul_lane_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+#define vmul_lane_f32(a, b, __c) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+#define vmul_lane_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmul_lane_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+#define vmulq_lane_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
+#define vmulq_lane_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmulq_lane_f32(a, b, __c) __extension__ ({ \
+ float32x4_t __a = (a); float32x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmulq_lane_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
+#define vmulq_lane_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+
+__ai int16x4_t vmul_n_s16(int16x4_t __a, int16_t __b) {
+ return __a * (int16x4_t){ __b, __b, __b, __b }; }
+__ai int32x2_t vmul_n_s32(int32x2_t __a, int32_t __b) {
+ return __a * (int32x2_t){ __b, __b }; }
+__ai float32x2_t vmul_n_f32(float32x2_t __a, float32_t __b) {
+ return __a * (float32x2_t){ __b, __b }; }
+__ai uint16x4_t vmul_n_u16(uint16x4_t __a, uint16_t __b) {
+ return __a * (uint16x4_t){ __b, __b, __b, __b }; }
+__ai uint32x2_t vmul_n_u32(uint32x2_t __a, uint32_t __b) {
+ return __a * (uint32x2_t){ __b, __b }; }
+__ai int16x8_t vmulq_n_s16(int16x8_t __a, int16_t __b) {
+ return __a * (int16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }; }
+__ai int32x4_t vmulq_n_s32(int32x4_t __a, int32_t __b) {
+ return __a * (int32x4_t){ __b, __b, __b, __b }; }
+__ai float32x4_t vmulq_n_f32(float32x4_t __a, float32_t __b) {
+ return __a * (float32x4_t){ __b, __b, __b, __b }; }
+__ai uint16x8_t vmulq_n_u16(uint16x8_t __a, uint16_t __b) {
+ return __a * (uint16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }; }
+__ai uint32x4_t vmulq_n_u32(uint32x4_t __a, uint32_t __b) {
+ return __a * (uint32x4_t){ __b, __b, __b, __b }; }
+
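+/* vmvn (bitwise NOT), vneg, vorn (OR-NOT) and vorr (OR) below are simple
+ * enough to be written with the corresponding C operators. */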
+__ai int8x8_t vmvn_s8(int8x8_t __a) {
+ return ~__a; }
+__ai int16x4_t vmvn_s16(int16x4_t __a) {
+ return ~__a; }
+__ai int32x2_t vmvn_s32(int32x2_t __a) {
+ return ~__a; }
+__ai uint8x8_t vmvn_u8(uint8x8_t __a) {
+ return ~__a; }
+__ai uint16x4_t vmvn_u16(uint16x4_t __a) {
+ return ~__a; }
+__ai uint32x2_t vmvn_u32(uint32x2_t __a) {
+ return ~__a; }
+__ai poly8x8_t vmvn_p8(poly8x8_t __a) {
+ return ~__a; }
+__ai int8x16_t vmvnq_s8(int8x16_t __a) {
+ return ~__a; }
+__ai int16x8_t vmvnq_s16(int16x8_t __a) {
+ return ~__a; }
+__ai int32x4_t vmvnq_s32(int32x4_t __a) {
+ return ~__a; }
+__ai uint8x16_t vmvnq_u8(uint8x16_t __a) {
+ return ~__a; }
+__ai uint16x8_t vmvnq_u16(uint16x8_t __a) {
+ return ~__a; }
+__ai uint32x4_t vmvnq_u32(uint32x4_t __a) {
+ return ~__a; }
+__ai poly8x16_t vmvnq_p8(poly8x16_t __a) {
+ return ~__a; }
+
+__ai int8x8_t vneg_s8(int8x8_t __a) {
+ return -__a; }
+__ai int16x4_t vneg_s16(int16x4_t __a) {
+ return -__a; }
+__ai int32x2_t vneg_s32(int32x2_t __a) {
+ return -__a; }
+__ai float32x2_t vneg_f32(float32x2_t __a) {
+ return -__a; }
+__ai int8x16_t vnegq_s8(int8x16_t __a) {
+ return -__a; }
+__ai int16x8_t vnegq_s16(int16x8_t __a) {
+ return -__a; }
+__ai int32x4_t vnegq_s32(int32x4_t __a) {
+ return -__a; }
+__ai float32x4_t vnegq_f32(float32x4_t __a) {
+ return -__a; }
+
+__ai int8x8_t vorn_s8(int8x8_t __a, int8x8_t __b) {
+ return __a | ~__b; }
+__ai int16x4_t vorn_s16(int16x4_t __a, int16x4_t __b) {
+ return __a | ~__b; }
+__ai int32x2_t vorn_s32(int32x2_t __a, int32x2_t __b) {
+ return __a | ~__b; }
+__ai int64x1_t vorn_s64(int64x1_t __a, int64x1_t __b) {
+ return __a | ~__b; }
+__ai uint8x8_t vorn_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a | ~__b; }
+__ai uint16x4_t vorn_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a | ~__b; }
+__ai uint32x2_t vorn_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a | ~__b; }
+__ai uint64x1_t vorn_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a | ~__b; }
+__ai int8x16_t vornq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a | ~__b; }
+__ai int16x8_t vornq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a | ~__b; }
+__ai int32x4_t vornq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a | ~__b; }
+__ai int64x2_t vornq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a | ~__b; }
+__ai uint8x16_t vornq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a | ~__b; }
+__ai uint16x8_t vornq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a | ~__b; }
+__ai uint32x4_t vornq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a | ~__b; }
+__ai uint64x2_t vornq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a | ~__b; }
+
+__ai int8x8_t vorr_s8(int8x8_t __a, int8x8_t __b) {
+ return __a | __b; }
+__ai int16x4_t vorr_s16(int16x4_t __a, int16x4_t __b) {
+ return __a | __b; }
+__ai int32x2_t vorr_s32(int32x2_t __a, int32x2_t __b) {
+ return __a | __b; }
+__ai int64x1_t vorr_s64(int64x1_t __a, int64x1_t __b) {
+ return __a | __b; }
+__ai uint8x8_t vorr_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a | __b; }
+__ai uint16x4_t vorr_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a | __b; }
+__ai uint32x2_t vorr_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a | __b; }
+__ai uint64x1_t vorr_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a | __b; }
+__ai int8x16_t vorrq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a | __b; }
+__ai int16x8_t vorrq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a | __b; }
+__ai int32x4_t vorrq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a | __b; }
+__ai int64x2_t vorrq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a | __b; }
+__ai uint8x16_t vorrq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a | __b; }
+__ai uint16x8_t vorrq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a | __b; }
+__ai uint32x4_t vorrq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a | __b; }
+__ai uint64x2_t vorrq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a | __b; }
+
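+/* vpadal_*: pairwise add and accumulate long.  Adjacent element pairs of
+ * __b are summed into double-width values, which are then added to __a; for
+ * example, vpadal_s8 adds four pairwise int16 sums of __b into the int16x4
+ * accumulator.  vpadd_* (pairwise add of two vectors) and vpaddl_*
+ * (pairwise add long of one vector) below are the non-accumulating forms. */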
+__ai int16x4_t vpadal_s8(int16x4_t __a, int8x8_t __b) {
+ return (int16x4_t)__builtin_neon_vpadal_v((int8x8_t)__a, __b, 1); }
+__ai int32x2_t vpadal_s16(int32x2_t __a, int16x4_t __b) {
+ return (int32x2_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vpadal_s32(int64x1_t __a, int32x2_t __b) {
+ return (int64x1_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint16x4_t vpadal_u8(uint16x4_t __a, uint8x8_t __b) {
+ return (uint16x4_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vpadal_u16(uint32x2_t __a, uint16x4_t __b) {
+ return (uint32x2_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vpadal_u32(uint64x1_t __a, uint32x2_t __b) {
+ return (uint64x1_t)__builtin_neon_vpadal_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int16x8_t vpadalq_s8(int16x8_t __a, int8x16_t __b) {
+ return (int16x8_t)__builtin_neon_vpadalq_v((int8x16_t)__a, __b, 33); }
+__ai int32x4_t vpadalq_s16(int32x4_t __a, int16x8_t __b) {
+ return (int32x4_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vpadalq_s32(int64x2_t __a, int32x4_t __b) {
+ return (int64x2_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint16x8_t vpadalq_u8(uint16x8_t __a, uint8x16_t __b) {
+ return (uint16x8_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vpadalq_u16(uint32x4_t __a, uint16x8_t __b) {
+ return (uint32x4_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vpadalq_u32(uint64x2_t __a, uint32x4_t __b) {
+ return (uint64x2_t)__builtin_neon_vpadalq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+__ai int8x8_t vpadd_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vpadd_v(__a, __b, 0); }
+__ai int16x4_t vpadd_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vpadd_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vpadd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vpadd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vpadd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai float32x2_t vpadd_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vpadd_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+
+__ai int16x4_t vpaddl_s8(int8x8_t __a) {
+ return (int16x4_t)__builtin_neon_vpaddl_v(__a, 1); }
+__ai int32x2_t vpaddl_s16(int16x4_t __a) {
+ return (int32x2_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 2); }
+__ai int64x1_t vpaddl_s32(int32x2_t __a) {
+ return (int64x1_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 3); }
+__ai uint16x4_t vpaddl_u8(uint8x8_t __a) {
+ return (uint16x4_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vpaddl_u16(uint16x4_t __a) {
+ return (uint32x2_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vpaddl_u32(uint32x2_t __a) {
+ return (uint64x1_t)__builtin_neon_vpaddl_v((int8x8_t)__a, 19); }
+__ai int16x8_t vpaddlq_s8(int8x16_t __a) {
+ return (int16x8_t)__builtin_neon_vpaddlq_v(__a, 33); }
+__ai int32x4_t vpaddlq_s16(int16x8_t __a) {
+ return (int32x4_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 34); }
+__ai int64x2_t vpaddlq_s32(int32x4_t __a) {
+ return (int64x2_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 35); }
+__ai uint16x8_t vpaddlq_u8(uint8x16_t __a) {
+ return (uint16x8_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vpaddlq_u16(uint16x8_t __a) {
+ return (uint32x4_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vpaddlq_u32(uint32x4_t __a) {
+ return (uint64x2_t)__builtin_neon_vpaddlq_v((int8x16_t)__a, 51); }
+
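+/* vpmax_* and vpmin_*: pairwise maximum and minimum.  Like vpadd_*, each
+ * result lane holds the max (or min) of one pair of adjacent lanes, taken
+ * first from __a and then from __b. */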
+__ai int8x8_t vpmax_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vpmax_v(__a, __b, 0); }
+__ai int16x4_t vpmax_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vpmax_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vpmax_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vpmax_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vpmax_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai float32x2_t vpmax_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vpmax_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+
+__ai int8x8_t vpmin_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vpmin_v(__a, __b, 0); }
+__ai int16x4_t vpmin_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vpmin_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vpmin_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vpmin_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vpmin_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai float32x2_t vpmin_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vpmin_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+
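+/* The vq* intrinsics from here on are saturating: results are clamped to
+ * the range of the element type instead of wrapping.  E.g. (illustrative
+ * only) vqadd_s8 of 127 and 1 yields 127, and vqabs_s8 of -128 yields 127. */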
+__ai int8x8_t vqabs_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vqabs_v(__a, 0); }
+__ai int16x4_t vqabs_s16(int16x4_t __a) {
+ return (int16x4_t)__builtin_neon_vqabs_v((int8x8_t)__a, 1); }
+__ai int32x2_t vqabs_s32(int32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vqabs_v((int8x8_t)__a, 2); }
+__ai int8x16_t vqabsq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vqabsq_v(__a, 32); }
+__ai int16x8_t vqabsq_s16(int16x8_t __a) {
+ return (int16x8_t)__builtin_neon_vqabsq_v((int8x16_t)__a, 33); }
+__ai int32x4_t vqabsq_s32(int32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vqabsq_v((int8x16_t)__a, 34); }
+
+__ai int8x8_t vqadd_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqadd_v(__a, __b, 0); }
+__ai int16x4_t vqadd_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vqadd_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vqadd_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint8x8_t vqadd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vqadd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vqadd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vqadd_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vqadd_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int8x16_t vqaddq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqaddq_v(__a, __b, 32); }
+__ai int16x8_t vqaddq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vqaddq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vqaddq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vqaddq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vqaddq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vqaddq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vqaddq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vqaddq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
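+/* vqdmlal_* and vqdmlsl_*: saturating doubling multiply, widen, then
+ * accumulate (or subtract).  Per lane this is __a plus or minus
+ * sat(2 * __b * __c) in the double-width type.  vqdmulh_* and vqdmull_*
+ * below are the corresponding multiply-only forms (high half and widening,
+ * respectively). */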
+__ai int32x4_t vqdmlal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return (int32x4_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 34); }
+__ai int64x2_t vqdmlal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return (int64x2_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 35); }
+
+#define vqdmlal_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
+ vqdmlal_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlal_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
+ vqdmlal_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vqdmlal_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
+ return (int32x4_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int16x4_t){ __c, __c, __c, __c }, 34); }
+__ai int64x2_t vqdmlal_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
+ return (int64x2_t)__builtin_neon_vqdmlal_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int32x2_t){ __c, __c }, 35); }
+
+__ai int32x4_t vqdmlsl_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c) {
+ return (int32x4_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 34); }
+__ai int64x2_t vqdmlsl_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c) {
+ return (int64x2_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)__c, 35); }
+
+#define vqdmlsl_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x4_t __c = (c); \
+ vqdmlsl_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlsl_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x2_t __c = (c); \
+ vqdmlsl_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vqdmlsl_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c) {
+ return (int32x4_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int16x4_t){ __c, __c, __c, __c }, 34); }
+__ai int64x2_t vqdmlsl_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c) {
+ return (int64x2_t)__builtin_neon_vqdmlsl_v((int8x16_t)__a, (int8x8_t)__b, (int8x8_t)(int32x2_t){ __c, __c }, 35); }
+
+__ai int16x4_t vqdmulh_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vqdmulh_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int16x8_t vqdmulhq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vqdmulhq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+
+#define vqdmulh_lane_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ vqdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqdmulh_lane_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ vqdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vqdmulhq_lane_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x4_t __b = (b); \
+ vqdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
+#define vqdmulhq_lane_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x2_t __b = (b); \
+ vqdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+
+__ai int16x4_t vqdmulh_n_s16(int16x4_t __a, int16_t __b) {
+ return (int16x4_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 1); }
+__ai int32x2_t vqdmulh_n_s32(int32x2_t __a, int32_t __b) {
+ return (int32x2_t)__builtin_neon_vqdmulh_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 2); }
+__ai int16x8_t vqdmulhq_n_s16(int16x8_t __a, int16_t __b) {
+ return (int16x8_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)(int16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }, 33); }
+__ai int32x4_t vqdmulhq_n_s32(int32x4_t __a, int32_t __b) {
+ return (int32x4_t)__builtin_neon_vqdmulhq_v((int8x16_t)__a, (int8x16_t)(int32x4_t){ __b, __b, __b, __b }, 34); }
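+
+/* Editorial note (assumption, not from the header): vqdmulh is the usual
+ * Q15/Q31 fixed-point multiply; for int16 lanes it returns roughly
+ * sat((2 * a * b) >> 16). Sketch of a Q15 gain stage, where `samples` and
+ * `gain_q15` are hypothetical values:
+ *
+ *   int16x8_t scaled = vqdmulhq_n_s16(samples, gain_q15);
+ */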
+
+__ai int32x4_t vqdmull_s16(int16x4_t __a, int16x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)__b, 34); }
+__ai int64x2_t vqdmull_s32(int32x2_t __a, int32x2_t __b) {
+ return (int64x2_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)__b, 35); }
+
+#define vqdmull_lane_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ vqdmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqdmull_lane_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ vqdmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+
+__ai int32x4_t vqdmull_n_s16(int16x4_t __a, int16_t __b) {
+ return (int32x4_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 34); }
+__ai int64x2_t vqdmull_n_s32(int32x2_t __a, int32_t __b) {
+ return (int64x2_t)__builtin_neon_vqdmull_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 35); }
+
+__ai int8x8_t vqmovn_s16(int16x8_t __a) {
+ return (int8x8_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 0); }
+__ai int16x4_t vqmovn_s32(int32x4_t __a) {
+ return (int16x4_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 1); }
+__ai int32x2_t vqmovn_s64(int64x2_t __a) {
+ return (int32x2_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 2); }
+__ai uint8x8_t vqmovn_u16(uint16x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 16); }
+__ai uint16x4_t vqmovn_u32(uint32x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 17); }
+__ai uint32x2_t vqmovn_u64(uint64x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vqmovn_v((int8x16_t)__a, 18); }
+
+__ai uint8x8_t vqmovun_s16(int16x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vqmovun_v((int8x16_t)__a, 16); }
+__ai uint16x4_t vqmovun_s32(int32x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vqmovun_v((int8x16_t)__a, 17); }
+__ai uint32x2_t vqmovun_s64(int64x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vqmovun_v((int8x16_t)__a, 18); }
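+
+/* Editorial note: vqmovn narrows each lane to half width with saturation in
+ * the lane's own signedness, while vqmovun narrows signed input to an
+ * unsigned result (negative lanes clamp to 0). Sketch, packing a
+ * hypothetical 16-bit accumulator `acc16` back to pixels:
+ *
+ *   uint8x8_t pixels = vqmovun_s16(acc16);  // clamp to [0, 255]
+ */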
+
+__ai int8x8_t vqneg_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vqneg_v(__a, 0); }
+__ai int16x4_t vqneg_s16(int16x4_t __a) {
+ return (int16x4_t)__builtin_neon_vqneg_v((int8x8_t)__a, 1); }
+__ai int32x2_t vqneg_s32(int32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vqneg_v((int8x8_t)__a, 2); }
+__ai int8x16_t vqnegq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vqnegq_v(__a, 32); }
+__ai int16x8_t vqnegq_s16(int16x8_t __a) {
+ return (int16x8_t)__builtin_neon_vqnegq_v((int8x16_t)__a, 33); }
+__ai int32x4_t vqnegq_s32(int32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vqnegq_v((int8x16_t)__a, 34); }
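+
+/* Editorial note: vqneg saturates the one corner case of two's-complement
+ * negation -- the minimum lane value maps to the maximum instead of
+ * wrapping back to itself (e.g. -128 -> 127 for s8 lanes). */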
+
+__ai int16x4_t vqrdmulh_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vqrdmulh_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int16x8_t vqrdmulhq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vqrdmulhq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+
+#define vqrdmulh_lane_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ vqrdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqrdmulh_lane_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ vqrdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vqrdmulhq_lane_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x4_t __b = (b); \
+ vqrdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
+#define vqrdmulhq_lane_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x2_t __b = (b); \
+ vqrdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+
+__ai int16x4_t vqrdmulh_n_s16(int16x4_t __a, int16_t __b) {
+ return (int16x4_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)(int16x4_t){ __b, __b, __b, __b }, 1); }
+__ai int32x2_t vqrdmulh_n_s32(int32x2_t __a, int32_t __b) {
+ return (int32x2_t)__builtin_neon_vqrdmulh_v((int8x8_t)__a, (int8x8_t)(int32x2_t){ __b, __b }, 2); }
+__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __a, int16_t __b) {
+ return (int16x8_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)(int16x8_t){ __b, __b, __b, __b, __b, __b, __b, __b }, 33); }
+__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __a, int32_t __b) {
+ return (int32x4_t)__builtin_neon_vqrdmulhq_v((int8x16_t)__a, (int8x16_t)(int32x4_t){ __b, __b, __b, __b }, 34); }
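+
+/* Editorial note (assumption): vqrdmulh is the rounding form of vqdmulh;
+ * for int16 lanes it computes roughly sat((2 * a * b + 0x8000) >> 16),
+ * which removes the downward truncation bias of vqdmulh in long
+ * fixed-point filter chains. */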
+
+__ai int8x8_t vqrshl_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqrshl_v(__a, __b, 0); }
+__ai int16x4_t vqrshl_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vqrshl_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vqrshl_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint8x8_t vqrshl_u8(uint8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqrshl_v((int8x8_t)__a, __b, 16); }
+__ai uint16x4_t vqrshl_u16(uint16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vqrshl_u32(uint32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vqrshl_u64(uint64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vqrshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int8x16_t vqrshlq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqrshlq_v(__a, __b, 32); }
+__ai int16x8_t vqrshlq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vqrshlq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vqrshlq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vqrshlq_u8(uint8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, __b, 48); }
+__ai uint16x8_t vqrshlq_u16(uint16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vqrshlq_u32(uint32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vqrshlq_u64(uint64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vqrshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
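+
+/* Editorial note: in the by-register shift forms the per-lane count in __b
+ * is signed -- positive counts shift left, negative counts shift right
+ * (here with rounding, and saturation on the left shift). */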
+
+#define vqrshrn_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 0); })
+#define vqrshrn_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 1); })
+#define vqrshrn_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 2); })
+#define vqrshrn_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 16); })
+#define vqrshrn_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 17); })
+#define vqrshrn_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vqrshrn_n_v((int8x16_t)__a, __b, 18); })
+
+#define vqrshrun_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vqrshrun_n_v((int8x16_t)__a, __b, 16); })
+#define vqrshrun_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vqrshrun_n_v((int8x16_t)__a, __b, 17); })
+#define vqrshrun_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vqrshrun_n_v((int8x16_t)__a, __b, 18); })
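+
+/* Editorial note: vqrshrn_n/vqrshrun_n do a rounding right shift by a
+ * constant, then narrow with saturation -- the usual final step when a
+ * widened accumulator is scaled back down. Sketch for a hypothetical 8.8
+ * fixed-point accumulator `acc`:
+ *
+ *   uint8x8_t out = vqrshrun_n_s16(acc, 8);  // (acc + 128) >> 8, clamped
+ */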
+
+__ai int8x8_t vqshl_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqshl_v(__a, __b, 0); }
+__ai int16x4_t vqshl_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vqshl_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vqshl_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint8x8_t vqshl_u8(uint8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqshl_v((int8x8_t)__a, __b, 16); }
+__ai uint16x4_t vqshl_u16(uint16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vqshl_u32(uint32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vqshl_u64(uint64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vqshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int8x16_t vqshlq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqshlq_v(__a, __b, 32); }
+__ai int16x8_t vqshlq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vqshlq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vqshlq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vqshlq_u8(uint8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqshlq_v((int8x16_t)__a, __b, 48); }
+__ai uint16x8_t vqshlq_u16(uint16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vqshlq_u32(uint32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vqshlq_u64(uint64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vqshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+#define vqshlu_n_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vqshlu_n_v(__a, __b, 16); })
+#define vqshlu_n_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vqshlu_n_v((int8x8_t)__a, __b, 17); })
+#define vqshlu_n_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vqshlu_n_v((int8x8_t)__a, __b, 18); })
+#define vqshlu_n_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (uint64x1_t)__builtin_neon_vqshlu_n_v((int8x8_t)__a, __b, 19); })
+#define vqshluq_n_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (uint8x16_t)__builtin_neon_vqshluq_n_v(__a, __b, 48); })
+#define vqshluq_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (uint16x8_t)__builtin_neon_vqshluq_n_v((int8x16_t)__a, __b, 49); })
+#define vqshluq_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vqshluq_n_v((int8x16_t)__a, __b, 50); })
+#define vqshluq_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vqshluq_n_v((int8x16_t)__a, __b, 51); })
+
+#define vqshl_n_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vqshl_n_v(__a, __b, 0); })
+#define vqshl_n_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 1); })
+#define vqshl_n_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 2); })
+#define vqshl_n_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (int64x1_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 3); })
+#define vqshl_n_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 16); })
+#define vqshl_n_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 17); })
+#define vqshl_n_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 18); })
+#define vqshl_n_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (uint64x1_t)__builtin_neon_vqshl_n_v((int8x8_t)__a, __b, 19); })
+#define vqshlq_n_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (int8x16_t)__builtin_neon_vqshlq_n_v(__a, __b, 32); })
+#define vqshlq_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int16x8_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 33); })
+#define vqshlq_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int32x4_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 34); })
+#define vqshlq_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int64x2_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 35); })
+#define vqshlq_n_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ (uint8x16_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 48); })
+#define vqshlq_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint16x8_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 49); })
+#define vqshlq_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 50); })
+#define vqshlq_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vqshlq_n_v((int8x16_t)__a, __b, 51); })
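+
+/* Editorial note (assumption): like the _lane forms, the _n shift forms are
+ * macros because the shift amount maps to an instruction immediate and must
+ * be a compile-time constant that the compiler can range-check (e.g.
+ * 0..7 for vqshl_n_s8). */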
+
+#define vqshrn_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 0); })
+#define vqshrn_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 1); })
+#define vqshrn_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 2); })
+#define vqshrn_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 16); })
+#define vqshrn_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 17); })
+#define vqshrn_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vqshrn_n_v((int8x16_t)__a, __b, 18); })
+
+#define vqshrun_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vqshrun_n_v((int8x16_t)__a, __b, 16); })
+#define vqshrun_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vqshrun_n_v((int8x16_t)__a, __b, 17); })
+#define vqshrun_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vqshrun_n_v((int8x16_t)__a, __b, 18); })
+
+__ai int8x8_t vqsub_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqsub_v(__a, __b, 0); }
+__ai int16x4_t vqsub_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vqsub_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vqsub_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint8x8_t vqsub_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vqsub_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vqsub_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vqsub_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vqsub_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int8x16_t vqsubq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqsubq_v(__a, __b, 32); }
+__ai int16x8_t vqsubq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vqsubq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vqsubq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vqsubq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vqsubq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vqsubq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vqsubq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vqsubq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
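+
+/* Editorial note: vqsub saturates instead of wrapping, so for unsigned
+ * lanes it acts as "subtract and clamp at zero". Sketch with hypothetical
+ * u8 rows `fg` and `bg`:
+ *
+ *   uint8x16_t diff = vqsubq_u8(fg, bg);  // max(fg - bg, 0) per lane
+ */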
+
+__ai int8x8_t vraddhn_s16(int16x8_t __a, int16x8_t __b) {
+ return (int8x8_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
+__ai int16x4_t vraddhn_s32(int32x4_t __a, int32x4_t __b) {
+ return (int16x4_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
+__ai int32x2_t vraddhn_s64(int64x2_t __a, int64x2_t __b) {
+ return (int32x2_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
+__ai uint8x8_t vraddhn_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
+__ai uint16x4_t vraddhn_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
+__ai uint32x2_t vraddhn_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vraddhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
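+
+/* Editorial note (assumption): vraddhn adds two wide vectors and returns
+ * the rounded high half of each lane -- for 16-bit lanes, roughly
+ * (a + b + 0x80) >> 8 -- a common renormalization after a widening op. */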
+
+__ai float32x2_t vrecpe_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrecpe_v((int8x8_t)__a, 8); }
+__ai uint32x2_t vrecpe_u32(uint32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vrecpe_v((int8x8_t)__a, 18); }
+__ai float32x4_t vrecpeq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrecpeq_v((int8x16_t)__a, 40); }
+__ai uint32x4_t vrecpeq_u32(uint32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vrecpeq_v((int8x16_t)__a, 50); }
+
+__ai float32x2_t vrecps_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vrecps_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float32x4_t vrecpsq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vrecpsq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
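+
+/* Editorial idiom sketch (not part of the generated header): vrecpe_f32
+ * gives a coarse reciprocal estimate and vrecps_f32 computes (2 - a*b), the
+ * Newton-Raphson step factor, so each refinement roughly doubles the
+ * estimate's precision. A division a/b built from these:
+ *
+ *   float32x4_t r = vrecpeq_f32(b);             // coarse 1/b
+ *   r = vmulq_f32(r, vrecpsq_f32(b, r));        // refine once
+ *   r = vmulq_f32(r, vrecpsq_f32(b, r));        // refine again
+ *   float32x4_t q = vmulq_f32(a, r);            // a * (1/b)
+ */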
+
+__ai int8x8_t vreinterpret_s8_s16(int16x4_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_s32(int32x2_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_s64(int64x1_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_f16(float16x4_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_f32(float32x2_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __a) {
+ return (int8x8_t)__a; }
+__ai int16x4_t vreinterpret_s16_s8(int8x8_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_s32(int32x2_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_s64(int64x1_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_f16(float16x4_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_f32(float32x2_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __a) {
+ return (int16x4_t)__a; }
+__ai int32x2_t vreinterpret_s32_s8(int8x8_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_s16(int16x4_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_s64(int64x1_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_f16(float16x4_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_f32(float32x2_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __a) {
+ return (int32x2_t)__a; }
+__ai int64x1_t vreinterpret_s64_s8(int8x8_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_s16(int16x4_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_s32(int32x2_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_f16(float16x4_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_f32(float32x2_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __a) {
+ return (int64x1_t)__a; }
+__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __a) {
+ return (uint64x1_t)__a; }
+__ai float16x4_t vreinterpret_f16_s8(int8x8_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_s16(int16x4_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_s32(int32x2_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_s64(int64x1_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_f32(float32x2_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __a) {
+ return (float16x4_t)__a; }
+__ai float32x2_t vreinterpret_f32_s8(int8x8_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_s16(int16x4_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_s32(int32x2_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_s64(int64x1_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_f16(float16x4_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __a) {
+ return (float32x2_t)__a; }
+__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __a) {
+ return (poly16x4_t)__a; }
+__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __a) {
+ return (int8x16_t)__a; }
+__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __a) {
+ return (int16x8_t)__a; }
+__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __a) {
+ return (int32x4_t)__a; }
+__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __a) {
+ return (int64x2_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __a) {
+ return (uint64x2_t)__a; }
+__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __a) {
+ return (float16x8_t)__a; }
+__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __a) {
+ return (float32x4_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __a) {
+ return (poly16x8_t)__a; }
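+
+/* Editorial note: the vreinterpret* family is a pure bit-pattern cast
+ * between equally sized vector types -- no instruction is emitted and no
+ * lane values are converted (use the vcvt* family for numeric conversion).
+ * Sketch:
+ *
+ *   uint8x16_t bytes = vreinterpretq_u8_f32(v);  // same 128 bits, as bytes
+ */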
+
+__ai int8x8_t vrev16_s8(int8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
+__ai uint8x8_t vrev16_u8(uint8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
+__ai poly8x8_t vrev16_p8(poly8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
+__ai int8x16_t vrev16q_s8(int8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); }
+__ai uint8x16_t vrev16q_u8(uint8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); }
+__ai poly8x16_t vrev16q_p8(poly8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); }
+
+__ai int8x8_t vrev32_s8(int8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
+__ai int16x4_t vrev32_s16(int16x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
+__ai uint8x8_t vrev32_u8(uint8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
+__ai uint16x4_t vrev32_u16(uint16x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
+__ai poly8x8_t vrev32_p8(poly8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
+__ai poly16x4_t vrev32_p16(poly16x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
+__ai int8x16_t vrev32q_s8(int8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); }
+__ai int16x8_t vrev32q_s16(int16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
+__ai uint8x16_t vrev32q_u8(uint8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); }
+__ai uint16x8_t vrev32q_u16(uint16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
+__ai poly8x16_t vrev32q_p8(poly8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); }
+__ai poly16x8_t vrev32q_p16(poly16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2, 5, 4, 7, 6); }
+
+__ai int8x8_t vrev64_s8(int8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); }
+__ai int16x4_t vrev64_s16(int16x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); }
+__ai int32x2_t vrev64_s32(int32x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0); }
+__ai uint8x8_t vrev64_u8(uint8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); }
+__ai uint16x4_t vrev64_u16(uint16x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); }
+__ai uint32x2_t vrev64_u32(uint32x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0); }
+__ai poly8x8_t vrev64_p8(poly8x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); }
+__ai poly16x4_t vrev64_p16(poly16x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); }
+__ai float32x2_t vrev64_f32(float32x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0); }
+__ai int8x16_t vrev64q_s8(int8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); }
+__ai int16x8_t vrev64q_s16(int16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
+__ai int32x4_t vrev64q_s32(int32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
+__ai uint8x16_t vrev64q_u8(uint8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); }
+__ai uint16x8_t vrev64q_u16(uint16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
+__ai uint32x4_t vrev64q_u32(uint32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
+__ai poly8x16_t vrev64q_p8(poly8x16_t __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); }
+__ai poly16x8_t vrev64q_p16(poly16x8_t __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0, 7, 6, 5, 4); }
+__ai float32x4_t vrev64q_f32(float32x4_t __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0, 3, 2); }
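+
+/* Editorial note: vrevN reverses the elements within each N-bit group, so
+ * vrev32q_u8 byte-swaps every 32-bit word -- the usual vectorized
+ * endianness flip. Sketch for hypothetical big-endian words `w`:
+ *
+ *   uint8x16_t flipped = vrev32q_u8(vreinterpretq_u8_u32(w));
+ */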
+
+__ai int8x8_t vrhadd_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vrhadd_v(__a, __b, 0); }
+__ai int16x4_t vrhadd_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vrhadd_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai uint8x8_t vrhadd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vrhadd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vrhadd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vrhadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai int8x16_t vrhaddq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vrhaddq_v(__a, __b, 32); }
+__ai int16x8_t vrhaddq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vrhaddq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vrhaddq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vrhaddq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vrhaddq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vrhaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
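+
+/* Editorial note: vrhadd is a rounding halving add, (a + b + 1) >> 1,
+ * computed without intermediate overflow -- i.e. a per-lane average that
+ * rounds up. Sketch blending two hypothetical image rows:
+ *
+ *   uint8x16_t avg = vrhaddq_u8(row_a, row_b);
+ */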
+
+__ai int8x8_t vrshl_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vrshl_v(__a, __b, 0); }
+__ai int16x4_t vrshl_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vrshl_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vrshl_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint8x8_t vrshl_u8(uint8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vrshl_v((int8x8_t)__a, __b, 16); }
+__ai uint16x4_t vrshl_u16(uint16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vrshl_u32(uint32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vrshl_u64(uint64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vrshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int8x16_t vrshlq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vrshlq_v(__a, __b, 32); }
+__ai int16x8_t vrshlq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vrshlq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vrshlq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vrshlq_u8(uint8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vrshlq_v((int8x16_t)__a, __b, 48); }
+__ai uint16x8_t vrshlq_u16(uint16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vrshlq_u32(uint32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vrshlq_u64(uint64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vrshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+#define vrshrn_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 0); })
+#define vrshrn_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 1); })
+#define vrshrn_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 2); })
+#define vrshrn_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 16); })
+#define vrshrn_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 17); })
+#define vrshrn_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vrshrn_n_v((int8x16_t)__a, __b, 18); })
+
+#define vrshr_n_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vrshr_n_v(__a, __b, 0); })
+#define vrshr_n_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 1); })
+#define vrshr_n_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 2); })
+#define vrshr_n_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (int64x1_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 3); })
+#define vrshr_n_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 16); })
+#define vrshr_n_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 17); })
+#define vrshr_n_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 18); })
+#define vrshr_n_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (uint64x1_t)__builtin_neon_vrshr_n_v((int8x8_t)__a, __b, 19); })
+#define vrshrq_n_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (int8x16_t)__builtin_neon_vrshrq_n_v(__a, __b, 32); })
+#define vrshrq_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int16x8_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 33); })
+#define vrshrq_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int32x4_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 34); })
+#define vrshrq_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int64x2_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 35); })
+#define vrshrq_n_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ (uint8x16_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 48); })
+#define vrshrq_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint16x8_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 49); })
+#define vrshrq_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 50); })
+#define vrshrq_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vrshrq_n_v((int8x16_t)__a, __b, 51); })
+
+__ai float32x2_t vrsqrte_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrsqrte_v((int8x8_t)__a, 8); }
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vrsqrte_v((int8x8_t)__a, 18); }
+__ai float32x4_t vrsqrteq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrsqrteq_v((int8x16_t)__a, 40); }
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vrsqrteq_v((int8x16_t)__a, 50); }
+
+__ai float32x2_t vrsqrts_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vrsqrts_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vrsqrtsq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
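+
+/* Editorial idiom sketch (not part of the generated header): vrsqrte_f32
+ * gives a coarse 1/sqrt(x) estimate and vrsqrts_f32 supplies the
+ * Newton-Raphson step factor (3 - a*b) / 2:
+ *
+ *   float32x4_t x = vrsqrteq_f32(v);                     // coarse 1/sqrt(v)
+ *   x = vmulq_f32(x, vrsqrtsq_f32(vmulq_f32(v, x), x));  // one refinement
+ */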
+
+#define vrsra_n_s8(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vrsra_n_v(__a, __b, __c, 0); })
+#define vrsra_n_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
+#define vrsra_n_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
+#define vrsra_n_s64(a, b, __c) __extension__ ({ \
+ int64x1_t __a = (a); int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
+#define vrsra_n_u8(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
+#define vrsra_n_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
+#define vrsra_n_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
+#define vrsra_n_u64(a, b, __c) __extension__ ({ \
+ uint64x1_t __a = (a); uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vrsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
+#define vrsraq_n_s8(a, b, __c) __extension__ ({ \
+ int8x16_t __a = (a); int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vrsraq_n_v(__a, __b, __c, 32); })
+#define vrsraq_n_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
+#define vrsraq_n_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
+#define vrsraq_n_s64(a, b, __c) __extension__ ({ \
+ int64x2_t __a = (a); int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
+#define vrsraq_n_u8(a, b, __c) __extension__ ({ \
+ uint8x16_t __a = (a); uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
+#define vrsraq_n_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
+#define vrsraq_n_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
+#define vrsraq_n_u64(a, b, __c) __extension__ ({ \
+ uint64x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vrsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
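+
+/* Editorial note (assumption): vrsra_n is "rounding shift right, then
+ * accumulate" -- per lane, a + ((b + (1 << (n - 1))) >> n) -- with a
+ * compile-time shift count n. */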
+
+__ai int8x8_t vrsubhn_s16(int16x8_t __a, int16x8_t __b) {
+ return (int8x8_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
+__ai int16x4_t vrsubhn_s32(int32x4_t __a, int32x4_t __b) {
+ return (int16x4_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
+__ai int32x2_t vrsubhn_s64(int64x2_t __a, int64x2_t __b) {
+ return (int32x2_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vrsubhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
+
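+/* vrsubhn_*: rounding subtract, then keep the high half of each lane.  For
+ * s16 inputs each lane yields (int8_t)((__a - __b + 0x80) >> 8).  Sketch:
+ *
+ *   int16x8_t a = vdupq_n_s16(0x1234), b = vdupq_n_s16(0x0034);
+ *   int8x8_t r = vrsubhn_s16(a, b);  // (0x1200 + 0x80) >> 8 = 0x12 per lane
+ */
+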
+#define vset_lane_u8(a, b, __c) __extension__ ({ \
+ uint8_t __a = (a); uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vset_lane_i8(__a, (int8x8_t)__b, __c); })
+#define vset_lane_u16(a, b, __c) __extension__ ({ \
+ uint16_t __a = (a); uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vset_lane_i16(__a, (int16x4_t)__b, __c); })
+#define vset_lane_u32(a, b, __c) __extension__ ({ \
+ uint32_t __a = (a); uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vset_lane_i32(__a, (int32x2_t)__b, __c); })
+#define vset_lane_s8(a, b, __c) __extension__ ({ \
+ int8_t __a = (a); int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vset_lane_i8(__a, __b, __c); })
+#define vset_lane_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vset_lane_i16(__a, __b, __c); })
+#define vset_lane_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vset_lane_i32(__a, __b, __c); })
+#define vset_lane_p8(a, b, __c) __extension__ ({ \
+ poly8_t __a = (a); poly8x8_t __b = (b); \
+ (poly8x8_t)__builtin_neon_vset_lane_i8(__a, (int8x8_t)__b, __c); })
+#define vset_lane_p16(a, b, __c) __extension__ ({ \
+ poly16_t __a = (a); poly16x4_t __b = (b); \
+ (poly16x4_t)__builtin_neon_vset_lane_i16(__a, (int16x4_t)__b, __c); })
+#define vset_lane_f32(a, b, __c) __extension__ ({ \
+ float32_t __a = (a); float32x2_t __b = (b); \
+ (float32x2_t)__builtin_neon_vset_lane_f32(__a, __b, __c); })
+#define vsetq_lane_u8(a, b, __c) __extension__ ({ \
+ uint8_t __a = (a); uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vsetq_lane_i8(__a, (int8x16_t)__b, __c); })
+#define vsetq_lane_u16(a, b, __c) __extension__ ({ \
+ uint16_t __a = (a); uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vsetq_lane_i16(__a, (int16x8_t)__b, __c); })
+#define vsetq_lane_u32(a, b, __c) __extension__ ({ \
+ uint32_t __a = (a); uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vsetq_lane_i32(__a, (int32x4_t)__b, __c); })
+#define vsetq_lane_s8(a, b, __c) __extension__ ({ \
+ int8_t __a = (a); int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vsetq_lane_i8(__a, __b, __c); })
+#define vsetq_lane_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vsetq_lane_i16(__a, __b, __c); })
+#define vsetq_lane_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vsetq_lane_i32(__a, __b, __c); })
+#define vsetq_lane_p8(a, b, __c) __extension__ ({ \
+ poly8_t __a = (a); poly8x16_t __b = (b); \
+ (poly8x16_t)__builtin_neon_vsetq_lane_i8(__a, (int8x16_t)__b, __c); })
+#define vsetq_lane_p16(a, b, __c) __extension__ ({ \
+ poly16_t __a = (a); poly16x8_t __b = (b); \
+ (poly16x8_t)__builtin_neon_vsetq_lane_i16(__a, (int16x8_t)__b, __c); })
+#define vsetq_lane_f32(a, b, __c) __extension__ ({ \
+ float32_t __a = (a); float32x4_t __b = (b); \
+ (float32x4_t)__builtin_neon_vsetq_lane_f32(__a, __b, __c); })
+#define vset_lane_s64(a, b, __c) __extension__ ({ \
+ int64_t __a = (a); int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vset_lane_i64(__a, __b, __c); })
+#define vset_lane_u64(a, b, __c) __extension__ ({ \
+ uint64_t __a = (a); uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vset_lane_i64(__a, (int64x1_t)__b, __c); })
+#define vsetq_lane_s64(a, b, __c) __extension__ ({ \
+ int64_t __a = (a); int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vsetq_lane_i64(__a, __b, __c); })
+#define vsetq_lane_u64(a, b, __c) __extension__ ({ \
+ uint64_t __a = (a); uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vsetq_lane_i64(__a, (int64x2_t)__b, __c); })
+
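+/* vset_lane_* / vsetq_lane_*: return a copy of vector b with lane __c replaced
+ * by the scalar a (note the scalar comes first).  __c must be a constant lane
+ * index.  Sketch with hypothetical values:
+ *
+ *   uint8x8_t v = vdup_n_u8(0);
+ *   v = vset_lane_u8(0xff, v, 3);  // only lane 3 becomes 0xff
+ */
+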
+__ai int8x8_t vshl_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vshl_v(__a, __b, 0); }
+__ai int16x4_t vshl_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vshl_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vshl_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai uint8x8_t vshl_u8(uint8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vshl_v((int8x8_t)__a, __b, 16); }
+__ai uint16x4_t vshl_u16(uint16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vshl_u32(uint32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vshl_u64(uint64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vshl_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai int8x16_t vshlq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vshlq_v(__a, __b, 32); }
+__ai int16x8_t vshlq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vshlq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vshlq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vshlq_u8(uint8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vshlq_v((int8x16_t)__a, __b, 48); }
+__ai uint16x8_t vshlq_u16(uint16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vshlq_u32(uint32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vshlq_u64(uint64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vshlq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
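+/* vshl_* / vshlq_*: shift each lane of __a by the signed per-lane count in
+ * __b; negative counts shift right (arithmetic for signed types, logical for
+ * unsigned).  Sketch:
+ *
+ *   int16x4_t r = vshl_s16(vdup_n_s16(16), vdup_n_s16(-2));  // 16 >> 2 = 4
+ */
+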
+#define vshll_n_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int16x8_t)__builtin_neon_vshll_n_v(__a, __b, 33); })
+#define vshll_n_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int32x4_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 34); })
+#define vshll_n_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int64x2_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 35); })
+#define vshll_n_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint16x8_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 49); })
+#define vshll_n_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 50); })
+#define vshll_n_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vshll_n_v((int8x8_t)__a, __b, 51); })
+
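+/* vshll_n_*: widen each lane to twice its width, then shift left by the
+ * immediate __b, so no bits are lost on the left.  Sketch:
+ *
+ *   int16x8_t r = vshll_n_s8(vdup_n_s8(3), 4);  // each 16-bit lane = 48
+ */
+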
+#define vshl_n_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vshl_n_v(__a, __b, 0); })
+#define vshl_n_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 1); })
+#define vshl_n_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 2); })
+#define vshl_n_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (int64x1_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 3); })
+#define vshl_n_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 16); })
+#define vshl_n_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 17); })
+#define vshl_n_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 18); })
+#define vshl_n_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (uint64x1_t)__builtin_neon_vshl_n_v((int8x8_t)__a, __b, 19); })
+#define vshlq_n_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (int8x16_t)__builtin_neon_vshlq_n_v(__a, __b, 32); })
+#define vshlq_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int16x8_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 33); })
+#define vshlq_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int32x4_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 34); })
+#define vshlq_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int64x2_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 35); })
+#define vshlq_n_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ (uint8x16_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 48); })
+#define vshlq_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint16x8_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 49); })
+#define vshlq_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 50); })
+#define vshlq_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vshlq_n_v((int8x16_t)__a, __b, 51); })
+
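+/* vshl_n_* / vshlq_n_*: shift every lane left by the constant __b (0 up to
+ * one less than the lane width); bits shifted out are discarded.  Sketch:
+ *
+ *   int32x2_t r = vshl_n_s32(vdup_n_s32(5), 3);  // 5 << 3 = 40 per lane
+ */
+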
+#define vshrn_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 0); })
+#define vshrn_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 1); })
+#define vshrn_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 2); })
+#define vshrn_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 16); })
+#define vshrn_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 17); })
+#define vshrn_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vshrn_n_v((int8x16_t)__a, __b, 18); })
+
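+/* vshrn_n_*: shift each lane right by the immediate __b, then truncate to half
+ * the width (e.g. s16 -> s8), a common fixed-point renormalization.  Sketch:
+ *
+ *   int8x8_t r = vshrn_n_s16(vdupq_n_s16(0x1234), 8);  // each lane = 0x12
+ */
+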
+#define vshr_n_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int8x8_t)__builtin_neon_vshr_n_v(__a, __b, 0); })
+#define vshr_n_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int16x4_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 1); })
+#define vshr_n_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int32x2_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 2); })
+#define vshr_n_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (int64x1_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 3); })
+#define vshr_n_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint8x8_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 16); })
+#define vshr_n_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint16x4_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 17); })
+#define vshr_n_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint32x2_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 18); })
+#define vshr_n_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (uint64x1_t)__builtin_neon_vshr_n_v((int8x8_t)__a, __b, 19); })
+#define vshrq_n_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (int8x16_t)__builtin_neon_vshrq_n_v(__a, __b, 32); })
+#define vshrq_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int16x8_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 33); })
+#define vshrq_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int32x4_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 34); })
+#define vshrq_n_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int64x2_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 35); })
+#define vshrq_n_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ (uint8x16_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 48); })
+#define vshrq_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint16x8_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 49); })
+#define vshrq_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint32x4_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 50); })
+#define vshrq_n_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vshrq_n_v((int8x16_t)__a, __b, 51); })
+
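+/* vshr_n_* / vshrq_n_*: shift every lane right by the constant __b (1 up to
+ * the lane width); arithmetic for signed types, logical for unsigned.  Sketch:
+ *
+ *   int16x4_t r = vshr_n_s16(vdup_n_s16(-32), 3);  // -32 >> 3 = -4 per lane
+ */
+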
+#define vsli_n_s8(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vsli_n_v(__a, __b, __c, 0); })
+#define vsli_n_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
+#define vsli_n_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
+#define vsli_n_s64(a, b, __c) __extension__ ({ \
+ int64x1_t __a = (a); int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
+#define vsli_n_u8(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
+#define vsli_n_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
+#define vsli_n_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
+#define vsli_n_u64(a, b, __c) __extension__ ({ \
+ uint64x1_t __a = (a); uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
+#define vsli_n_p8(a, b, __c) __extension__ ({ \
+ poly8x8_t __a = (a); poly8x8_t __b = (b); \
+ (poly8x8_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 4); })
+#define vsli_n_p16(a, b, __c) __extension__ ({ \
+ poly16x4_t __a = (a); poly16x4_t __b = (b); \
+ (poly16x4_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 5); })
+#define vsliq_n_s8(a, b, __c) __extension__ ({ \
+ int8x16_t __a = (a); int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vsliq_n_v(__a, __b, __c, 32); })
+#define vsliq_n_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
+#define vsliq_n_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
+#define vsliq_n_s64(a, b, __c) __extension__ ({ \
+ int64x2_t __a = (a); int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
+#define vsliq_n_u8(a, b, __c) __extension__ ({ \
+ uint8x16_t __a = (a); uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
+#define vsliq_n_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
+#define vsliq_n_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
+#define vsliq_n_u64(a, b, __c) __extension__ ({ \
+ uint64x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
+#define vsliq_n_p8(a, b, __c) __extension__ ({ \
+ poly8x16_t __a = (a); poly8x16_t __b = (b); \
+ (poly8x16_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 36); })
+#define vsliq_n_p16(a, b, __c) __extension__ ({ \
+ poly16x8_t __a = (a); poly16x8_t __b = (b); \
+ (poly16x8_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 37); })
+
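+/* vsli_n_*: shift-left-and-insert; each lane becomes (__b << __c) merged with
+ * the low __c bits of __a, useful for packing bit fields.  Sketch:
+ *
+ *   uint8x8_t r = vsli_n_u8(vdup_n_u8(0x0f), vdup_n_u8(0x05), 4);
+ *   // per lane: (0x05 << 4) | (0x0f & 0x0f) = 0x5f
+ */
+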
+#define vsra_n_s8(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vsra_n_v(__a, __b, __c, 0); })
+#define vsra_n_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
+#define vsra_n_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
+#define vsra_n_s64(a, b, __c) __extension__ ({ \
+ int64x1_t __a = (a); int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
+#define vsra_n_u8(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
+#define vsra_n_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
+#define vsra_n_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
+#define vsra_n_u64(a, b, __c) __extension__ ({ \
+ uint64x1_t __a = (a); uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vsra_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
+#define vsraq_n_s8(a, b, __c) __extension__ ({ \
+ int8x16_t __a = (a); int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vsraq_n_v(__a, __b, __c, 32); })
+#define vsraq_n_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
+#define vsraq_n_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
+#define vsraq_n_s64(a, b, __c) __extension__ ({ \
+ int64x2_t __a = (a); int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
+#define vsraq_n_u8(a, b, __c) __extension__ ({ \
+ uint8x16_t __a = (a); uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
+#define vsraq_n_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
+#define vsraq_n_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
+#define vsraq_n_u64(a, b, __c) __extension__ ({ \
+ uint64x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vsraq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
+
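+/* vsra_n_*: shift __b right by the immediate __c and accumulate into __a, the
+ * non-rounding counterpart of vrsra_n above.  Sketch:
+ *
+ *   uint8x8_t r = vsra_n_u8(vdup_n_u8(1), vdup_n_u8(8), 2);  // 1 + (8>>2) = 3
+ */
+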
+#define vsri_n_s8(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int8x8_t __b = (b); \
+ (int8x8_t)__builtin_neon_vsri_n_v(__a, __b, __c, 0); })
+#define vsri_n_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); \
+ (int16x4_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 1); })
+#define vsri_n_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); \
+ (int32x2_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 2); })
+#define vsri_n_s64(a, b, __c) __extension__ ({ \
+ int64x1_t __a = (a); int64x1_t __b = (b); \
+ (int64x1_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 3); })
+#define vsri_n_u8(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint8x8_t __b = (b); \
+ (uint8x8_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 16); })
+#define vsri_n_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); \
+ (uint16x4_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 17); })
+#define vsri_n_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); \
+ (uint32x2_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 18); })
+#define vsri_n_u64(a, b, __c) __extension__ ({ \
+ uint64x1_t __a = (a); uint64x1_t __b = (b); \
+ (uint64x1_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 19); })
+#define vsri_n_p8(a, b, __c) __extension__ ({ \
+ poly8x8_t __a = (a); poly8x8_t __b = (b); \
+ (poly8x8_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 4); })
+#define vsri_n_p16(a, b, __c) __extension__ ({ \
+ poly16x4_t __a = (a); poly16x4_t __b = (b); \
+ (poly16x4_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 5); })
+#define vsriq_n_s8(a, b, __c) __extension__ ({ \
+ int8x16_t __a = (a); int8x16_t __b = (b); \
+ (int8x16_t)__builtin_neon_vsriq_n_v(__a, __b, __c, 32); })
+#define vsriq_n_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ (int16x8_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 33); })
+#define vsriq_n_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ (int32x4_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 34); })
+#define vsriq_n_s64(a, b, __c) __extension__ ({ \
+ int64x2_t __a = (a); int64x2_t __b = (b); \
+ (int64x2_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 35); })
+#define vsriq_n_u8(a, b, __c) __extension__ ({ \
+ uint8x16_t __a = (a); uint8x16_t __b = (b); \
+ (uint8x16_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 48); })
+#define vsriq_n_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint16x8_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 49); })
+#define vsriq_n_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint32x4_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 50); })
+#define vsriq_n_u64(a, b, __c) __extension__ ({ \
+ uint64x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint64x2_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 51); })
+#define vsriq_n_p8(a, b, __c) __extension__ ({ \
+ poly8x16_t __a = (a); poly8x16_t __b = (b); \
+ (poly8x16_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 36); })
+#define vsriq_n_p16(a, b, __c) __extension__ ({ \
+ poly16x8_t __a = (a); poly16x8_t __b = (b); \
+ (poly16x8_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 37); })
+
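+/* vsri_n_*: shift-right-and-insert; each lane becomes (__b >> __c) merged with
+ * the high __c bits of __a, the mirror image of vsli_n.  Sketch:
+ *
+ *   uint8x8_t r = vsri_n_u8(vdup_n_u8(0xf0), vdup_n_u8(0x50), 4);
+ *   // per lane: (0x50 >> 4) | (0xf0 & 0xf0) = 0xf5
+ */
+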
+#define vst1q_u8(__a, b) __extension__ ({ \
+ uint8x16_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 48); })
+#define vst1q_u16(__a, b) __extension__ ({ \
+ uint16x8_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 49); })
+#define vst1q_u32(__a, b) __extension__ ({ \
+ uint32x4_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 50); })
+#define vst1q_u64(__a, b) __extension__ ({ \
+ uint64x2_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 51); })
+#define vst1q_s8(__a, b) __extension__ ({ \
+ int8x16_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, __b, 32); })
+#define vst1q_s16(__a, b) __extension__ ({ \
+ int16x8_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 33); })
+#define vst1q_s32(__a, b) __extension__ ({ \
+ int32x4_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 34); })
+#define vst1q_s64(__a, b) __extension__ ({ \
+ int64x2_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 35); })
+#define vst1q_f16(__a, b) __extension__ ({ \
+ float16x8_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 39); })
+#define vst1q_f32(__a, b) __extension__ ({ \
+ float32x4_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 40); })
+#define vst1q_p8(__a, b) __extension__ ({ \
+ poly8x16_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 36); })
+#define vst1q_p16(__a, b) __extension__ ({ \
+ poly16x8_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 37); })
+#define vst1_u8(__a, b) __extension__ ({ \
+ uint8x8_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 16); })
+#define vst1_u16(__a, b) __extension__ ({ \
+ uint16x4_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 17); })
+#define vst1_u32(__a, b) __extension__ ({ \
+ uint32x2_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 18); })
+#define vst1_u64(__a, b) __extension__ ({ \
+ uint64x1_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 19); })
+#define vst1_s8(__a, b) __extension__ ({ \
+ int8x8_t __b = (b); \
+ __builtin_neon_vst1_v(__a, __b, 0); })
+#define vst1_s16(__a, b) __extension__ ({ \
+ int16x4_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 1); })
+#define vst1_s32(__a, b) __extension__ ({ \
+ int32x2_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 2); })
+#define vst1_s64(__a, b) __extension__ ({ \
+ int64x1_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 3); })
+#define vst1_f16(__a, b) __extension__ ({ \
+ float16x4_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 7); })
+#define vst1_f32(__a, b) __extension__ ({ \
+ float32x2_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 8); })
+#define vst1_p8(__a, b) __extension__ ({ \
+ poly8x8_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 4); })
+#define vst1_p16(__a, b) __extension__ ({ \
+ poly16x4_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 5); })
+
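+/* vst1_* / vst1q_*: store a whole vector to memory; the pointer (__a) does not
+ * need vector alignment.  Sketch with a hypothetical buffer:
+ *
+ *   uint8_t buf[16];
+ *   vst1q_u8(buf, vdupq_n_u8(0x7f));  // writes all 16 bytes
+ */
+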
+#define vst1q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 48); })
+#define vst1q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 49); })
+#define vst1q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 50); })
+#define vst1q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 51); })
+#define vst1q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, __b, __c, 32); })
+#define vst1q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 33); })
+#define vst1q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 34); })
+#define vst1q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 35); })
+#define vst1q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 39); })
+#define vst1q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 40); })
+#define vst1q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 36); })
+#define vst1q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 37); })
+#define vst1_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 16); })
+#define vst1_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 17); })
+#define vst1_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 18); })
+#define vst1_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 19); })
+#define vst1_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, __b, __c, 0); })
+#define vst1_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 1); })
+#define vst1_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 2); })
+#define vst1_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 3); })
+#define vst1_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 7); })
+#define vst1_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 8); })
+#define vst1_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 4); })
+#define vst1_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 5); })
+
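+/* vst1_lane_* / vst1q_lane_*: store only lane __c of the vector to the scalar
+ * address __a.  Sketch:
+ *
+ *   int32_t x;
+ *   vst1q_lane_s32(&x, vdupq_n_s32(42), 1);  // x = lane 1 = 42
+ */
+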
+#define vst2q_u8(__a, b) __extension__ ({ \
+ uint8x16x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 48); })
+#define vst2q_u16(__a, b) __extension__ ({ \
+ uint16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 49); })
+#define vst2q_u32(__a, b) __extension__ ({ \
+ uint32x4x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 50); })
+#define vst2q_s8(__a, b) __extension__ ({ \
+ int8x16x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, __b.val[0], __b.val[1], 32); })
+#define vst2q_s16(__a, b) __extension__ ({ \
+ int16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 33); })
+#define vst2q_s32(__a, b) __extension__ ({ \
+ int32x4x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 34); })
+#define vst2q_f16(__a, b) __extension__ ({ \
+ float16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 39); })
+#define vst2q_f32(__a, b) __extension__ ({ \
+ float32x4x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 40); })
+#define vst2q_p8(__a, b) __extension__ ({ \
+ poly8x16x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 36); })
+#define vst2q_p16(__a, b) __extension__ ({ \
+ poly16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 37); })
+#define vst2_u8(__a, b) __extension__ ({ \
+ uint8x8x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 16); })
+#define vst2_u16(__a, b) __extension__ ({ \
+ uint16x4x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 17); })
+#define vst2_u32(__a, b) __extension__ ({ \
+ uint32x2x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 18); })
+#define vst2_u64(__a, b) __extension__ ({ \
+ uint64x1x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 19); })
+#define vst2_s8(__a, b) __extension__ ({ \
+ int8x8x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, __b.val[0], __b.val[1], 0); })
+#define vst2_s16(__a, b) __extension__ ({ \
+ int16x4x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 1); })
+#define vst2_s32(__a, b) __extension__ ({ \
+ int32x2x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 2); })
+#define vst2_s64(__a, b) __extension__ ({ \
+ int64x1x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 3); })
+#define vst2_f16(__a, b) __extension__ ({ \
+ float16x4x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 7); })
+#define vst2_f32(__a, b) __extension__ ({ \
+ float32x2x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 8); })
+#define vst2_p8(__a, b) __extension__ ({ \
+ poly8x8x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 4); })
+#define vst2_p16(__a, b) __extension__ ({ \
+ poly16x4x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 5); })
+
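+/* vst2_* / vst2q_*: interleaving store; elements of val[0] and val[1] are
+ * written alternately (a0,b0,a1,b1,...), e.g. to rebuild a two-channel stream.
+ * Sketch with a hypothetical stereo buffer:
+ *
+ *   int16_t lr[8];
+ *   int16x4x2_t ch = { { vdup_n_s16(-1), vdup_n_s16(1) } };
+ *   vst2_s16(lr, ch);  // lr = {-1,1,-1,1,-1,1,-1,1}
+ */
+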
+#define vst2q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 49); })
+#define vst2q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 50); })
+#define vst2q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 33); })
+#define vst2q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 34); })
+#define vst2q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 39); })
+#define vst2q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 40); })
+#define vst2q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 37); })
+#define vst2_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 16); })
+#define vst2_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 17); })
+#define vst2_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 18); })
+#define vst2_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, __b.val[0], __b.val[1], __c, 0); })
+#define vst2_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 1); })
+#define vst2_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 2); })
+#define vst2_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 7); })
+#define vst2_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 8); })
+#define vst2_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 4); })
+#define vst2_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 5); })
+
+#define vst3q_u8(__a, b) __extension__ ({ \
+ uint8x16x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 48); })
+#define vst3q_u16(__a, b) __extension__ ({ \
+ uint16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 49); })
+#define vst3q_u32(__a, b) __extension__ ({ \
+ uint32x4x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 50); })
+#define vst3q_s8(__a, b) __extension__ ({ \
+ int8x16x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, __b.val[0], __b.val[1], __b.val[2], 32); })
+#define vst3q_s16(__a, b) __extension__ ({ \
+ int16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 33); })
+#define vst3q_s32(__a, b) __extension__ ({ \
+ int32x4x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 34); })
+#define vst3q_f16(__a, b) __extension__ ({ \
+ float16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 39); })
+#define vst3q_f32(__a, b) __extension__ ({ \
+ float32x4x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 40); })
+#define vst3q_p8(__a, b) __extension__ ({ \
+ poly8x16x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 36); })
+#define vst3q_p16(__a, b) __extension__ ({ \
+ poly16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 37); })
+#define vst3_u8(__a, b) __extension__ ({ \
+ uint8x8x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 16); })
+#define vst3_u16(__a, b) __extension__ ({ \
+ uint16x4x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 17); })
+#define vst3_u32(__a, b) __extension__ ({ \
+ uint32x2x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 18); })
+#define vst3_u64(__a, b) __extension__ ({ \
+ uint64x1x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 19); })
+#define vst3_s8(__a, b) __extension__ ({ \
+ int8x8x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, __b.val[0], __b.val[1], __b.val[2], 0); })
+#define vst3_s16(__a, b) __extension__ ({ \
+ int16x4x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 1); })
+#define vst3_s32(__a, b) __extension__ ({ \
+ int32x2x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 2); })
+#define vst3_s64(__a, b) __extension__ ({ \
+ int64x1x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 3); })
+#define vst3_f16(__a, b) __extension__ ({ \
+ float16x4x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 7); })
+#define vst3_f32(__a, b) __extension__ ({ \
+ float32x2x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 8); })
+#define vst3_p8(__a, b) __extension__ ({ \
+ poly8x8x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 4); })
+#define vst3_p16(__a, b) __extension__ ({ \
+ poly16x4x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 5); })
+
+#define vst3q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 49); })
+#define vst3q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 50); })
+#define vst3q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 33); })
+#define vst3q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 34); })
+#define vst3q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 39); })
+#define vst3q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 40); })
+#define vst3q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 37); })
+#define vst3_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 16); })
+#define vst3_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 17); })
+#define vst3_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 18); })
+#define vst3_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 0); })
+#define vst3_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 1); })
+#define vst3_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 2); })
+#define vst3_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 7); })
+#define vst3_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 8); })
+#define vst3_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 4); })
+#define vst3_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 5); })
+
+#define vst4q_u8(__a, b) __extension__ ({ \
+ uint8x16x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 48); })
+#define vst4q_u16(__a, b) __extension__ ({ \
+ uint16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 49); })
+#define vst4q_u32(__a, b) __extension__ ({ \
+ uint32x4x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 50); })
+#define vst4q_s8(__a, b) __extension__ ({ \
+ int8x16x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 32); })
+#define vst4q_s16(__a, b) __extension__ ({ \
+ int16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 33); })
+#define vst4q_s32(__a, b) __extension__ ({ \
+ int32x4x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 34); })
+#define vst4q_f16(__a, b) __extension__ ({ \
+ float16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 39); })
+#define vst4q_f32(__a, b) __extension__ ({ \
+ float32x4x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 40); })
+#define vst4q_p8(__a, b) __extension__ ({ \
+ poly8x16x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 36); })
+#define vst4q_p16(__a, b) __extension__ ({ \
+ poly16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 37); })
+#define vst4_u8(__a, b) __extension__ ({ \
+ uint8x8x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 16); })
+#define vst4_u16(__a, b) __extension__ ({ \
+ uint16x4x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 17); })
+#define vst4_u32(__a, b) __extension__ ({ \
+ uint32x2x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 18); })
+#define vst4_u64(__a, b) __extension__ ({ \
+ uint64x1x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 19); })
+#define vst4_s8(__a, b) __extension__ ({ \
+ int8x8x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 0); })
+#define vst4_s16(__a, b) __extension__ ({ \
+ int16x4x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 1); })
+#define vst4_s32(__a, b) __extension__ ({ \
+ int32x2x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 2); })
+#define vst4_s64(__a, b) __extension__ ({ \
+ int64x1x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 3); })
+#define vst4_f16(__a, b) __extension__ ({ \
+ float16x4x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 7); })
+#define vst4_f32(__a, b) __extension__ ({ \
+ float32x2x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 8); })
+#define vst4_p8(__a, b) __extension__ ({ \
+ poly8x8x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 4); })
+#define vst4_p16(__a, b) __extension__ ({ \
+ poly16x4x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 5); })
+
+#define vst4q_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 49); })
+#define vst4q_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x4x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 50); })
+#define vst4q_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 33); })
+#define vst4q_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x4x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 34); })
+#define vst4q_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 39); })
+#define vst4q_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x4x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 40); })
+#define vst4q_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x8x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 37); })
+#define vst4_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x8x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 16); })
+#define vst4_lane_u16(__a, b, __c) __extension__ ({ \
+ uint16x4x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 17); })
+#define vst4_lane_u32(__a, b, __c) __extension__ ({ \
+ uint32x2x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 18); })
+#define vst4_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x8x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); })
+#define vst4_lane_s16(__a, b, __c) __extension__ ({ \
+ int16x4x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 1); })
+#define vst4_lane_s32(__a, b, __c) __extension__ ({ \
+ int32x2x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 2); })
+#define vst4_lane_f16(__a, b, __c) __extension__ ({ \
+ float16x4x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 7); })
+#define vst4_lane_f32(__a, b, __c) __extension__ ({ \
+ float32x2x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 8); })
+#define vst4_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x8x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 4); })
+#define vst4_lane_p16(__a, b, __c) __extension__ ({ \
+ poly16x4x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 5); })
+
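+/* vst3_* and vst4_* follow the same pattern with three- and four-way
+ * interleaving (vst3 is the classic RGB store, vst4 the RGBA one), and the
+ * _lane forms store one interleaved element group.  Sketch:
+ *
+ *   uint8_t rgba[8 * 4];
+ *   uint8x8x4_t px = { { vdup_n_u8(255), vdup_n_u8(0), vdup_n_u8(0), vdup_n_u8(255) } };
+ *   vst4_u8(rgba, px);  // writes R,G,B,A,R,G,B,A,...
+ */
+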
+__ai int8x8_t vsub_s8(int8x8_t __a, int8x8_t __b) {
+ return __a - __b; }
+__ai int16x4_t vsub_s16(int16x4_t __a, int16x4_t __b) {
+ return __a - __b; }
+__ai int32x2_t vsub_s32(int32x2_t __a, int32x2_t __b) {
+ return __a - __b; }
+__ai int64x1_t vsub_s64(int64x1_t __a, int64x1_t __b) {
+ return __a - __b; }
+__ai float32x2_t vsub_f32(float32x2_t __a, float32x2_t __b) {
+ return __a - __b; }
+__ai uint8x8_t vsub_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __a - __b; }
+__ai uint16x4_t vsub_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __a - __b; }
+__ai uint32x2_t vsub_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __a - __b; }
+__ai uint64x1_t vsub_u64(uint64x1_t __a, uint64x1_t __b) {
+ return __a - __b; }
+__ai int8x16_t vsubq_s8(int8x16_t __a, int8x16_t __b) {
+ return __a - __b; }
+__ai int16x8_t vsubq_s16(int16x8_t __a, int16x8_t __b) {
+ return __a - __b; }
+__ai int32x4_t vsubq_s32(int32x4_t __a, int32x4_t __b) {
+ return __a - __b; }
+__ai int64x2_t vsubq_s64(int64x2_t __a, int64x2_t __b) {
+ return __a - __b; }
+__ai float32x4_t vsubq_f32(float32x4_t __a, float32x4_t __b) {
+ return __a - __b; }
+__ai uint8x16_t vsubq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __a - __b; }
+__ai uint16x8_t vsubq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __a - __b; }
+__ai uint32x4_t vsubq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __a - __b; }
+__ai uint64x2_t vsubq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __a - __b; }
+
+__ai int8x8_t vsubhn_s16(int16x8_t __a, int16x8_t __b) {
+ return (int8x8_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 0); }
+__ai int16x4_t vsubhn_s32(int32x4_t __a, int32x4_t __b) {
+ return (int16x4_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 1); }
+__ai int32x2_t vsubhn_s64(int64x2_t __a, int64x2_t __b) {
+ return (int32x2_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 2); }
+__ai uint8x8_t vsubhn_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 16); }
+__ai uint16x4_t vsubhn_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 17); }
+__ai uint32x2_t vsubhn_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vsubhn_v((int8x16_t)__a, (int8x16_t)__b, 18); }
+
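+/* vsubhn_*: subtract, then keep the (truncated) high half of each lane; the
+ * rounding variant is vrsubhn above.  Sketch:
+ *
+ *   int8x8_t r = vsubhn_s16(vdupq_n_s16(0x1234), vdupq_n_s16(0x0034));
+ *   // per lane: 0x1200 >> 8 = 0x12
+ */
+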
+__ai int16x8_t vsubl_s8(int8x8_t __a, int8x8_t __b) {
+ return vmovl_s8(__a) - vmovl_s8(__b); }
+__ai int32x4_t vsubl_s16(int16x4_t __a, int16x4_t __b) {
+ return vmovl_s16(__a) - vmovl_s16(__b); }
+__ai int64x2_t vsubl_s32(int32x2_t __a, int32x2_t __b) {
+ return vmovl_s32(__a) - vmovl_s32(__b); }
+__ai uint16x8_t vsubl_u8(uint8x8_t __a, uint8x8_t __b) {
+ return vmovl_u8(__a) - vmovl_u8(__b); }
+__ai uint32x4_t vsubl_u16(uint16x4_t __a, uint16x4_t __b) {
+ return vmovl_u16(__a) - vmovl_u16(__b); }
+__ai uint64x2_t vsubl_u32(uint32x2_t __a, uint32x2_t __b) {
+ return vmovl_u32(__a) - vmovl_u32(__b); }
+
+__ai int16x8_t vsubw_s8(int16x8_t __a, int8x8_t __b) {
+ return __a - vmovl_s8(__b); }
+__ai int32x4_t vsubw_s16(int32x4_t __a, int16x4_t __b) {
+ return __a - vmovl_s16(__b); }
+__ai int64x2_t vsubw_s32(int64x2_t __a, int32x2_t __b) {
+ return __a - vmovl_s32(__b); }
+__ai uint16x8_t vsubw_u8(uint16x8_t __a, uint8x8_t __b) {
+ return __a - vmovl_u8(__b); }
+__ai uint32x4_t vsubw_u16(uint32x4_t __a, uint16x4_t __b) {
+ return __a - vmovl_u16(__b); }
+__ai uint64x2_t vsubw_u32(uint64x2_t __a, uint32x2_t __b) {
+ return __a - vmovl_u32(__b); }
+
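+/* vsubl_* widens both operands before subtracting (so the difference cannot
+ * overflow); vsubw_* subtracts a narrow vector from an already-wide one.
+ * Sketch:
+ *
+ *   int16x8_t d = vsubl_s8(vdup_n_s8(-100), vdup_n_s8(100));  // -200, no wrap
+ */
+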
+__ai uint8x8_t vtbl1_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtbl1_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai int8x8_t vtbl1_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vtbl1_v(__a, __b, 0); }
+__ai poly8x8_t vtbl1_p8(poly8x8_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vtbl1_v((int8x8_t)__a, (int8x8_t)__b, 4); }
+
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtbl2_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__b, 16); }
+__ai int8x8_t vtbl2_s8(int8x8x2_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vtbl2_v(__a.val[0], __a.val[1], __b, 0); }
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vtbl2_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__b, 4); }
+
+__ai uint8x8_t vtbl3_u8(uint8x8x3_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtbl3_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__b, 16); }
+__ai int8x8_t vtbl3_s8(int8x8x3_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vtbl3_v(__a.val[0], __a.val[1], __a.val[2], __b, 0); }
+__ai poly8x8_t vtbl3_p8(poly8x8x3_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vtbl3_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__b, 4); }
+
+__ai uint8x8_t vtbl4_u8(uint8x8x4_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtbl4_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__a.val[3], (int8x8_t)__b, 16); }
+__ai int8x8_t vtbl4_s8(int8x8x4_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vtbl4_v(__a.val[0], __a.val[1], __a.val[2], __a.val[3], __b, 0); }
+__ai poly8x8_t vtbl4_p8(poly8x8x4_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vtbl4_v((int8x8_t)__a.val[0], (int8x8_t)__a.val[1], (int8x8_t)__a.val[2], (int8x8_t)__a.val[3], (int8x8_t)__b, 4); }
+
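+/* vtbl1..4: byte-wise table lookup; __b holds indices into an 8/16/24/32-byte
+ * table, and any out-of-range index produces 0.  Sketch (reverse 8 bytes):
+ *
+ *   uint8x8_t idx = { 7, 6, 5, 4, 3, 2, 1, 0 };
+ *   uint8x8_t r = vtbl1_u8(data, idx);  // `data` is a hypothetical input
+ */
+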
+__ai uint8x8_t vtbx1_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vtbx1_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 16); }
+__ai int8x8_t vtbx1_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vtbx1_v(__a, __b, __c, 0); }
+__ai poly8x8_t vtbx1_p8(poly8x8_t __a, poly8x8_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vtbx1_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 4); }
+
+__ai uint8x8_t vtbx2_u8(uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vtbx2_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__c, 16); }
+__ai int8x8_t vtbx2_s8(int8x8_t __a, int8x8x2_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vtbx2_v(__a, __b.val[0], __b.val[1], __c, 0); }
+__ai poly8x8_t vtbx2_p8(poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vtbx2_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__c, 4); }
+
+__ai uint8x8_t vtbx3_u8(uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vtbx3_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__c, 16); }
+__ai int8x8_t vtbx3_s8(int8x8_t __a, int8x8x3_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vtbx3_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 0); }
+__ai poly8x8_t vtbx3_p8(poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vtbx3_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__c, 4); }
+
+__ai uint8x8_t vtbx4_u8(uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vtbx4_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], (int8x8_t)__c, 16); }
+__ai int8x8_t vtbx4_s8(int8x8_t __a, int8x8x4_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vtbx4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); }
+__ai poly8x8_t vtbx4_p8(poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vtbx4_v((int8x8_t)__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], (int8x8_t)__c, 4); }
+
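+/* vtrn*: transpose. Treats pairs of lanes of __a and __b as 2x2 matrices and
+ * transposes them, returning both result vectors in an x2 struct. */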
+__ai int8x8x2_t vtrn_s8(int8x8_t __a, int8x8_t __b) {
+ int8x8x2_t r; __builtin_neon_vtrn_v(&r, __a, __b, 0); return r; }
+__ai int16x4x2_t vtrn_s16(int16x4_t __a, int16x4_t __b) {
+ int16x4x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 1); return r; }
+__ai int32x2x2_t vtrn_s32(int32x2_t __a, int32x2_t __b) {
+ int32x2x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 2); return r; }
+__ai uint8x8x2_t vtrn_u8(uint8x8_t __a, uint8x8_t __b) {
+ uint8x8x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 16); return r; }
+__ai uint16x4x2_t vtrn_u16(uint16x4_t __a, uint16x4_t __b) {
+ uint16x4x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 17); return r; }
+__ai uint32x2x2_t vtrn_u32(uint32x2_t __a, uint32x2_t __b) {
+ uint32x2x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 18); return r; }
+__ai float32x2x2_t vtrn_f32(float32x2_t __a, float32x2_t __b) {
+ float32x2x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 8); return r; }
+__ai poly8x8x2_t vtrn_p8(poly8x8_t __a, poly8x8_t __b) {
+ poly8x8x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 4); return r; }
+__ai poly16x4x2_t vtrn_p16(poly16x4_t __a, poly16x4_t __b) {
+ poly16x4x2_t r; __builtin_neon_vtrn_v(&r, (int8x8_t)__a, (int8x8_t)__b, 5); return r; }
+__ai int8x16x2_t vtrnq_s8(int8x16_t __a, int8x16_t __b) {
+ int8x16x2_t r; __builtin_neon_vtrnq_v(&r, __a, __b, 32); return r; }
+__ai int16x8x2_t vtrnq_s16(int16x8_t __a, int16x8_t __b) {
+ int16x8x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 33); return r; }
+__ai int32x4x2_t vtrnq_s32(int32x4_t __a, int32x4_t __b) {
+ int32x4x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 34); return r; }
+__ai uint8x16x2_t vtrnq_u8(uint8x16_t __a, uint8x16_t __b) {
+ uint8x16x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 48); return r; }
+__ai uint16x8x2_t vtrnq_u16(uint16x8_t __a, uint16x8_t __b) {
+ uint16x8x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 49); return r; }
+__ai uint32x4x2_t vtrnq_u32(uint32x4_t __a, uint32x4_t __b) {
+ uint32x4x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 50); return r; }
+__ai float32x4x2_t vtrnq_f32(float32x4_t __a, float32x4_t __b) {
+ float32x4x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 40); return r; }
+__ai poly8x16x2_t vtrnq_p8(poly8x16_t __a, poly8x16_t __b) {
+ poly8x16x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 36); return r; }
+__ai poly16x8x2_t vtrnq_p16(poly16x8_t __a, poly16x8_t __b) {
+ poly16x8x2_t r; __builtin_neon_vtrnq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 37); return r; }
+
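+/* vtst*: bitwise test. Each result lane is all ones if (__a & __b) is
+ * nonzero in that lane, otherwise all zeros. */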
+__ai uint8x8_t vtst_s8(int8x8_t __a, int8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtst_v(__a, __b, 16); }
+__ai uint16x4_t vtst_s16(int16x4_t __a, int16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vtst_s32(int32x2_t __a, int32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint8x8_t vtst_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vtst_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vtst_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint8x8_t vtst_p8(poly8x8_t __a, poly8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vtst_p16(poly16x4_t __a, poly16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint8x16_t vtstq_s8(int8x16_t __a, int8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vtstq_v(__a, __b, 48); }
+__ai uint16x8_t vtstq_s16(int16x8_t __a, int16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vtstq_s32(int32x4_t __a, int32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint8x16_t vtstq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vtstq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vtstq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint8x16_t vtstq_p8(poly8x16_t __a, poly8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vtstq_p16(poly16x8_t __a, poly16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+
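+/* vuzp*: unzip. De-interleaves the two inputs: val[0] gets the even-indexed
+ * lanes of the pair, val[1] the odd-indexed lanes. */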
+__ai int8x8x2_t vuzp_s8(int8x8_t __a, int8x8_t __b) {
+ int8x8x2_t r; __builtin_neon_vuzp_v(&r, __a, __b, 0); return r; }
+__ai int16x4x2_t vuzp_s16(int16x4_t __a, int16x4_t __b) {
+ int16x4x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 1); return r; }
+__ai int32x2x2_t vuzp_s32(int32x2_t __a, int32x2_t __b) {
+ int32x2x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 2); return r; }
+__ai uint8x8x2_t vuzp_u8(uint8x8_t __a, uint8x8_t __b) {
+ uint8x8x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 16); return r; }
+__ai uint16x4x2_t vuzp_u16(uint16x4_t __a, uint16x4_t __b) {
+ uint16x4x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 17); return r; }
+__ai uint32x2x2_t vuzp_u32(uint32x2_t __a, uint32x2_t __b) {
+ uint32x2x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 18); return r; }
+__ai float32x2x2_t vuzp_f32(float32x2_t __a, float32x2_t __b) {
+ float32x2x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 8); return r; }
+__ai poly8x8x2_t vuzp_p8(poly8x8_t __a, poly8x8_t __b) {
+ poly8x8x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 4); return r; }
+__ai poly16x4x2_t vuzp_p16(poly16x4_t __a, poly16x4_t __b) {
+ poly16x4x2_t r; __builtin_neon_vuzp_v(&r, (int8x8_t)__a, (int8x8_t)__b, 5); return r; }
+__ai int8x16x2_t vuzpq_s8(int8x16_t __a, int8x16_t __b) {
+ int8x16x2_t r; __builtin_neon_vuzpq_v(&r, __a, __b, 32); return r; }
+__ai int16x8x2_t vuzpq_s16(int16x8_t __a, int16x8_t __b) {
+ int16x8x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 33); return r; }
+__ai int32x4x2_t vuzpq_s32(int32x4_t __a, int32x4_t __b) {
+ int32x4x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 34); return r; }
+__ai uint8x16x2_t vuzpq_u8(uint8x16_t __a, uint8x16_t __b) {
+ uint8x16x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 48); return r; }
+__ai uint16x8x2_t vuzpq_u16(uint16x8_t __a, uint16x8_t __b) {
+ uint16x8x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 49); return r; }
+__ai uint32x4x2_t vuzpq_u32(uint32x4_t __a, uint32x4_t __b) {
+ uint32x4x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 50); return r; }
+__ai float32x4x2_t vuzpq_f32(float32x4_t __a, float32x4_t __b) {
+ float32x4x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 40); return r; }
+__ai poly8x16x2_t vuzpq_p8(poly8x16_t __a, poly8x16_t __b) {
+ poly8x16x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 36); return r; }
+__ai poly16x8x2_t vuzpq_p16(poly16x8_t __a, poly16x8_t __b) {
+ poly16x8x2_t r; __builtin_neon_vuzpq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 37); return r; }
+
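+/* vzip*: zip. Interleaves the lanes of __a and __b; val[0] holds the low
+ * halves interleaved, val[1] the high halves. */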
+__ai int8x8x2_t vzip_s8(int8x8_t __a, int8x8_t __b) {
+ int8x8x2_t r; __builtin_neon_vzip_v(&r, __a, __b, 0); return r; }
+__ai int16x4x2_t vzip_s16(int16x4_t __a, int16x4_t __b) {
+ int16x4x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 1); return r; }
+__ai int32x2x2_t vzip_s32(int32x2_t __a, int32x2_t __b) {
+ int32x2x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 2); return r; }
+__ai uint8x8x2_t vzip_u8(uint8x8_t __a, uint8x8_t __b) {
+ uint8x8x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 16); return r; }
+__ai uint16x4x2_t vzip_u16(uint16x4_t __a, uint16x4_t __b) {
+ uint16x4x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 17); return r; }
+__ai uint32x2x2_t vzip_u32(uint32x2_t __a, uint32x2_t __b) {
+ uint32x2x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 18); return r; }
+__ai float32x2x2_t vzip_f32(float32x2_t __a, float32x2_t __b) {
+ float32x2x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 8); return r; }
+__ai poly8x8x2_t vzip_p8(poly8x8_t __a, poly8x8_t __b) {
+ poly8x8x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 4); return r; }
+__ai poly16x4x2_t vzip_p16(poly16x4_t __a, poly16x4_t __b) {
+ poly16x4x2_t r; __builtin_neon_vzip_v(&r, (int8x8_t)__a, (int8x8_t)__b, 5); return r; }
+__ai int8x16x2_t vzipq_s8(int8x16_t __a, int8x16_t __b) {
+ int8x16x2_t r; __builtin_neon_vzipq_v(&r, __a, __b, 32); return r; }
+__ai int16x8x2_t vzipq_s16(int16x8_t __a, int16x8_t __b) {
+ int16x8x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 33); return r; }
+__ai int32x4x2_t vzipq_s32(int32x4_t __a, int32x4_t __b) {
+ int32x4x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 34); return r; }
+__ai uint8x16x2_t vzipq_u8(uint8x16_t __a, uint8x16_t __b) {
+ uint8x16x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 48); return r; }
+__ai uint16x8x2_t vzipq_u16(uint16x8_t __a, uint16x8_t __b) {
+ uint16x8x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 49); return r; }
+__ai uint32x4x2_t vzipq_u32(uint32x4_t __a, uint32x4_t __b) {
+ uint32x4x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 50); return r; }
+__ai float32x4x2_t vzipq_f32(float32x4_t __a, float32x4_t __b) {
+ float32x4x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 40); return r; }
+__ai poly8x16x2_t vzipq_p8(poly8x16_t __a, poly8x16_t __b) {
+ poly8x16x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 36); return r; }
+__ai poly16x8x2_t vzipq_p16(poly16x8_t __a, poly16x8_t __b) {
+ poly16x8x2_t r; __builtin_neon_vzipq_v(&r, (int8x16_t)__a, (int8x16_t)__b, 37); return r; }
+
+#ifdef __aarch64__
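+/* AArch64-only intrinsics follow. vmovl_high_*: widen the high half of a
+ * Q register (implemented here as a shift-left-long by 0 of the high
+ * doubleword). */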
+__ai int16x8_t vmovl_high_s8(int8x16_t __a) {
+ int8x8_t __a1 = vget_high_s8(__a);
+ return (int16x8_t)vshll_n_s8(__a1, 0); }
+__ai int32x4_t vmovl_high_s16(int16x8_t __a) {
+ int16x4_t __a1 = vget_high_s16(__a);
+ return (int32x4_t)vshll_n_s16(__a1, 0); }
+__ai int64x2_t vmovl_high_s32(int32x4_t __a) {
+ int32x2_t __a1 = vget_high_s32(__a);
+ return (int64x2_t)vshll_n_s32(__a1, 0); }
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __a) {
+ uint8x8_t __a1 = vget_high_u8(__a);
+ return (uint16x8_t)vshll_n_u8(__a1, 0); }
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __a) {
+ uint16x4_t __a1 = vget_high_u16(__a);
+ return (uint32x4_t)vshll_n_u16(__a1, 0); }
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __a) {
+ uint32x2_t __a1 = vget_high_u32(__a);
+ return (uint64x2_t)vshll_n_u32(__a1, 0); }
+
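+/* vmull_high_*: widening multiply of the high halves of two Q registers. */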
+__ai int16x8_t vmull_high_s8(int8x16_t __a, int8x16_t __b) {
+ return vmull_s8(vget_high_s8(__a), vget_high_s8(__b)); }
+__ai int32x4_t vmull_high_s16(int16x8_t __a, int16x8_t __b) {
+ return vmull_s16(vget_high_s16(__a), vget_high_s16(__b)); }
+__ai int64x2_t vmull_high_s32(int32x4_t __a, int32x4_t __b) {
+ return vmull_s32(vget_high_s32(__a), vget_high_s32(__b)); }
+__ai uint16x8_t vmull_high_u8(uint8x16_t __a, uint8x16_t __b) {
+ return vmull_u8(vget_high_u8(__a), vget_high_u8(__b)); }
+__ai uint32x4_t vmull_high_u16(uint16x8_t __a, uint16x8_t __b) {
+ return vmull_u16(vget_high_u16(__a), vget_high_u16(__b)); }
+__ai uint64x2_t vmull_high_u32(uint32x4_t __a, uint32x4_t __b) {
+ return vmull_u32(vget_high_u32(__a), vget_high_u32(__b)); }
+__ai poly16x8_t vmull_high_p8(poly8x16_t __a, poly8x16_t __b) {
+ return vmull_p8(vget_high_p8(__a), vget_high_p8(__b)); }
+
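+/* vabdl_high_*: widening absolute difference of the high halves. */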
+__ai int16x8_t vabdl_high_s8(int8x16_t __a, int8x16_t __b) {
+ return vabdl_s8(vget_high_s8(__a), vget_high_s8(__b)); }
+__ai int32x4_t vabdl_high_s16(int16x8_t __a, int16x8_t __b) {
+ return vabdl_s16(vget_high_s16(__a), vget_high_s16(__b)); }
+__ai int64x2_t vabdl_high_s32(int32x4_t __a, int32x4_t __b) {
+ return vabdl_s32(vget_high_s32(__a), vget_high_s32(__b)); }
+__ai uint16x8_t vabdl_high_u8(uint8x16_t __a, uint8x16_t __b) {
+ return vabdl_u8(vget_high_u8(__a), vget_high_u8(__b)); }
+__ai uint32x4_t vabdl_high_u16(uint16x8_t __a, uint16x8_t __b) {
+ return vabdl_u16(vget_high_u16(__a), vget_high_u16(__b)); }
+__ai uint64x2_t vabdl_high_u32(uint32x4_t __a, uint32x4_t __b) {
+ return vabdl_u32(vget_high_u32(__a), vget_high_u32(__b)); }
+
+__ai float64x1_t vabd_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vabd_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float64x2_t vabdq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vabdq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai int64x1_t vabs_s64(int64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vabs_v((int8x8_t)__a, 3); }
+__ai float64x1_t vabs_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vabs_v((int8x8_t)__a, 9); }
+__ai int64x2_t vabsq_s64(int64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vabsq_v((int8x16_t)__a, 35); }
+__ai float64x2_t vabsq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vabsq_v((int8x16_t)__a, 41); }
+
+__ai float64x1_t vadd_f64(float64x1_t __a, float64x1_t __b) {
+ return __a + __b; }
+__ai float64x2_t vaddq_f64(float64x2_t __a, float64x2_t __b) {
+ return __a + __b; }
+
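+/* vpaddq_*: pairwise add across full Q registers (AArch64 extends the
+ * D-register-only vpadd forms). */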
+__ai int8x16_t vpaddq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vpaddq_v(__a, __b, 32); }
+__ai int16x8_t vpaddq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vpaddq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vpaddq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+__ai uint8x16_t vpaddq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vpaddq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vpaddq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vpaddq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+__ai float32x4_t vpaddq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vpaddq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vpaddq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
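+/* vbsl*: bitwise select. Result bits come from __b where the corresponding
+ * bit of the mask __a is set, otherwise from __c. */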
+__ai float64x1_t vbsl_f64(uint64x1_t __a, float64x1_t __b, float64x1_t __c) {
+ return (float64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 9); }
+__ai float64x2_t vbslq_f64(uint64x2_t __a, float64x2_t __b, float64x2_t __c) {
+ return (float64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 41); }
+__ai poly64x1_t vbsl_p64(uint64x1_t __a, poly64x1_t __b, poly64x1_t __c) {
+ return (poly64x1_t)__builtin_neon_vbsl_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 6); }
+__ai poly64x2_t vbslq_p64(uint64x2_t __a, poly64x2_t __b, poly64x2_t __c) {
+ return (poly64x2_t)__builtin_neon_vbslq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 38); }
+
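+/* 64-bit lane comparisons, expressed directly with C vector operators; each
+ * result lane is all ones when the predicate holds, all zeros otherwise. */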
+__ai uint64x1_t vceq_s64(int64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)(__a == __b); }
+__ai uint64x1_t vceq_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)(__a == __b); }
+__ai uint64x1_t vceq_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)(__a == __b); }
+__ai uint64x2_t vceqq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)(__a == __b); }
+__ai uint64x2_t vceqq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)(__a == __b); }
+__ai uint64x2_t vceqq_s64(int64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)(__a == __b); }
+__ai uint64x1_t vceq_p64(poly64x1_t __a, poly64x1_t __b) {
+ return (uint64x1_t)(__a == __b); }
+__ai uint64x2_t vceqq_p64(poly64x2_t __a, poly64x2_t __b) {
+ return (uint64x2_t)(__a == __b); }
+
+__ai uint64x1_t vcge_s64(int64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)(__a >= __b); }
+__ai uint64x1_t vcge_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)(__a >= __b); }
+__ai uint64x2_t vcgeq_s64(int64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)(__a >= __b); }
+__ai uint64x2_t vcgeq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)(__a >= __b); }
+__ai uint64x1_t vcge_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)(__a >= __b); }
+__ai uint64x2_t vcgeq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)(__a >= __b); }
+
+__ai uint64x1_t vcgt_s64(int64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)(__a > __b); }
+__ai uint64x1_t vcgt_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)(__a > __b); }
+__ai uint64x2_t vcgtq_s64(int64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)(__a > __b); }
+__ai uint64x2_t vcgtq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)(__a > __b); }
+__ai uint64x1_t vcgt_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)(__a > __b); }
+__ai uint64x2_t vcgtq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)(__a > __b); }
+
+__ai uint64x1_t vcle_s64(int64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)(__a <= __b); }
+__ai uint64x1_t vcle_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)(__a <= __b); }
+__ai uint64x2_t vcleq_s64(int64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)(__a <= __b); }
+__ai uint64x2_t vcleq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)(__a <= __b); }
+__ai uint64x1_t vcle_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)(__a <= __b); }
+__ai uint64x2_t vcleq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)(__a <= __b); }
+
+__ai uint64x1_t vclt_s64(int64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)(__a < __b); }
+__ai uint64x1_t vclt_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)(__a < __b); }
+__ai uint64x2_t vcltq_s64(int64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)(__a < __b); }
+__ai uint64x2_t vcltq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)(__a < __b); }
+__ai uint64x1_t vclt_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)(__a < __b); }
+__ai uint64x2_t vcltq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)(__a < __b); }
+
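+/* vceqz/vcgez/vcgtz/vclez/vcltz: compare each lane against zero. */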
+__ai uint8x8_t vceqz_s8(int8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vceqz_v(__a, 16); }
+__ai uint16x4_t vceqz_s16(int16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vceqz_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vceqz_s32(int32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vceqz_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vceqz_s64(int64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
+__ai uint32x2_t vceqz_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vceqz_v((int8x8_t)__a, 18); }
+__ai uint8x8_t vceqz_u8(uint8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vceqz_v((int8x8_t)__a, 16); }
+__ai uint16x4_t vceqz_u16(uint16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vceqz_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vceqz_u32(uint32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vceqz_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vceqz_u64(uint64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
+__ai uint8x8_t vceqz_p8(poly8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vceqz_v((int8x8_t)__a, 16); }
+__ai uint16x4_t vceqz_p16(poly16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vceqz_v((int8x8_t)__a, 17); }
+__ai uint64x1_t vceqz_p64(poly64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
+__ai uint8x16_t vceqzq_s8(int8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vceqzq_v(__a, 48); }
+__ai uint16x8_t vceqzq_s16(int16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vceqzq_s32(int32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vceqzq_s64(int64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
+__ai uint32x4_t vceqzq_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 50); }
+__ai uint8x16_t vceqzq_u8(uint8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 48); }
+__ai uint16x8_t vceqzq_u16(uint16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vceqzq_u32(uint32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vceqzq_u64(uint64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
+__ai uint8x16_t vceqzq_p8(poly8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 48); }
+__ai uint16x8_t vceqzq_p16(poly16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 49); }
+__ai uint64x1_t vceqz_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vceqz_v((int8x8_t)__a, 19); }
+__ai uint64x2_t vceqzq_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
+__ai uint64x2_t vceqzq_p64(poly64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vceqzq_v((int8x16_t)__a, 51); }
+
+__ai uint8x8_t vcgez_s8(int8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vcgez_v(__a, 16); }
+__ai uint16x4_t vcgez_s16(int16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vcgez_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vcgez_s32(int32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcgez_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vcgez_s64(int64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcgez_v((int8x8_t)__a, 19); }
+__ai uint32x2_t vcgez_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcgez_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vcgez_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcgez_v((int8x8_t)__a, 19); }
+__ai uint8x16_t vcgezq_s8(int8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vcgezq_v(__a, 48); }
+__ai uint16x8_t vcgezq_s16(int16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vcgezq_s32(int32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vcgezq_s64(int64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 51); }
+__ai uint32x4_t vcgezq_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vcgezq_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcgezq_v((int8x16_t)__a, 51); }
+
+__ai uint8x8_t vcgtz_s8(int8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vcgtz_v(__a, 16); }
+__ai uint16x4_t vcgtz_s16(int16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vcgtz_s32(int32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vcgtz_s64(int64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 19); }
+__ai uint32x2_t vcgtz_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vcgtz_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcgtz_v((int8x8_t)__a, 19); }
+__ai uint8x16_t vcgtzq_s8(int8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vcgtzq_v(__a, 48); }
+__ai uint16x8_t vcgtzq_s16(int16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vcgtzq_s32(int32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vcgtzq_s64(int64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 51); }
+__ai uint32x4_t vcgtzq_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vcgtzq_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcgtzq_v((int8x16_t)__a, 51); }
+
+__ai uint8x8_t vclez_s8(int8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vclez_v(__a, 16); }
+__ai uint16x4_t vclez_s16(int16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vclez_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vclez_s32(int32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vclez_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vclez_s64(int64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vclez_v((int8x8_t)__a, 19); }
+__ai uint32x2_t vclez_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vclez_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vclez_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vclez_v((int8x8_t)__a, 19); }
+__ai uint8x16_t vclezq_s8(int8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vclezq_v(__a, 48); }
+__ai uint16x8_t vclezq_s16(int16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vclezq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vclezq_s32(int32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vclezq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vclezq_s64(int64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vclezq_v((int8x16_t)__a, 51); }
+__ai uint32x4_t vclezq_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vclezq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vclezq_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vclezq_v((int8x16_t)__a, 51); }
+
+__ai uint8x8_t vcltz_s8(int8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vcltz_v(__a, 16); }
+__ai uint16x4_t vcltz_s16(int16x4_t __a) {
+ return (uint16x4_t)__builtin_neon_vcltz_v((int8x8_t)__a, 17); }
+__ai uint32x2_t vcltz_s32(int32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcltz_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vcltz_s64(int64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcltz_v((int8x8_t)__a, 19); }
+__ai uint32x2_t vcltz_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcltz_v((int8x8_t)__a, 18); }
+__ai uint64x1_t vcltz_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcltz_v((int8x8_t)__a, 19); }
+__ai uint8x16_t vcltzq_s8(int8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vcltzq_v(__a, 48); }
+__ai uint16x8_t vcltzq_s16(int16x8_t __a) {
+ return (uint16x8_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 49); }
+__ai uint32x4_t vcltzq_s32(int32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vcltzq_s64(int64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 51); }
+__ai uint32x4_t vcltzq_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 50); }
+__ai uint64x2_t vcltzq_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcltzq_v((int8x16_t)__a, 51); }
+
+__ai uint64x1_t vtst_s64(int64x1_t __a, int64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x1_t vtst_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x2_t vtstq_s64(int64x2_t __a, int64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+__ai uint64x2_t vtstq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+__ai uint64x1_t vtst_p64(poly64x1_t __a, poly64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vtst_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x2_t vtstq_p64(poly64x2_t __a, poly64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vtstq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+__ai float64x2_t vcombine_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+__ai poly64x2_t vcombine_p64(poly64x1_t __a, poly64x1_t __b) {
+ return (poly64x2_t)__builtin_shufflevector((int64x1_t)__a, (int64x1_t)__b, 0, 1); }
+
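+/* vcopy{q}_lane{q}_*: copy lane __d1 of c1 into lane __b1 of a1, built from
+ * the matching vget/vset lane pairs. */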
+#define vcopyq_lane_s8(a1, __b1, c1, __d1) __extension__ ({ \
+ int8x16_t __a1 = (a1); int8x8_t __c1 = (c1); \
+ int8_t __c2 = vget_lane_s8(__c1, __d1); \
+ vsetq_lane_s8(__c2, __a1, __b1); })
+#define vcopyq_lane_s16(a1, __b1, c1, __d1) __extension__ ({ \
+ int16x8_t __a1 = (a1); int16x4_t __c1 = (c1); \
+ int16_t __c2 = vget_lane_s16(__c1, __d1); \
+ vsetq_lane_s16(__c2, __a1, __b1); })
+#define vcopyq_lane_s32(a1, __b1, c1, __d1) __extension__ ({ \
+ int32x4_t __a1 = (a1); int32x2_t __c1 = (c1); \
+ int32_t __c2 = vget_lane_s32(__c1, __d1); \
+ vsetq_lane_s32(__c2, __a1, __b1); })
+#define vcopyq_lane_s64(a1, __b1, c1, __d1) __extension__ ({ \
+ int64x2_t __a1 = (a1); int64x1_t __c1 = (c1); \
+ int64_t __c2 = vget_lane_s64(__c1, __d1); \
+ vsetq_lane_s64(__c2, __a1, __b1); })
+#define vcopyq_lane_u8(a1, __b1, c1, __d1) __extension__ ({ \
+ uint8x16_t __a1 = (a1); uint8x8_t __c1 = (c1); \
+ uint8_t __c2 = vget_lane_u8(__c1, __d1); \
+ vsetq_lane_u8(__c2, __a1, __b1); })
+#define vcopyq_lane_u16(a1, __b1, c1, __d1) __extension__ ({ \
+ uint16x8_t __a1 = (a1); uint16x4_t __c1 = (c1); \
+ uint16_t __c2 = vget_lane_u16(__c1, __d1); \
+ vsetq_lane_u16(__c2, __a1, __b1); })
+#define vcopyq_lane_u32(a1, __b1, c1, __d1) __extension__ ({ \
+ uint32x4_t __a1 = (a1); uint32x2_t __c1 = (c1); \
+ uint32_t __c2 = vget_lane_u32(__c1, __d1); \
+ vsetq_lane_u32(__c2, __a1, __b1); })
+#define vcopyq_lane_u64(a1, __b1, c1, __d1) __extension__ ({ \
+ uint64x2_t __a1 = (a1); uint64x1_t __c1 = (c1); \
+ uint64_t __c2 = vget_lane_u64(__c1, __d1); \
+ vsetq_lane_u64(__c2, __a1, __b1); })
+#define vcopyq_lane_p8(a1, __b1, c1, __d1) __extension__ ({ \
+ poly8x16_t __a1 = (a1); poly8x8_t __c1 = (c1); \
+ poly8_t __c2 = vget_lane_p8(__c1, __d1); \
+ vsetq_lane_p8(__c2, __a1, __b1); })
+#define vcopyq_lane_p16(a1, __b1, c1, __d1) __extension__ ({ \
+ poly16x8_t __a1 = (a1); poly16x4_t __c1 = (c1); \
+ poly16_t __c2 = vget_lane_p16(__c1, __d1); \
+ vsetq_lane_p16(__c2, __a1, __b1); })
+#define vcopyq_lane_f32(a1, __b1, c1, __d1) __extension__ ({ \
+ float32x4_t __a1 = (a1); float32x2_t __c1 = (c1); \
+ float32_t __c2 = vget_lane_f32(__c1, __d1); \
+ vsetq_lane_f32(__c2, __a1, __b1); })
+#define vcopyq_lane_f64(a1, __b1, c1, __d1) __extension__ ({ \
+ float64x2_t __a1 = (a1); float64x1_t __c1 = (c1); \
+ float64_t __c2 = vget_lane_f64(__c1, __d1); \
+ vsetq_lane_f64(__c2, __a1, __b1); })
+#define vcopyq_lane_p64(a1, __b1, c1, __d1) __extension__ ({ \
+ poly64x2_t __a1 = (a1); poly64x1_t __c1 = (c1); \
+ poly64_t __c2 = vget_lane_p64(__c1, __d1); \
+ vsetq_lane_p64(__c2, __a1, __b1); })
+
+#define vcopyq_laneq_s8(a1, __b1, c1, __d1) __extension__ ({ \
+ int8x16_t __a1 = (a1); int8x16_t __c1 = (c1); \
+ int8_t __c2 = vgetq_lane_s8(__c1, __d1); \
+ vsetq_lane_s8(__c2, __a1, __b1); })
+#define vcopyq_laneq_s16(a1, __b1, c1, __d1) __extension__ ({ \
+ int16x8_t __a1 = (a1); int16x8_t __c1 = (c1); \
+ int16_t __c2 = vgetq_lane_s16(__c1, __d1); \
+ vsetq_lane_s16(__c2, __a1, __b1); })
+#define vcopyq_laneq_s32(a1, __b1, c1, __d1) __extension__ ({ \
+ int32x4_t __a1 = (a1); int32x4_t __c1 = (c1); \
+ int32_t __c2 = vgetq_lane_s32(__c1, __d1); \
+ vsetq_lane_s32(__c2, __a1, __b1); })
+#define vcopyq_laneq_s64(a1, __b1, c1, __d1) __extension__ ({ \
+ int64x2_t __a1 = (a1); int64x2_t __c1 = (c1); \
+ int64_t __c2 = vgetq_lane_s64(__c1, __d1); \
+ vsetq_lane_s64(__c2, __a1, __b1); })
+#define vcopyq_laneq_u8(a1, __b1, c1, __d1) __extension__ ({ \
+ uint8x16_t __a1 = (a1); uint8x16_t __c1 = (c1); \
+ uint8_t __c2 = vgetq_lane_u8(__c1, __d1); \
+ vsetq_lane_u8(__c2, __a1, __b1); })
+#define vcopyq_laneq_u16(a1, __b1, c1, __d1) __extension__ ({ \
+ uint16x8_t __a1 = (a1); uint16x8_t __c1 = (c1); \
+ uint16_t __c2 = vgetq_lane_u16(__c1, __d1); \
+ vsetq_lane_u16(__c2, __a1, __b1); })
+#define vcopyq_laneq_u32(a1, __b1, c1, __d1) __extension__ ({ \
+ uint32x4_t __a1 = (a1); uint32x4_t __c1 = (c1); \
+ uint32_t __c2 = vgetq_lane_u32(__c1, __d1); \
+ vsetq_lane_u32(__c2, __a1, __b1); })
+#define vcopyq_laneq_u64(a1, __b1, c1, __d1) __extension__ ({ \
+ uint64x2_t __a1 = (a1); uint64x2_t __c1 = (c1); \
+ uint64_t __c2 = vgetq_lane_u64(__c1, __d1); \
+ vsetq_lane_u64(__c2, __a1, __b1); })
+#define vcopyq_laneq_p8(a1, __b1, c1, __d1) __extension__ ({ \
+ poly8x16_t __a1 = (a1); poly8x16_t __c1 = (c1); \
+ poly8_t __c2 = vgetq_lane_p8(__c1, __d1); \
+ vsetq_lane_p8(__c2, __a1, __b1); })
+#define vcopyq_laneq_p16(a1, __b1, c1, __d1) __extension__ ({ \
+ poly16x8_t __a1 = (a1); poly16x8_t __c1 = (c1); \
+ poly16_t __c2 = vgetq_lane_p16(__c1, __d1); \
+ vsetq_lane_p16(__c2, __a1, __b1); })
+#define vcopyq_laneq_f32(a1, __b1, c1, __d1) __extension__ ({ \
+ float32x4_t __a1 = (a1); float32x4_t __c1 = (c1); \
+ float32_t __c2 = vgetq_lane_f32(__c1, __d1); \
+ vsetq_lane_f32(__c2, __a1, __b1); })
+#define vcopyq_laneq_f64(a1, __b1, c1, __d1) __extension__ ({ \
+ float64x2_t __a1 = (a1); float64x2_t __c1 = (c1); \
+ float64_t __c2 = vgetq_lane_f64(__c1, __d1); \
+ vsetq_lane_f64(__c2, __a1, __b1); })
+#define vcopyq_laneq_p64(a1, __b1, c1, __d1) __extension__ ({ \
+ poly64x2_t __a1 = (a1); poly64x2_t __c1 = (c1); \
+ poly64_t __c2 = vgetq_lane_p64(__c1, __d1); \
+ vsetq_lane_p64(__c2, __a1, __b1); })
+
+#define vcopy_lane_s8(a1, __b1, c1, __d1) __extension__ ({ \
+ int8x8_t __a1 = (a1); int8x8_t __c1 = (c1); \
+ int8_t __c2 = vget_lane_s8(__c1, __d1); \
+ vset_lane_s8(__c2, __a1, __b1); })
+#define vcopy_lane_s16(a1, __b1, c1, __d1) __extension__ ({ \
+ int16x4_t __a1 = (a1); int16x4_t __c1 = (c1); \
+ int16_t __c2 = vget_lane_s16(__c1, __d1); \
+ vset_lane_s16(__c2, __a1, __b1); })
+#define vcopy_lane_s32(a1, __b1, c1, __d1) __extension__ ({ \
+ int32x2_t __a1 = (a1); int32x2_t __c1 = (c1); \
+ int32_t __c2 = vget_lane_s32(__c1, __d1); \
+ vset_lane_s32(__c2, __a1, __b1); })
+#define vcopy_lane_s64(a1, __b1, c1, __d1) __extension__ ({ \
+ int64x1_t __a1 = (a1); int64x1_t __c1 = (c1); \
+ int64_t __c2 = vget_lane_s64(__c1, __d1); \
+ vset_lane_s64(__c2, __a1, __b1); })
+#define vcopy_lane_p8(a1, __b1, c1, __d1) __extension__ ({ \
+ poly8x8_t __a1 = (a1); poly8x8_t __c1 = (c1); \
+ poly8_t __c2 = vget_lane_p8(__c1, __d1); \
+ vset_lane_p8(__c2, __a1, __b1); })
+#define vcopy_lane_p16(a1, __b1, c1, __d1) __extension__ ({ \
+ poly16x4_t __a1 = (a1); poly16x4_t __c1 = (c1); \
+ poly16_t __c2 = vget_lane_p16(__c1, __d1); \
+ vset_lane_p16(__c2, __a1, __b1); })
+#define vcopy_lane_u8(a1, __b1, c1, __d1) __extension__ ({ \
+ uint8x8_t __a1 = (a1); uint8x8_t __c1 = (c1); \
+ uint8_t __c2 = vget_lane_u8(__c1, __d1); \
+ vset_lane_u8(__c2, __a1, __b1); })
+#define vcopy_lane_u16(a1, __b1, c1, __d1) __extension__ ({ \
+ uint16x4_t __a1 = (a1); uint16x4_t __c1 = (c1); \
+ uint16_t __c2 = vget_lane_u16(__c1, __d1); \
+ vset_lane_u16(__c2, __a1, __b1); })
+#define vcopy_lane_u32(a1, __b1, c1, __d1) __extension__ ({ \
+ uint32x2_t __a1 = (a1); uint32x2_t __c1 = (c1); \
+ uint32_t __c2 = vget_lane_u32(__c1, __d1); \
+ vset_lane_u32(__c2, __a1, __b1); })
+#define vcopy_lane_u64(a1, __b1, c1, __d1) __extension__ ({ \
+ uint64x1_t __a1 = (a1); uint64x1_t __c1 = (c1); \
+ uint64_t __c2 = vget_lane_u64(__c1, __d1); \
+ vset_lane_u64(__c2, __a1, __b1); })
+#define vcopy_lane_p64(a1, __b1, c1, __d1) __extension__ ({ \
+ poly64x1_t __a1 = (a1); poly64x1_t __c1 = (c1); \
+ poly64_t __c2 = vget_lane_p64(__c1, __d1); \
+ vset_lane_p64(__c2, __a1, __b1); })
+#define vcopy_lane_f32(a1, __b1, c1, __d1) __extension__ ({ \
+ float32x2_t __a1 = (a1); float32x2_t __c1 = (c1); \
+ float32_t __c2 = vget_lane_f32(__c1, __d1); \
+ vset_lane_f32(__c2, __a1, __b1); })
+#define vcopy_lane_f64(a1, __b1, c1, __d1) __extension__ ({ \
+ float64x1_t __a1 = (a1); float64x1_t __c1 = (c1); \
+ float64_t __c2 = vget_lane_f64(__c1, __d1); \
+ vset_lane_f64(__c2, __a1, __b1); })
+
+#define vcopy_laneq_s8(a1, __b1, c1, __d1) __extension__ ({ \
+ int8x8_t __a1 = (a1); int8x16_t __c1 = (c1); \
+ int8_t __c2 = vgetq_lane_s8(__c1, __d1); \
+ vset_lane_s8(__c2, __a1, __b1); })
+#define vcopy_laneq_s16(a1, __b1, c1, __d1) __extension__ ({ \
+ int16x4_t __a1 = (a1); int16x8_t __c1 = (c1); \
+ int16_t __c2 = vgetq_lane_s16(__c1, __d1); \
+ vset_lane_s16(__c2, __a1, __b1); })
+#define vcopy_laneq_s32(a1, __b1, c1, __d1) __extension__ ({ \
+ int32x2_t __a1 = (a1); int32x4_t __c1 = (c1); \
+ int32_t __c2 = vgetq_lane_s32(__c1, __d1); \
+ vset_lane_s32(__c2, __a1, __b1); })
+#define vcopy_laneq_s64(a1, __b1, c1, __d1) __extension__ ({ \
+ int64x1_t __a1 = (a1); int64x2_t __c1 = (c1); \
+ int64_t __c2 = vgetq_lane_s64(__c1, __d1); \
+ vset_lane_s64(__c2, __a1, __b1); })
+#define vcopy_laneq_p8(a1, __b1, c1, __d1) __extension__ ({ \
+ poly8x8_t __a1 = (a1); poly8x16_t __c1 = (c1); \
+ poly8_t __c2 = vgetq_lane_p8(__c1, __d1); \
+ vset_lane_p8(__c2, __a1, __b1); })
+#define vcopy_laneq_p16(a1, __b1, c1, __d1) __extension__ ({ \
+ poly16x4_t __a1 = (a1); poly16x8_t __c1 = (c1); \
+ poly16_t __c2 = vgetq_lane_p16(__c1, __d1); \
+ vset_lane_p16(__c2, __a1, __b1); })
+#define vcopy_laneq_p64(a1, __b1, c1, __d1) __extension__ ({ \
+ poly64x1_t __a1 = (a1); poly64x2_t __c1 = (c1); \
+ poly64_t __c2 = vgetq_lane_p64(__c1, __d1); \
+ vset_lane_p64(__c2, __a1, __b1); })
+#define vcopy_laneq_u8(a1, __b1, c1, __d1) __extension__ ({ \
+ uint8x8_t __a1 = (a1); uint8x16_t __c1 = (c1); \
+ uint8_t __c2 = vgetq_lane_u8(__c1, __d1); \
+ vset_lane_u8(__c2, __a1, __b1); })
+#define vcopy_laneq_u16(a1, __b1, c1, __d1) __extension__ ({ \
+ uint16x4_t __a1 = (a1); uint16x8_t __c1 = (c1); \
+ uint16_t __c2 = vgetq_lane_u16(__c1, __d1); \
+ vset_lane_u16(__c2, __a1, __b1); })
+#define vcopy_laneq_u32(a1, __b1, c1, __d1) __extension__ ({ \
+ uint32x2_t __a1 = (a1); uint32x4_t __c1 = (c1); \
+ uint32_t __c2 = vgetq_lane_u32(__c1, __d1); \
+ vset_lane_u32(__c2, __a1, __b1); })
+#define vcopy_laneq_u64(a1, __b1, c1, __d1) __extension__ ({ \
+ uint64x1_t __a1 = (a1); uint64x2_t __c1 = (c1); \
+ uint64_t __c2 = vgetq_lane_u64(__c1, __d1); \
+ vset_lane_u64(__c2, __a1, __b1); })
+#define vcopy_laneq_f32(a1, __b1, c1, __d1) __extension__ ({ \
+ float32x2_t __a1 = (a1); float32x4_t __c1 = (c1); \
+ float32_t __c2 = vgetq_lane_f32(__c1, __d1); \
+ vset_lane_f32(__c2, __a1, __b1); })
+#define vcopy_laneq_f64(a1, __b1, c1, __d1) __extension__ ({ \
+ float64x1_t __a1 = (a1); float64x2_t __c1 = (c1); \
+ float64_t __c2 = vgetq_lane_f64(__c1, __d1); \
+ vset_lane_f64(__c2, __a1, __b1); })
+
+__ai float64x1_t vcreate_f64(uint64_t __a) {
+ return (float64x1_t)__a; }
+__ai poly64x1_t vcreate_p64(uint64_t __a) {
+ return (poly64x1_t)__a; }
+
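+/* vcvt{q}_n_f64_*: fixed-point to floating-point conversion; __b gives the
+ * number of fractional bits. */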
+#define vcvt_n_f64_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (float64x1_t)__builtin_neon_vcvt_n_f64_v((int8x8_t)__a, __b, 3); })
+#define vcvt_n_f64_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (float64x1_t)__builtin_neon_vcvt_n_f64_v((int8x8_t)__a, __b, 19); })
+#define vcvtq_n_f64_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (float64x2_t)__builtin_neon_vcvtq_n_f64_v((int8x16_t)__a, __b, 35); })
+#define vcvtq_n_f64_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (float64x2_t)__builtin_neon_vcvtq_n_f64_v((int8x16_t)__a, __b, 51); })
+
+__ai float64x1_t vdup_n_f64(float64_t __a) {
+ return (float64x1_t){ __a }; }
+__ai float64x2_t vdupq_n_f64(float64_t __a) {
+ return (float64x2_t){ __a, __a }; }
+__ai poly64x1_t vdup_n_p64(poly64_t __a) {
+ return (poly64x1_t){ __a }; }
+__ai poly64x2_t vdupq_n_p64(poly64_t __a) {
+ return (poly64x2_t){ __a, __a }; }
+
+__ai uint64x1_t vcage_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vcage_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x2_t vcageq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vcageq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+__ai uint64x1_t vcagt_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vcagt_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x2_t vcagtq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vcagtq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+__ai uint64x1_t vcale_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vcale_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x2_t vcaleq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vcaleq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
+__ai uint64x1_t vcalt_f64(float64x1_t __a, float64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vcalt_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint64x2_t vcaltq_f64(float64x2_t __a, float64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vcaltq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
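+/* vcvt{a,m,n,p}: float-to-integer conversion with an explicit rounding mode:
+ * a = to nearest, ties away from zero; m = toward -inf; n = to nearest,
+ * ties to even; p = toward +inf. */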
+__ai int32x2_t vcvta_s32_f32(float32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vcvta_s32_v((int8x8_t)__a, 2); }
+__ai int32x4_t vcvtaq_s32_f32(float32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vcvtaq_s32_v((int8x16_t)__a, 34); }
+
+__ai int64x1_t vcvta_s64_f64(float64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vcvta_s64_v((int8x8_t)__a, 3); }
+__ai int64x2_t vcvtaq_s64_f64(float64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vcvtaq_s64_v((int8x16_t)__a, 35); }
+
+__ai uint32x2_t vcvta_u32_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcvta_u32_v((int8x8_t)__a, 18); }
+__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcvtaq_u32_v((int8x16_t)__a, 50); }
+
+__ai uint64x1_t vcvta_u64_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcvta_u64_v((int8x8_t)__a, 19); }
+__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcvtaq_u64_v((int8x16_t)__a, 51); }
+
+__ai int32x2_t vcvtm_s32_f32(float32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vcvtm_s32_v((int8x8_t)__a, 2); }
+__ai int32x4_t vcvtmq_s32_f32(float32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vcvtmq_s32_v((int8x16_t)__a, 34); }
+
+__ai int64x1_t vcvtm_s64_f64(float64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vcvtm_s64_v((int8x8_t)__a, 3); }
+__ai int64x2_t vcvtmq_s64_f64(float64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vcvtmq_s64_v((int8x16_t)__a, 35); }
+
+__ai uint32x2_t vcvtm_u32_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcvtm_u32_v((int8x8_t)__a, 18); }
+__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcvtmq_u32_v((int8x16_t)__a, 50); }
+
+__ai uint64x1_t vcvtm_u64_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcvtm_u64_v((int8x8_t)__a, 19); }
+__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcvtmq_u64_v((int8x16_t)__a, 51); }
+
+__ai int32x2_t vcvtn_s32_f32(float32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vcvtn_s32_v((int8x8_t)__a, 2); }
+__ai int32x4_t vcvtnq_s32_f32(float32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vcvtnq_s32_v((int8x16_t)__a, 34); }
+
+__ai int64x1_t vcvtn_s64_f64(float64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vcvtn_s64_v((int8x8_t)__a, 3); }
+__ai int64x2_t vcvtnq_s64_f64(float64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vcvtnq_s64_v((int8x16_t)__a, 35); }
+
+__ai uint32x2_t vcvtn_u32_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcvtn_u32_v((int8x8_t)__a, 18); }
+__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcvtnq_u32_v((int8x16_t)__a, 50); }
+
+__ai uint64x1_t vcvtn_u64_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcvtn_u64_v((int8x8_t)__a, 19); }
+__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcvtnq_u64_v((int8x16_t)__a, 51); }
+
+__ai int32x2_t vcvtp_s32_f32(float32x2_t __a) {
+ return (int32x2_t)__builtin_neon_vcvtp_s32_v((int8x8_t)__a, 2); }
+__ai int32x4_t vcvtpq_s32_f32(float32x4_t __a) {
+ return (int32x4_t)__builtin_neon_vcvtpq_s32_v((int8x16_t)__a, 34); }
+
+__ai int64x1_t vcvtp_s64_f64(float64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vcvtp_s64_v((int8x8_t)__a, 3); }
+__ai int64x2_t vcvtpq_s64_f64(float64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vcvtpq_s64_v((int8x16_t)__a, 35); }
+
+__ai uint32x2_t vcvtp_u32_f32(float32x2_t __a) {
+ return (uint32x2_t)__builtin_neon_vcvtp_u32_v((int8x8_t)__a, 18); }
+__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __a) {
+ return (uint32x4_t)__builtin_neon_vcvtpq_u32_v((int8x16_t)__a, 50); }
+
+__ai uint64x1_t vcvtp_u64_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcvtp_u64_v((int8x8_t)__a, 19); }
+__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcvtpq_u64_v((int8x16_t)__a, 51); }
+
+#define vcvt_n_s64_f64(a, __b) __extension__ ({ \
+ float64x1_t __a = (a); \
+ (int64x1_t)__builtin_neon_vcvt_n_s64_v((int8x8_t)__a, __b, 3); })
+#define vcvtq_n_s64_f64(a, __b) __extension__ ({ \
+ float64x2_t __a = (a); \
+ (int64x2_t)__builtin_neon_vcvtq_n_s64_v((int8x16_t)__a, __b, 35); })
+
+#define vcvt_n_u64_f64(a, __b) __extension__ ({ \
+ float64x1_t __a = (a); \
+ (uint64x1_t)__builtin_neon_vcvt_n_u64_v((int8x8_t)__a, __b, 19); })
+#define vcvtq_n_u64_f64(a, __b) __extension__ ({ \
+ float64x2_t __a = (a); \
+ (uint64x2_t)__builtin_neon_vcvtq_n_u64_v((int8x16_t)__a, __b, 51); })
+
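+/* vdiv*: true vector floating-point division (AArch64 only). */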
+__ai float32x2_t vdiv_f32(float32x2_t __a, float32x2_t __b) {
+ return __a / __b; }
+__ai float64x1_t vdiv_f64(float64x1_t __a, float64x1_t __b) {
+ return __a / __b; }
+__ai float32x4_t vdivq_f32(float32x4_t __a, float32x4_t __b) {
+ return __a / __b; }
+__ai float64x2_t vdivq_f64(float64x2_t __a, float64x2_t __b) {
+ return __a / __b; }
+
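+/* vmaxnm/vminnm: IEEE 754-2008 maxNum/minNum semantics; a quiet NaN in one
+ * operand yields the other (numeric) operand. */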
+__ai float32x2_t vmaxnm_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vmaxnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float64x1_t vmaxnm_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vmaxnm_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float32x4_t vmaxnmq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vmaxnmq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float32x2_t vpmaxnm_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vpmaxnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float32x4_t vpmaxnmq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vpmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vpmaxnmq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vpmaxnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float32_t vmaxnmv_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vmaxnmv_f32(__a); }
+__ai float32_t vmaxnmvq_f32(float32x4_t __a) {
+ return (float32_t)__builtin_neon_vmaxnmvq_f32(__a); }
+__ai float64_t vmaxnmvq_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vmaxnmvq_f64(__a); }
+
+__ai float32x2_t vminnm_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vminnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float64x1_t vminnm_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vminnm_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float32x4_t vminnmq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vminnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vminnmq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vminnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float32x2_t vpminnm_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vpminnm_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float32x4_t vpminnmq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vpminnmq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vpminnmq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vpminnmq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float32_t vminnmv_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vminnmv_f32(__a); }
+__ai float32_t vminnmvq_f32(float32x4_t __a) {
+ return (float32_t)__builtin_neon_vminnmvq_f32(__a); }
+__ai float64_t vminnmvq_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vminnmvq_f64(__a); }
+
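+/* vfma/vfms: fused multiply-add/subtract of __b and __c into __a with a
+ * single rounding. */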
+__ai float64x1_t vfma_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
+ return (float64x1_t)__builtin_neon_vfma_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 9); }
+__ai float64x2_t vfmaq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
+ return (float64x2_t)__builtin_neon_vfmaq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 41); }
+
+__ai float32x2_t vfma_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
+ return vfma_f32(__a, __b, (float32x2_t){ __c, __c }); }
+__ai float32x4_t vfmaq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
+ return vfmaq_f32(__a, __b, (float32x4_t){ __c, __c, __c, __c }); }
+
+__ai float32x2_t vfms_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c) {
+ return (float32x2_t)__builtin_neon_vfms_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 8); }
+__ai float64x1_t vfms_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
+ return (float64x1_t)__builtin_neon_vfms_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, 9); }
+__ai float32x4_t vfmsq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c) {
+ return (float32x4_t)__builtin_neon_vfmsq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 40); }
+__ai float64x2_t vfmsq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
+ return (float64x2_t)__builtin_neon_vfmsq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 41); }
+
+__ai float32x2_t vfms_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c) {
+ return vfms_f32(__a, __b, (float32x2_t){ __c, __c }); }
+__ai float32x4_t vfmsq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c) {
+ return vfmsq_f32(__a, __b, (float32x4_t){ __c, __c, __c, __c }); }
+
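+/* vrecpe/vrecps: reciprocal estimate and the Newton-Raphson refinement step
+ * (vrecps computes 2 - __a * __b). */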
+__ai float64x1_t vrecpe_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrecpe_v((int8x8_t)__a, 9); }
+__ai float64x2_t vrecpeq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrecpeq_v((int8x16_t)__a, 41); }
+
+__ai float64x1_t vrecps_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vrecps_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float64x2_t vrecpsq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vrecpsq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
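+/* vrnd* family: round to integral. The suffix selects the mode: a = ties
+ * away from zero, i = current FPCR mode, m = toward -inf, n = ties to even,
+ * p = toward +inf, x = current mode raising inexact, none = toward zero. */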
+__ai float32x2_t vrnda_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrnda_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrnda_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrnda_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndaq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndaq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndaq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndaq_v((int8x16_t)__a, 41); }
+
+__ai float32x2_t vrndi_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrndi_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrndi_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrndi_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndiq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndiq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndiq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndiq_v((int8x16_t)__a, 41); }
+
+__ai float32x2_t vrndm_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrndm_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrndm_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrndm_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndmq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndmq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndmq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndmq_v((int8x16_t)__a, 41); }
+
+__ai float32x2_t vrndn_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrndn_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrndn_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrndn_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndnq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndnq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndnq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndnq_v((int8x16_t)__a, 41); }
+
+__ai float32x2_t vrndp_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrndp_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrndp_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrndp_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndpq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndpq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndpq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndpq_v((int8x16_t)__a, 41); }
+
+__ai float32x2_t vrndx_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrndx_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrndx_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrndx_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndxq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndxq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndxq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndxq_v((int8x16_t)__a, 41); }
+
+__ai float32x2_t vrnd_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vrnd_v((int8x8_t)__a, 8); }
+__ai float64x1_t vrnd_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrnd_v((int8x8_t)__a, 9); }
+__ai float32x4_t vrndq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vrndq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vrndq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrndq_v((int8x16_t)__a, 41); }
+
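+/* vrsqrte/vrsqrts: reciprocal square-root estimate and refinement step
+ * (vrsqrts computes (3 - __a * __b) / 2). */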
+__ai float64x1_t vrsqrte_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vrsqrte_v((int8x8_t)__a, 9); }
+__ai float64x2_t vrsqrteq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vrsqrteq_v((int8x16_t)__a, 41); }
+
+__ai float64x1_t vrsqrts_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vrsqrts_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float64x2_t vrsqrtsq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vrsqrtsq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float32x2_t vsqrt_f32(float32x2_t __a) {
+ return (float32x2_t)__builtin_neon_vsqrt_v((int8x8_t)__a, 8); }
+__ai float64x1_t vsqrt_f64(float64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vsqrt_v((int8x8_t)__a, 9); }
+__ai float32x4_t vsqrtq_f32(float32x4_t __a) {
+ return (float32x4_t)__builtin_neon_vsqrtq_v((int8x16_t)__a, 40); }
+__ai float64x2_t vsqrtq_f64(float64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vsqrtq_v((int8x16_t)__a, 41); }
+
+#define vget_lane_f64(a, __b) __extension__ ({ \
+ float64x1_t __a = (a); \
+ (float64_t)__builtin_neon_vget_lane_f64(__a, __b); })
+#define vgetq_lane_f64(a, __b) __extension__ ({ \
+ float64x2_t __a = (a); \
+ (float64_t)__builtin_neon_vgetq_lane_f64(__a, __b); })
+#define vget_lane_p64(a, __b) __extension__ ({ \
+ poly64x1_t __a = (a); \
+ (poly64_t)__builtin_neon_vget_lane_i64((int64x1_t)__a, __b); })
+#define vgetq_lane_p64(a, __b) __extension__ ({ \
+ poly64x2_t __a = (a); \
+ (poly64_t)__builtin_neon_vgetq_lane_i64((int64x2_t)__a, __b); })
+
+#define vld1q_f64(__a) __extension__ ({ \
+ (float64x2_t)__builtin_neon_vld1q_v(__a, 41); })
+#define vld1_f64(__a) __extension__ ({ \
+ (float64x1_t)__builtin_neon_vld1_v(__a, 9); })
+#define vld1_p64(__a) __extension__ ({ \
+ (poly64x1_t)__builtin_neon_vld1_v(__a, 6); })
+#define vld1q_p64(__a) __extension__ ({ \
+ (poly64x2_t)__builtin_neon_vld1q_v(__a, 38); })
+
+#define vld1q_dup_f64(__a) __extension__ ({ \
+ (float64x2_t)__builtin_neon_vld1q_dup_v(__a, 41); })
+#define vld1q_dup_p64(__a) __extension__ ({ \
+ (poly64x2_t)__builtin_neon_vld1q_dup_v(__a, 38); })
+#define vld1_dup_f64(__a) __extension__ ({ \
+ (float64x1_t)__builtin_neon_vld1_dup_v(__a, 9); })
+#define vld1_dup_p64(__a) __extension__ ({ \
+ (poly64x1_t)__builtin_neon_vld1_dup_v(__a, 6); })
+
+#define vld1q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2_t __b = (b); \
+ (float64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 41); })
+#define vld1q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2_t __b = (b); \
+ (poly64x2_t)__builtin_neon_vld1q_lane_v(__a, (int8x16_t)__b, __c, 38); })
+#define vld1_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1_t __b = (b); \
+ (float64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 9); })
+#define vld1_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1_t __b = (b); \
+ (poly64x1_t)__builtin_neon_vld1_lane_v(__a, (int8x8_t)__b, __c, 6); })
+
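/* [Editor's illustration, not part of this commit] vld1q_lane_* loads a
 * single element into one lane and preserves the rest, so it pairs
 * naturally with a dup or a previous full load. A sketch, assuming an
 * AArch64 target: */
#include <arm_neon.h>
static float64x2_t replace_hi(const float64_t *p, float64x2_t v) {
  return vld1q_lane_f64(p, v, 1);   /* overwrite lane 1, keep lane 0 */
}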
+#define vld1q_u8_x2(__a) __extension__ ({ \
+ uint8x16x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 48); r; })
+#define vld1q_u16_x2(__a) __extension__ ({ \
+ uint16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 49); r; })
+#define vld1q_u32_x2(__a) __extension__ ({ \
+ uint32x4x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 50); r; })
+#define vld1q_u64_x2(__a) __extension__ ({ \
+ uint64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 51); r; })
+#define vld1q_s8_x2(__a) __extension__ ({ \
+ int8x16x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 32); r; })
+#define vld1q_s16_x2(__a) __extension__ ({ \
+ int16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 33); r; })
+#define vld1q_s32_x2(__a) __extension__ ({ \
+ int32x4x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 34); r; })
+#define vld1q_s64_x2(__a) __extension__ ({ \
+ int64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 35); r; })
+#define vld1q_f16_x2(__a) __extension__ ({ \
+ float16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 39); r; })
+#define vld1q_f32_x2(__a) __extension__ ({ \
+ float32x4x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 40); r; })
+#define vld1q_f64_x2(__a) __extension__ ({ \
+ float64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 41); r; })
+#define vld1q_p8_x2(__a) __extension__ ({ \
+ poly8x16x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 36); r; })
+#define vld1q_p16_x2(__a) __extension__ ({ \
+ poly16x8x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 37); r; })
+#define vld1q_p64_x2(__a) __extension__ ({ \
+ poly64x2x2_t r; __builtin_neon_vld1q_x2_v(&r, __a, 38); r; })
+#define vld1_u8_x2(__a) __extension__ ({ \
+ uint8x8x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 16); r; })
+#define vld1_u16_x2(__a) __extension__ ({ \
+ uint16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 17); r; })
+#define vld1_u32_x2(__a) __extension__ ({ \
+ uint32x2x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 18); r; })
+#define vld1_u64_x2(__a) __extension__ ({ \
+ uint64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 19); r; })
+#define vld1_s8_x2(__a) __extension__ ({ \
+ int8x8x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 0); r; })
+#define vld1_s16_x2(__a) __extension__ ({ \
+ int16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 1); r; })
+#define vld1_s32_x2(__a) __extension__ ({ \
+ int32x2x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 2); r; })
+#define vld1_s64_x2(__a) __extension__ ({ \
+ int64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 3); r; })
+#define vld1_f16_x2(__a) __extension__ ({ \
+ float16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 7); r; })
+#define vld1_f32_x2(__a) __extension__ ({ \
+ float32x2x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 8); r; })
+#define vld1_f64_x2(__a) __extension__ ({ \
+ float64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 9); r; })
+#define vld1_p8_x2(__a) __extension__ ({ \
+ poly8x8x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 4); r; })
+#define vld1_p16_x2(__a) __extension__ ({ \
+ poly16x4x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 5); r; })
+#define vld1_p64_x2(__a) __extension__ ({ \
+ poly64x1x2_t r; __builtin_neon_vld1_x2_v(&r, __a, 6); r; })
+
+#define vld2q_u64(__a) __extension__ ({ \
+ uint64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 51); r; })
+#define vld2q_s64(__a) __extension__ ({ \
+ int64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 35); r; })
+#define vld2q_f64(__a) __extension__ ({ \
+ float64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 41); r; })
+#define vld2_f64(__a) __extension__ ({ \
+ float64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 9); r; })
+#define vld2_p64(__a) __extension__ ({ \
+ poly64x1x2_t r; __builtin_neon_vld2_v(&r, __a, 6); r; })
+#define vld2q_p64(__a) __extension__ ({ \
+ poly64x2x2_t r; __builtin_neon_vld2q_v(&r, __a, 38); r; })
+
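/* [Editor's illustration, not part of this commit] The vld2* loads
 * de-interleave: even-indexed source elements land in val[0] and
 * odd-indexed ones in val[1]. A sketch for interleaved {re, im} pairs,
 * assuming an AArch64 target: */
#include <arm_neon.h>
static float64x2_t real_parts(const float64_t *interleaved) {
  float64x2x2_t ri = vld2q_f64(interleaved);
  return ri.val[0];                 /* elements 0 and 2 of the source */
}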
+#define vld2q_dup_u8(__a) __extension__ ({ \
+ uint8x16x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 48); r; })
+#define vld2q_dup_u16(__a) __extension__ ({ \
+ uint16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 49); r; })
+#define vld2q_dup_u32(__a) __extension__ ({ \
+ uint32x4x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 50); r; })
+#define vld2q_dup_u64(__a) __extension__ ({ \
+ uint64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 51); r; })
+#define vld2q_dup_s8(__a) __extension__ ({ \
+ int8x16x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 32); r; })
+#define vld2q_dup_s16(__a) __extension__ ({ \
+ int16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 33); r; })
+#define vld2q_dup_s32(__a) __extension__ ({ \
+ int32x4x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 34); r; })
+#define vld2q_dup_s64(__a) __extension__ ({ \
+ int64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 35); r; })
+#define vld2q_dup_f16(__a) __extension__ ({ \
+ float16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 39); r; })
+#define vld2q_dup_f32(__a) __extension__ ({ \
+ float32x4x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 40); r; })
+#define vld2q_dup_f64(__a) __extension__ ({ \
+ float64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 41); r; })
+#define vld2q_dup_p8(__a) __extension__ ({ \
+ poly8x16x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 36); r; })
+#define vld2q_dup_p16(__a) __extension__ ({ \
+ poly16x8x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 37); r; })
+#define vld2q_dup_p64(__a) __extension__ ({ \
+ poly64x2x2_t r; __builtin_neon_vld2q_dup_v(&r, __a, 38); r; })
+#define vld2_dup_f64(__a) __extension__ ({ \
+ float64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 9); r; })
+#define vld2_dup_p64(__a) __extension__ ({ \
+ poly64x1x2_t r; __builtin_neon_vld2_dup_v(&r, __a, 6); r; })
+
+#define vld2q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16x2_t __b = (b); \
+ uint8x16x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 48); r; })
+#define vld2q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2x2_t __b = (b); \
+ uint64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 51); r; })
+#define vld2q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16x2_t __b = (b); \
+ int8x16x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, __b.val[0], __b.val[1], __c, 32); r; })
+#define vld2q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2x2_t __b = (b); \
+ int64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 35); r; })
+#define vld2q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2x2_t __b = (b); \
+ float64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 41); r; })
+#define vld2q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16x2_t __b = (b); \
+ poly8x16x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 36); r; })
+#define vld2q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2x2_t __b = (b); \
+ poly64x2x2_t r; __builtin_neon_vld2q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 38); r; })
+#define vld2_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1x2_t __b = (b); \
+ uint64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 19); r; })
+#define vld2_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1x2_t __b = (b); \
+ int64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 3); r; })
+#define vld2_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1x2_t __b = (b); \
+ float64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 9); r; })
+#define vld2_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1x2_t __b = (b); \
+ poly64x1x2_t r; __builtin_neon_vld2_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 6); r; })
+
+#define vld3q_u64(__a) __extension__ ({ \
+ uint64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 51); r; })
+#define vld3q_s64(__a) __extension__ ({ \
+ int64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 35); r; })
+#define vld3q_f64(__a) __extension__ ({ \
+ float64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 41); r; })
+#define vld3_f64(__a) __extension__ ({ \
+ float64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 9); r; })
+#define vld3_p64(__a) __extension__ ({ \
+ poly64x1x3_t r; __builtin_neon_vld3_v(&r, __a, 6); r; })
+#define vld3q_p64(__a) __extension__ ({ \
+ poly64x2x3_t r; __builtin_neon_vld3q_v(&r, __a, 38); r; })
+
+#define vld3q_dup_u8(__a) __extension__ ({ \
+ uint8x16x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 48); r; })
+#define vld3q_dup_u16(__a) __extension__ ({ \
+ uint16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 49); r; })
+#define vld3q_dup_u32(__a) __extension__ ({ \
+ uint32x4x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 50); r; })
+#define vld3q_dup_u64(__a) __extension__ ({ \
+ uint64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 51); r; })
+#define vld3q_dup_s8(__a) __extension__ ({ \
+ int8x16x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 32); r; })
+#define vld3q_dup_s16(__a) __extension__ ({ \
+ int16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 33); r; })
+#define vld3q_dup_s32(__a) __extension__ ({ \
+ int32x4x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 34); r; })
+#define vld3q_dup_s64(__a) __extension__ ({ \
+ int64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 35); r; })
+#define vld3q_dup_f16(__a) __extension__ ({ \
+ float16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 39); r; })
+#define vld3q_dup_f32(__a) __extension__ ({ \
+ float32x4x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 40); r; })
+#define vld3q_dup_f64(__a) __extension__ ({ \
+ float64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 41); r; })
+#define vld3q_dup_p8(__a) __extension__ ({ \
+ poly8x16x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 36); r; })
+#define vld3q_dup_p16(__a) __extension__ ({ \
+ poly16x8x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 37); r; })
+#define vld3q_dup_p64(__a) __extension__ ({ \
+ poly64x2x3_t r; __builtin_neon_vld3q_dup_v(&r, __a, 38); r; })
+#define vld3_dup_f64(__a) __extension__ ({ \
+ float64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 9); r; })
+#define vld3_dup_p64(__a) __extension__ ({ \
+ poly64x1x3_t r; __builtin_neon_vld3_dup_v(&r, __a, 6); r; })
+
+#define vld3q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16x3_t __b = (b); \
+ uint8x16x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 48); r; })
+#define vld3q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2x3_t __b = (b); \
+ uint64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 51); r; })
+#define vld3q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16x3_t __b = (b); \
+ int8x16x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __c, 32); r; })
+#define vld3q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2x3_t __b = (b); \
+ int64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 35); r; })
+#define vld3q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2x3_t __b = (b); \
+ float64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 41); r; })
+#define vld3q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16x3_t __b = (b); \
+ poly8x16x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 36); r; })
+#define vld3q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2x3_t __b = (b); \
+ poly64x2x3_t r; __builtin_neon_vld3q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 38); r; })
+#define vld3_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1x3_t __b = (b); \
+ uint64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 19); r; })
+#define vld3_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1x3_t __b = (b); \
+ int64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 3); r; })
+#define vld3_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1x3_t __b = (b); \
+ float64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 9); r; })
+#define vld3_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1x3_t __b = (b); \
+ poly64x1x3_t r; __builtin_neon_vld3_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 6); r; })
+
+#define vld1q_u8_x3(__a) __extension__ ({ \
+ uint8x16x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 48); r; })
+#define vld1q_u16_x3(__a) __extension__ ({ \
+ uint16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 49); r; })
+#define vld1q_u32_x3(__a) __extension__ ({ \
+ uint32x4x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 50); r; })
+#define vld1q_u64_x3(__a) __extension__ ({ \
+ uint64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 51); r; })
+#define vld1q_s8_x3(__a) __extension__ ({ \
+ int8x16x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 32); r; })
+#define vld1q_s16_x3(__a) __extension__ ({ \
+ int16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 33); r; })
+#define vld1q_s32_x3(__a) __extension__ ({ \
+ int32x4x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 34); r; })
+#define vld1q_s64_x3(__a) __extension__ ({ \
+ int64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 35); r; })
+#define vld1q_f16_x3(__a) __extension__ ({ \
+ float16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 39); r; })
+#define vld1q_f32_x3(__a) __extension__ ({ \
+ float32x4x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 40); r; })
+#define vld1q_f64_x3(__a) __extension__ ({ \
+ float64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 41); r; })
+#define vld1q_p8_x3(__a) __extension__ ({ \
+ poly8x16x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 36); r; })
+#define vld1q_p16_x3(__a) __extension__ ({ \
+ poly16x8x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 37); r; })
+#define vld1q_p64_x3(__a) __extension__ ({ \
+ poly64x2x3_t r; __builtin_neon_vld1q_x3_v(&r, __a, 38); r; })
+#define vld1_u8_x3(__a) __extension__ ({ \
+ uint8x8x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 16); r; })
+#define vld1_u16_x3(__a) __extension__ ({ \
+ uint16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 17); r; })
+#define vld1_u32_x3(__a) __extension__ ({ \
+ uint32x2x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 18); r; })
+#define vld1_u64_x3(__a) __extension__ ({ \
+ uint64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 19); r; })
+#define vld1_s8_x3(__a) __extension__ ({ \
+ int8x8x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 0); r; })
+#define vld1_s16_x3(__a) __extension__ ({ \
+ int16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 1); r; })
+#define vld1_s32_x3(__a) __extension__ ({ \
+ int32x2x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 2); r; })
+#define vld1_s64_x3(__a) __extension__ ({ \
+ int64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 3); r; })
+#define vld1_f16_x3(__a) __extension__ ({ \
+ float16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 7); r; })
+#define vld1_f32_x3(__a) __extension__ ({ \
+ float32x2x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 8); r; })
+#define vld1_f64_x3(__a) __extension__ ({ \
+ float64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 9); r; })
+#define vld1_p8_x3(__a) __extension__ ({ \
+ poly8x8x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 4); r; })
+#define vld1_p16_x3(__a) __extension__ ({ \
+ poly16x4x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 5); r; })
+#define vld1_p64_x3(__a) __extension__ ({ \
+ poly64x1x3_t r; __builtin_neon_vld1_x3_v(&r, __a, 6); r; })
+
+#define vld4q_u64(__a) __extension__ ({ \
+ uint64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 51); r; })
+#define vld4q_s64(__a) __extension__ ({ \
+ int64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 35); r; })
+#define vld4q_f64(__a) __extension__ ({ \
+ float64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 41); r; })
+#define vld4_f64(__a) __extension__ ({ \
+ float64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 9); r; })
+#define vld4_p64(__a) __extension__ ({ \
+ poly64x1x4_t r; __builtin_neon_vld4_v(&r, __a, 6); r; })
+#define vld4q_p64(__a) __extension__ ({ \
+ poly64x2x4_t r; __builtin_neon_vld4q_v(&r, __a, 38); r; })
+
+#define vld4q_dup_u8(__a) __extension__ ({ \
+ uint8x16x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 48); r; })
+#define vld4q_dup_u16(__a) __extension__ ({ \
+ uint16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 49); r; })
+#define vld4q_dup_u32(__a) __extension__ ({ \
+ uint32x4x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 50); r; })
+#define vld4q_dup_u64(__a) __extension__ ({ \
+ uint64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 51); r; })
+#define vld4q_dup_s8(__a) __extension__ ({ \
+ int8x16x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 32); r; })
+#define vld4q_dup_s16(__a) __extension__ ({ \
+ int16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 33); r; })
+#define vld4q_dup_s32(__a) __extension__ ({ \
+ int32x4x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 34); r; })
+#define vld4q_dup_s64(__a) __extension__ ({ \
+ int64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 35); r; })
+#define vld4q_dup_f16(__a) __extension__ ({ \
+ float16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 39); r; })
+#define vld4q_dup_f32(__a) __extension__ ({ \
+ float32x4x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 40); r; })
+#define vld4q_dup_f64(__a) __extension__ ({ \
+ float64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 41); r; })
+#define vld4q_dup_p8(__a) __extension__ ({ \
+ poly8x16x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 36); r; })
+#define vld4q_dup_p16(__a) __extension__ ({ \
+ poly16x8x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 37); r; })
+#define vld4q_dup_p64(__a) __extension__ ({ \
+ poly64x2x4_t r; __builtin_neon_vld4q_dup_v(&r, __a, 38); r; })
+#define vld4_dup_f64(__a) __extension__ ({ \
+ float64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 9); r; })
+#define vld4_dup_p64(__a) __extension__ ({ \
+ poly64x1x4_t r; __builtin_neon_vld4_dup_v(&r, __a, 6); r; })
+
+#define vld4q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16x4_t __b = (b); \
+ uint8x16x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 48); r; })
+#define vld4q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2x4_t __b = (b); \
+ uint64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 51); r; })
+#define vld4q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16x4_t __b = (b); \
+ int8x16x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 32); r; })
+#define vld4q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2x4_t __b = (b); \
+ int64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 35); r; })
+#define vld4q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2x4_t __b = (b); \
+ float64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 41); r; })
+#define vld4q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16x4_t __b = (b); \
+ poly8x16x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 36); r; })
+#define vld4q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2x4_t __b = (b); \
+ poly64x2x4_t r; __builtin_neon_vld4q_lane_v(&r, __a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 38); r; })
+#define vld4_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1x4_t __b = (b); \
+ uint64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 19); r; })
+#define vld4_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1x4_t __b = (b); \
+ int64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 3); r; })
+#define vld4_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1x4_t __b = (b); \
+ float64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 9); r; })
+#define vld4_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1x4_t __b = (b); \
+ poly64x1x4_t r; __builtin_neon_vld4_lane_v(&r, __a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 6); r; })
+
+#define vld1q_u8_x4(__a) __extension__ ({ \
+ uint8x16x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 48); r; })
+#define vld1q_u16_x4(__a) __extension__ ({ \
+ uint16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 49); r; })
+#define vld1q_u32_x4(__a) __extension__ ({ \
+ uint32x4x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 50); r; })
+#define vld1q_u64_x4(__a) __extension__ ({ \
+ uint64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 51); r; })
+#define vld1q_s8_x4(__a) __extension__ ({ \
+ int8x16x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 32); r; })
+#define vld1q_s16_x4(__a) __extension__ ({ \
+ int16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 33); r; })
+#define vld1q_s32_x4(__a) __extension__ ({ \
+ int32x4x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 34); r; })
+#define vld1q_s64_x4(__a) __extension__ ({ \
+ int64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 35); r; })
+#define vld1q_f16_x4(__a) __extension__ ({ \
+ float16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 39); r; })
+#define vld1q_f32_x4(__a) __extension__ ({ \
+ float32x4x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 40); r; })
+#define vld1q_f64_x4(__a) __extension__ ({ \
+ float64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 41); r; })
+#define vld1q_p8_x4(__a) __extension__ ({ \
+ poly8x16x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 36); r; })
+#define vld1q_p16_x4(__a) __extension__ ({ \
+ poly16x8x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 37); r; })
+#define vld1q_p64_x4(__a) __extension__ ({ \
+ poly64x2x4_t r; __builtin_neon_vld1q_x4_v(&r, __a, 38); r; })
+#define vld1_u8_x4(__a) __extension__ ({ \
+ uint8x8x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 16); r; })
+#define vld1_u16_x4(__a) __extension__ ({ \
+ uint16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 17); r; })
+#define vld1_u32_x4(__a) __extension__ ({ \
+ uint32x2x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 18); r; })
+#define vld1_u64_x4(__a) __extension__ ({ \
+ uint64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 19); r; })
+#define vld1_s8_x4(__a) __extension__ ({ \
+ int8x8x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 0); r; })
+#define vld1_s16_x4(__a) __extension__ ({ \
+ int16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 1); r; })
+#define vld1_s32_x4(__a) __extension__ ({ \
+ int32x2x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 2); r; })
+#define vld1_s64_x4(__a) __extension__ ({ \
+ int64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 3); r; })
+#define vld1_f16_x4(__a) __extension__ ({ \
+ float16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 7); r; })
+#define vld1_f32_x4(__a) __extension__ ({ \
+ float32x2x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 8); r; })
+#define vld1_f64_x4(__a) __extension__ ({ \
+ float64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 9); r; })
+#define vld1_p8_x4(__a) __extension__ ({ \
+ poly8x8x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 4); r; })
+#define vld1_p16_x4(__a) __extension__ ({ \
+ poly16x4x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 5); r; })
+#define vld1_p64_x4(__a) __extension__ ({ \
+ poly64x1x4_t r; __builtin_neon_vld1_x4_v(&r, __a, 6); r; })
+
+__ai float64x1_t vmax_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vmax_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float64x2_t vmaxq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vmaxq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai int8x16_t vpmaxq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vpmaxq_v(__a, __b, 32); }
+__ai int16x8_t vpmaxq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vpmaxq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vpmaxq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vpmaxq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vpmaxq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai float32x4_t vpmaxq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vpmaxq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vpmaxq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float64x1_t vmin_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vmin_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float64x2_t vminq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vminq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai int8x16_t vpminq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vpminq_v(__a, __b, 32); }
+__ai int16x8_t vpminq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vpminq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai uint8x16_t vpminq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vpminq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vpminq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai float32x4_t vpminq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vpminq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vpminq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
+__ai float64x1_t vmla_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
+ return __a + (__b * __c); }
+__ai float64x2_t vmlaq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
+ return __a + (__b * __c); }
+
+__ai float64x1_t vmls_f64(float64x1_t __a, float64x1_t __b, float64x1_t __c) {
+ return __a - (__b * __c); }
+__ai float64x2_t vmlsq_f64(float64x2_t __a, float64x2_t __b, float64x2_t __c) {
+ return __a - (__b * __c); }
+
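/* [Editor's illustration, not part of this commit] As the definitions above
 * show, vmla/vmls expand to a separate multiply and add/subtract rather
 * than a fused operation. A minimal axpy-style sketch: */
#include <arm_neon.h>
static float64x2_t axpy(float64x2_t acc, float64x2_t a, float64x2_t x) {
  return vmlaq_f64(acc, a, x);      /* acc + a * x, lane-wise */
}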
+__ai float64x1_t vmov_n_f64(float64_t __a) {
+ return (float64x1_t){ __a }; }
+__ai float64x2_t vmovq_n_f64(float64_t __a) {
+ return (float64x2_t){ __a, __a }; }
+
+__ai float64x1_t vmul_f64(float64x1_t __a, float64x1_t __b) {
+ return __a * __b; }
+__ai float64x2_t vmulq_f64(float64x2_t __a, float64x2_t __b) {
+ return __a * __b; }
+
+__ai float32x2_t vmulx_f32(float32x2_t __a, float32x2_t __b) {
+ return (float32x2_t)__builtin_neon_vmulx_v((int8x8_t)__a, (int8x8_t)__b, 8); }
+__ai float64x1_t vmulx_f64(float64x1_t __a, float64x1_t __b) {
+ return (float64x1_t)__builtin_neon_vmulx_v((int8x8_t)__a, (int8x8_t)__b, 9); }
+__ai float32x4_t vmulxq_f32(float32x4_t __a, float32x4_t __b) {
+ return (float32x4_t)__builtin_neon_vmulxq_v((int8x16_t)__a, (int8x16_t)__b, 40); }
+__ai float64x2_t vmulxq_f64(float64x2_t __a, float64x2_t __b) {
+ return (float64x2_t)__builtin_neon_vmulxq_v((int8x16_t)__a, (int8x16_t)__b, 41); }
+
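/* [Editor's illustration, not part of this commit] vmulx* maps to FMULX,
 * which behaves like an ordinary multiply except that 0 * +/-infinity
 * yields +/-2.0 instead of NaN. A sketch: */
#include <arm_neon.h>
static float64x2_t mulx_demo(float64x2_t a, float64x2_t b) {
  return vmulxq_f64(a, b);
}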
+__ai int64x1_t vneg_s64(int64x1_t __a) {
+ return -__a; }
+__ai float64x1_t vneg_f64(float64x1_t __a) {
+ return -__a; }
+__ai float64x2_t vnegq_f64(float64x2_t __a) {
+ return -__a; }
+__ai int64x2_t vnegq_s64(int64x2_t __a) {
+ return -__a; }
+
+__ai int64x1_t vqabs_s64(int64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vqabs_v((int8x8_t)__a, 3); }
+__ai int64x2_t vqabsq_s64(int64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vqabsq_v((int8x16_t)__a, 35); }
+
+__ai int64x1_t vqneg_s64(int64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vqneg_v((int8x8_t)__a, 3); }
+__ai int64x2_t vqnegq_s64(int64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vqnegq_v((int8x16_t)__a, 35); }
+
+#define vqrshrn_high_n_s16(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int16x8_t __b = (b); \
+ (int8x16_t)vcombine_s16(__a, vqrshrn_n_s16(__b, __c)); })
+#define vqrshrn_high_n_s32(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int32x4_t __b = (b); \
+ (int16x8_t)vcombine_s32(__a, vqrshrn_n_s32(__b, __c)); })
+#define vqrshrn_high_n_s64(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int64x2_t __b = (b); \
+ (int32x4_t)vcombine_s64(__a, vqrshrn_n_s64(__b, __c)); })
+#define vqrshrn_high_n_u16(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint8x16_t)vcombine_u16(__a, vqrshrn_n_u16(__b, __c)); })
+#define vqrshrn_high_n_u32(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint16x8_t)vcombine_u32(__a, vqrshrn_n_u32(__b, __c)); })
+#define vqrshrn_high_n_u64(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint32x4_t)vcombine_u64(__a, vqrshrn_n_u64(__b, __c)); })
+
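/* [Editor's illustration, not part of this commit] The *_high_n narrowing
 * macros shift, round, and saturate the wide vector, then pack the result
 * into the upper half of a full-width vector whose lower half is supplied.
 * A sketch (the shift count must be a compile-time constant): */
#include <arm_neon.h>
static int8x16_t pack_narrow(int8x8_t lo, int16x8_t wide) {
  return vqrshrn_high_n_s16(lo, wide, 4);  /* >>4 with rounding+saturation */
}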
+#define vqrshrun_high_n_s16(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int16x8_t __b = (b); \
+ (int8x16_t)vcombine_s16(__a, vqrshrun_n_s16(__b, __c)); })
+#define vqrshrun_high_n_s32(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int32x4_t __b = (b); \
+ (int16x8_t)vcombine_s32(__a, vqrshrun_n_s32(__b, __c)); })
+#define vqrshrun_high_n_s64(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int64x2_t __b = (b); \
+ (int32x4_t)vcombine_s64(__a, vqrshrun_n_s64(__b, __c)); })
+
+#define vqshrn_high_n_s16(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int16x8_t __b = (b); \
+ (int8x16_t)vcombine_s16(__a, vqshrn_n_s16(__b, __c)); })
+#define vqshrn_high_n_s32(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int32x4_t __b = (b); \
+ (int16x8_t)vcombine_s32(__a, vqshrn_n_s32(__b, __c)); })
+#define vqshrn_high_n_s64(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int64x2_t __b = (b); \
+ (int32x4_t)vcombine_s64(__a, vqshrn_n_s64(__b, __c)); })
+#define vqshrn_high_n_u16(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint8x16_t)vcombine_u16(__a, vqshrn_n_u16(__b, __c)); })
+#define vqshrn_high_n_u32(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint16x8_t)vcombine_u32(__a, vqshrn_n_u32(__b, __c)); })
+#define vqshrn_high_n_u64(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint32x4_t)vcombine_u64(__a, vqshrn_n_u64(__b, __c)); })
+
+#define vqshrun_high_n_s16(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int16x8_t __b = (b); \
+ (int8x16_t)vcombine_s16(__a, vqshrun_n_s16(__b, __c)); })
+#define vqshrun_high_n_s32(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int32x4_t __b = (b); \
+ (int16x8_t)vcombine_s32(__a, vqshrun_n_s32(__b, __c)); })
+#define vqshrun_high_n_s64(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int64x2_t __b = (b); \
+ (int32x4_t)vcombine_s64(__a, vqshrun_n_s64(__b, __c)); })
+
+__ai int8x16_t vqmovn_high_s16(int8x8_t __a, int16x8_t __b) {
+ int8x8_t __a1 = vqmovn_s16(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai int16x8_t vqmovn_high_s32(int16x4_t __a, int32x4_t __b) {
+ int16x4_t __a1 = vqmovn_s32(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai int32x4_t vqmovn_high_s64(int32x2_t __a, int64x2_t __b) {
+ int32x2_t __a1 = vqmovn_s64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
+__ai uint8x16_t vqmovn_high_u16(uint8x8_t __a, uint16x8_t __b) {
+ uint8x8_t __a1 = vqmovn_u16(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai uint16x8_t vqmovn_high_u32(uint16x4_t __a, uint32x4_t __b) {
+ uint16x4_t __a1 = vqmovn_u32(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai uint32x4_t vqmovn_high_u64(uint32x2_t __a, uint64x2_t __b) {
+ uint32x2_t __a1 = vqmovn_u64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
+
+__ai int8x8_t vrbit_s8(int8x8_t __a) {
+ return (int8x8_t)__builtin_neon_vrbit_v(__a, 0); }
+__ai uint8x8_t vrbit_u8(uint8x8_t __a) {
+ return (uint8x8_t)__builtin_neon_vrbit_v((int8x8_t)__a, 16); }
+__ai poly8x8_t vrbit_p8(poly8x8_t __a) {
+ return (poly8x8_t)__builtin_neon_vrbit_v((int8x8_t)__a, 4); }
+__ai int8x16_t vrbitq_s8(int8x16_t __a) {
+ return (int8x16_t)__builtin_neon_vrbitq_v(__a, 32); }
+__ai uint8x16_t vrbitq_u8(uint8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vrbitq_v((int8x16_t)__a, 48); }
+__ai poly8x16_t vrbitq_p8(poly8x16_t __a) {
+ return (poly8x16_t)__builtin_neon_vrbitq_v((int8x16_t)__a, 36); }
+
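/* [Editor's illustration, not part of this commit] vrbit* reverses the bit
 * order within each byte independently. A sketch: */
#include <arm_neon.h>
static uint8x8_t bitrev_bytes(uint8x8_t v) {
  return vrbit_u8(v);               /* e.g. 0x01 -> 0x80 in each byte */
}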
+__ai int8x8_t vreinterpret_s8_f64(float64x1_t __a) {
+ return (int8x8_t)__a; }
+__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __a) {
+ return (int8x8_t)__a; }
+__ai int16x4_t vreinterpret_s16_f64(float64x1_t __a) {
+ return (int16x4_t)__a; }
+__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __a) {
+ return (int16x4_t)__a; }
+__ai int32x2_t vreinterpret_s32_f64(float64x1_t __a) {
+ return (int32x2_t)__a; }
+__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __a) {
+ return (int32x2_t)__a; }
+__ai int64x1_t vreinterpret_s64_f64(float64x1_t __a) {
+ return (int64x1_t)__a; }
+__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __a) {
+ return (int64x1_t)__a; }
+__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __a) {
+ return (uint8x8_t)__a; }
+__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __a) {
+ return (uint16x4_t)__a; }
+__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __a) {
+ return (uint32x2_t)__a; }
+__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __a) {
+ return (uint64x1_t)__a; }
+__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __a) {
+ return (uint64x1_t)__a; }
+__ai float16x4_t vreinterpret_f16_f64(float64x1_t __a) {
+ return (float16x4_t)__a; }
+__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __a) {
+ return (float16x4_t)__a; }
+__ai float32x2_t vreinterpret_f32_f64(float64x1_t __a) {
+ return (float32x2_t)__a; }
+__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __a) {
+ return (float32x2_t)__a; }
+__ai float64x1_t vreinterpret_f64_s8(int8x8_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_s16(int16x4_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_s32(int32x2_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_s64(int64x1_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_f16(float16x4_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_f32(float32x2_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __a) {
+ return (float64x1_t)__a; }
+__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __a) {
+ return (float64x1_t)__a; }
+__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __a) {
+ return (poly8x8_t)__a; }
+__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __a) {
+ return (poly16x4_t)__a; }
+__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __a) {
+ return (poly64x1_t)__a; }
+__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __a) {
+ return (poly64x1_t)__a; }
+__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __a) {
+ return (int8x16_t)__a; }
+__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __a) {
+ return (int8x16_t)__a; }
+__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __a) {
+ return (int16x8_t)__a; }
+__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __a) {
+ return (int16x8_t)__a; }
+__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __a) {
+ return (int32x4_t)__a; }
+__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __a) {
+ return (int32x4_t)__a; }
+__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __a) {
+ return (int64x2_t)__a; }
+__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __a) {
+ return (int64x2_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __a) {
+ return (uint8x16_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __a) {
+ return (uint16x8_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __a) {
+ return (uint32x4_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __a) {
+ return (uint64x2_t)__a; }
+__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __a) {
+ return (uint64x2_t)__a; }
+__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __a) {
+ return (float16x8_t)__a; }
+__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __a) {
+ return (float16x8_t)__a; }
+__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __a) {
+ return (float32x4_t)__a; }
+__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __a) {
+ return (float32x4_t)__a; }
+__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __a) {
+ return (float64x2_t)__a; }
+__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __a) {
+ return (float64x2_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __a) {
+ return (poly8x16_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __a) {
+ return (poly16x8_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __a) {
+ return (poly64x2_t)__a; }
+__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __a) {
+ return (poly64x2_t)__a; }
+
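/* [Editor's illustration, not part of this commit] The vreinterpret* family
 * above are zero-cost bit casts between vector types of the same overall
 * width; no conversion instruction is emitted. A sketch: */
#include <arm_neon.h>
static uint64x1_t bits_of(float64x1_t d) {
  return vreinterpret_u64_f64(d);   /* raw IEEE-754 bits of the double */
}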
+#define vrshrn_high_n_s16(a, b, __c) __extension__ ({ \
+ int8x8_t __a = (a); int16x8_t __b = (b); \
+ (int8x16_t)vcombine_s16(__a, vrshrn_n_s16(__b, __c)); })
+#define vrshrn_high_n_s32(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int32x4_t __b = (b); \
+ (int16x8_t)vcombine_s32(__a, vrshrn_n_s32(__b, __c)); })
+#define vrshrn_high_n_s64(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int64x2_t __b = (b); \
+ (int32x4_t)vcombine_s64(__a, vrshrn_n_s64(__b, __c)); })
+#define vrshrn_high_n_u16(a, b, __c) __extension__ ({ \
+ uint8x8_t __a = (a); uint16x8_t __b = (b); \
+ (uint8x16_t)vcombine_u16(__a, vrshrn_n_u16(__b, __c)); })
+#define vrshrn_high_n_u32(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint32x4_t __b = (b); \
+ (uint16x8_t)vcombine_u32(__a, vrshrn_n_u32(__b, __c)); })
+#define vrshrn_high_n_u64(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint64x2_t __b = (b); \
+ (uint32x4_t)vcombine_u64(__a, vrshrn_n_u64(__b, __c)); })
+
+__ai float32_t vabds_f32(float32_t __a, float32_t __b) {
+ return (float32_t)__builtin_neon_vabds_f32(__a, __b); }
+__ai float64_t vabdd_f64(float64_t __a, float64_t __b) {
+ return (float64_t)__builtin_neon_vabdd_f64(__a, __b); }
+
+__ai int64_t vabsd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vabsd_s64(__a); }
+
+__ai int64_t vaddd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vaddd_s64(__a, __b); }
+__ai uint64_t vaddd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vaddd_u64(__a, __b); }
+
+__ai float32_t vpadds_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vpadds_f32(__a); }
+__ai int64_t vpaddd_s64(int64x2_t __a) {
+ return (int64_t)__builtin_neon_vpaddd_s64(__a); }
+__ai float64_t vpaddd_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vpaddd_f64(__a); }
+__ai uint64_t vpaddd_u64(uint64x2_t __a) {
+ return (uint64_t)__builtin_neon_vpaddd_u64((int64x2_t)__a); }
+
+__ai int64_t vceqd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vceqd_s64(__a, __b); }
+__ai uint64_t vceqd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vceqd_u64(__a, __b); }
+
+__ai int64_t vceqzd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vceqzd_s64(__a); }
+__ai uint64_t vceqzd_u64(uint64_t __a) {
+ return (uint64_t)__builtin_neon_vceqzd_u64(__a); }
+
+__ai int64_t vcged_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vcged_s64(__a, __b); }
+
+__ai int64_t vcgezd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vcgezd_s64(__a); }
+
+__ai int64_t vcgtd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vcgtd_s64(__a, __b); }
+
+__ai int64_t vcgtzd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vcgtzd_s64(__a); }
+
+__ai uint64_t vcgtd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vcgtd_u64(__a, __b); }
+
+__ai uint64_t vcged_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vcged_u64(__a, __b); }
+
+__ai int64_t vcled_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vcled_s64(__a, __b); }
+__ai uint64_t vcled_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vcled_u64(__a, __b); }
+
+__ai int64_t vclezd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vclezd_s64(__a); }
+
+__ai int64_t vcltd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vcltd_s64(__a, __b); }
+__ai uint64_t vcltd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vcltd_u64(__a, __b); }
+
+__ai int64_t vcltzd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vcltzd_s64(__a); }
+
+__ai int64_t vtstd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vtstd_s64(__a, __b); }
+__ai uint64_t vtstd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vtstd_u64(__a, __b); }
+
+__ai uint32_t vcages_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcages_f32(__a, __b); }
+__ai uint64_t vcaged_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcaged_f64(__a, __b); }
+
+__ai uint32_t vcagts_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcagts_f32(__a, __b); }
+__ai uint64_t vcagtd_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcagtd_f64(__a, __b); }
+
+__ai uint32_t vcales_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcales_f32(__a, __b); }
+__ai uint64_t vcaled_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcaled_f64(__a, __b); }
+
+__ai uint32_t vcalts_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcalts_f32(__a, __b); }
+__ai uint64_t vcaltd_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcaltd_f64(__a, __b); }
+
+__ai uint32_t vceqs_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vceqs_f32(__a, __b); }
+__ai uint64_t vceqd_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vceqd_f64(__a, __b); }
+
+__ai uint32_t vceqzs_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vceqzs_f32(__a); }
+__ai uint64_t vceqzd_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vceqzd_f64(__a); }
+
+__ai uint32_t vcges_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcges_f32(__a, __b); }
+__ai uint64_t vcged_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcged_f64(__a, __b); }
+
+__ai uint32_t vcgezs_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcgezs_f32(__a); }
+__ai uint64_t vcgezd_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcgezd_f64(__a); }
+
+__ai uint32_t vcgts_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcgts_f32(__a, __b); }
+__ai uint64_t vcgtd_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcgtd_f64(__a, __b); }
+
+__ai uint32_t vcgtzs_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcgtzs_f32(__a); }
+__ai uint64_t vcgtzd_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcgtzd_f64(__a); }
+
+__ai uint32_t vcles_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vcles_f32(__a, __b); }
+__ai uint64_t vcled_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcled_f64(__a, __b); }
+
+__ai uint32_t vclezs_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vclezs_f32(__a); }
+__ai uint64_t vclezd_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vclezd_f64(__a); }
+
+__ai uint32_t vclts_f32(float32_t __a, float32_t __b) {
+ return (uint32_t)__builtin_neon_vclts_f32(__a, __b); }
+__ai uint64_t vcltd_f64(float64_t __a, float64_t __b) {
+ return (uint64_t)__builtin_neon_vcltd_f64(__a, __b); }
+
+__ai uint32_t vcltzs_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcltzs_f32(__a); }
+__ai uint64_t vcltzd_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcltzd_f64(__a); }
+
+__ai int64_t vcvtad_s64_f64(float64_t __a) {
+ return (int64_t)__builtin_neon_vcvtad_s64_f64(__a); }
+
+__ai int32_t vcvtas_s32_f32(float32_t __a) {
+ return (int32_t)__builtin_neon_vcvtas_s32_f32(__a); }
+
+__ai uint64_t vcvtad_u64_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcvtad_u64_f64(__a); }
+
+__ai uint32_t vcvtas_u32_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcvtas_u32_f32(__a); }
+
+__ai int64_t vcvtmd_s64_f64(float64_t __a) {
+ return (int64_t)__builtin_neon_vcvtmd_s64_f64(__a); }
+
+__ai int32_t vcvtms_s32_f32(float32_t __a) {
+ return (int32_t)__builtin_neon_vcvtms_s32_f32(__a); }
+
+__ai uint64_t vcvtmd_u64_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcvtmd_u64_f64(__a); }
+
+__ai uint32_t vcvtms_u32_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcvtms_u32_f32(__a); }
+
+__ai int64_t vcvtnd_s64_f64(float64_t __a) {
+ return (int64_t)__builtin_neon_vcvtnd_s64_f64(__a); }
+
+__ai int32_t vcvtns_s32_f32(float32_t __a) {
+ return (int32_t)__builtin_neon_vcvtns_s32_f32(__a); }
+
+__ai uint64_t vcvtnd_u64_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcvtnd_u64_f64(__a); }
+
+__ai uint32_t vcvtns_u32_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcvtns_u32_f32(__a); }
+
+__ai int64_t vcvtpd_s64_f64(float64_t __a) {
+ return (int64_t)__builtin_neon_vcvtpd_s64_f64(__a); }
+
+__ai int32_t vcvtps_s32_f32(float32_t __a) {
+ return (int32_t)__builtin_neon_vcvtps_s32_f32(__a); }
+
+__ai uint64_t vcvtpd_u64_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcvtpd_u64_f64(__a); }
+
+__ai uint32_t vcvtps_u32_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcvtps_u32_f32(__a); }
+
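/* [Editor's illustration, not part of this commit] The scalar vcvt{a,m,n,p}*
 * conversions above differ only in rounding mode: 'a' rounds to nearest
 * with ties away from zero, 'm' toward -infinity, 'n' to nearest even,
 * 'p' toward +infinity. A sketch: */
#include <arm_neon.h>
static void cvt_demo(void) {
  int32_t away = vcvtas_s32_f32(2.5f);   /* 3: ties away from zero */
  int32_t down = vcvtms_s32_f32(2.5f);   /* 2: toward -infinity    */
  (void)away; (void)down;
}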
+__ai float32_t vcvtxd_f32_f64(float64_t __a) {
+ return (float32_t)__builtin_neon_vcvtxd_f32_f64(__a); }
+
+__ai int64_t vcvtd_s64_f64(float64_t __a) {
+ return (int64_t)__builtin_neon_vcvtd_s64_f64(__a); }
+
+__ai int32_t vcvts_s32_f32(float32_t __a) {
+ return (int32_t)__builtin_neon_vcvts_s32_f32(__a); }
+
+#define vcvts_n_s32_f32(a, __b) __extension__ ({ \
+ float32_t __a = (a); \
+ (int32_t)__builtin_neon_vcvts_n_s32_f32(__a, __b); })
+
+#define vcvtd_n_s64_f64(a, __b) __extension__ ({ \
+ float64_t __a = (a); \
+ (int64_t)__builtin_neon_vcvtd_n_s64_f64(__a, __b); })
+
+__ai uint64_t vcvtd_u64_f64(float64_t __a) {
+ return (uint64_t)__builtin_neon_vcvtd_u64_f64(__a); }
+
+__ai uint32_t vcvts_u32_f32(float32_t __a) {
+ return (uint32_t)__builtin_neon_vcvts_u32_f32(__a); }
+
+#define vcvts_n_u32_f32(a, __b) __extension__ ({ \
+ float32_t __a = (a); \
+ (uint32_t)__builtin_neon_vcvts_n_u32_f32(__a, __b); })
+
+#define vcvtd_n_u64_f64(a, __b) __extension__ ({ \
+ float64_t __a = (a); \
+ (uint64_t)__builtin_neon_vcvtd_n_u64_f64(__a, __b); })
+
+__ai float32_t vpmaxnms_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vpmaxnms_f32(__a); }
+__ai float64_t vpmaxnmqd_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vpmaxnmqd_f64(__a); }
+
+__ai float32_t vpmaxs_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vpmaxs_f32(__a); }
+__ai float64_t vpmaxqd_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vpmaxqd_f64(__a); }
+
+__ai float32_t vpminnms_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vpminnms_f32(__a); }
+__ai float64_t vpminnmqd_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vpminnmqd_f64(__a); }
+
+__ai float32_t vpmins_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vpmins_f32(__a); }
+__ai float64_t vpminqd_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vpminqd_f64(__a); }
+
+#define vfmas_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32_t __a = (a); float32_t __b = (b); float32x2_t __c = (c); \
+ (float32_t)__builtin_neon_vfmas_lane_f32(__a, __b, __c, __d); })
+#define vfmad_lane_f64(a, b, c, __d) __extension__ ({ \
+ float64_t __a = (a); float64_t __b = (b); float64x1_t __c = (c); \
+ (float64_t)__builtin_neon_vfmad_lane_f64(__a, __b, __c, __d); })
+
+#define vfmas_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32_t __a = (a); float32_t __b = (b); float32x4_t __c = (c); \
+ (float32_t)__builtin_neon_vfmas_laneq_f32(__a, __b, __c, __d); })
+#define vfmad_laneq_f64(a, b, c, __d) __extension__ ({ \
+ float64_t __a = (a); float64_t __b = (b); float64x2_t __c = (c); \
+ (float64_t)__builtin_neon_vfmad_laneq_f64(__a, __b, __c, __d); })
+
+#define vfmss_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32_t __a = (a); float32_t __b = (b); float32x2_t __c = (c); \
+ float32_t __a1 = __a; \
+ float32_t __b1 = __b; \
+ float32x2_t __c1 = __c; \
+ vfmas_lane_f32(__a1, __b1, -__c1, __d); })
+#define vfmsd_lane_f64(a, b, c, __d) __extension__ ({ \
+ float64_t __a = (a); float64_t __b = (b); float64x1_t __c = (c); \
+ float64_t __a1 = __a; \
+ float64_t __b1 = __b; \
+ float64x1_t __c1 = __c; \
+ vfmad_lane_f64(__a1, __b1, -__c1, __d); })
+
+#define vfmss_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32_t __a = (a); float32_t __b = (b); float32x4_t __c = (c); \
+ float32_t __a1 = __a; \
+ float32_t __b1 = __b; \
+ float32x4_t __c1 = __c; \
+ vfmas_laneq_f32(__a1, __b1, -__c1, __d); })
+#define vfmsd_laneq_f64(a, b, c, __d) __extension__ ({ \
+ float64_t __a = (a); float64_t __b = (b); float64x2_t __c = (c); \
+ float64_t __a1 = __a; \
+ float64_t __b1 = __b; \
+ float64x2_t __c1 = __c; \
+ vfmad_laneq_f64(__a1, __b1, -__c1, __d); })
+
+__ai float32_t vmulxs_f32(float32_t __a, float32_t __b) {
+ return (float32_t)__builtin_neon_vmulxs_f32(__a, __b); }
+__ai float64_t vmulxd_f64(float64_t __a, float64_t __b) {
+ return (float64_t)__builtin_neon_vmulxd_f64(__a, __b); }
+
+#define vmulxs_lane_f32(a, b, __c) __extension__ ({ \
+ float32_t __a = (a); float32x2_t __b = (b); \
+ float32_t __d1 = vget_lane_f32(__b, __c);\
+ vmulxs_f32(__a, __d1); })
+#define vmulxd_lane_f64(a, b, __c) __extension__ ({ \
+ float64_t __a = (a); float64x1_t __b = (b); \
+ float64_t __d1 = vget_lane_f64(__b, __c);\
+ vmulxd_f64(__a, __d1); })
+
+#define vmulxs_laneq_f32(a, b, __c) __extension__ ({ \
+ float32_t __a = (a); float32x4_t __b = (b); \
+ float32_t __d1 = vgetq_lane_f32(__b, __c);\
+ vmulxs_f32(__a, __d1); })
+#define vmulxd_laneq_f64(a, b, __c) __extension__ ({ \
+ float64_t __a = (a); float64x2_t __b = (b); \
+ float64_t __d1 = vgetq_lane_f64(__b, __c);\
+ vmulxd_f64(__a, __d1); })
+
+#define vmuls_lane_f32(a, b, __c) __extension__ ({ \
+ float32_t __a = (a); float32x2_t __b = (b); \
+ float32_t __d1 = vget_lane_f32(__b, __c);\
+ __a * __d1; })
+#define vmuld_lane_f64(a, b, __c) __extension__ ({ \
+ float64_t __a = (a); float64x1_t __b = (b); \
+ float64_t __d1 = vget_lane_f64(__b, __c);\
+ __a * __d1; })
+
+#define vmuls_laneq_f32(a, b, __c) __extension__ ({ \
+ float32_t __a = (a); float32x4_t __b = (b); \
+ float32_t __d1 = vgetq_lane_f32(__b, __c);\
+ __a * __d1; })
+#define vmuld_laneq_f64(a, b, __c) __extension__ ({ \
+ float64_t __a = (a); float64x2_t __b = (b); \
+ float64_t __d1 = vgetq_lane_f64(__b, __c);\
+ __a * __d1; })
+
+__ai float32_t vrecpes_f32(float32_t __a) {
+ return (float32_t)__builtin_neon_vrecpes_f32(__a); }
+__ai float64_t vrecped_f64(float64_t __a) {
+ return (float64_t)__builtin_neon_vrecped_f64(__a); }
+
+__ai float32_t vrecpss_f32(float32_t __a, float32_t __b) {
+ return (float32_t)__builtin_neon_vrecpss_f32(__a, __b); }
+__ai float64_t vrecpsd_f64(float64_t __a, float64_t __b) {
+ return (float64_t)__builtin_neon_vrecpsd_f64(__a, __b); }
+
+__ai float32_t vrecpxs_f32(float32_t __a) {
+ return (float32_t)__builtin_neon_vrecpxs_f32(__a); }
+__ai float64_t vrecpxd_f64(float64_t __a) {
+ return (float64_t)__builtin_neon_vrecpxd_f64(__a); }
+
+__ai float32_t vrsqrtes_f32(float32_t __a) {
+ return (float32_t)__builtin_neon_vrsqrtes_f32(__a); }
+__ai float64_t vrsqrted_f64(float64_t __a) {
+ return (float64_t)__builtin_neon_vrsqrted_f64(__a); }
+
+__ai float32_t vrsqrtss_f32(float32_t __a, float32_t __b) {
+ return (float32_t)__builtin_neon_vrsqrtss_f32(__a, __b); }
+__ai float64_t vrsqrtsd_f64(float64_t __a, float64_t __b) {
+ return (float64_t)__builtin_neon_vrsqrtsd_f64(__a, __b); }
+
+#define vget_lane_f16(a, __b) __extension__ ({ \
+ float16x4_t __a = (a); \
+ int16x4_t __a1 = vreinterpret_s16_f16(__a);\
+ vget_lane_s16(__a1, __b); })
+#define vgetq_lane_f16(a, __b) __extension__ ({ \
+ float16x8_t __a = (a); \
+ int16x8_t __a1 = vreinterpretq_s16_f16(__a);\
+ vgetq_lane_s16(__a1, __b); })
+
+__ai int64_t vnegd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vnegd_s64(__a); }
+
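+/* Scalar saturating arithmetic: on overflow the result clamps to the type's
+   minimum/maximum instead of wrapping. */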
+__ai int8_t vqaddb_s8(int8_t __a, int8_t __b) {
+ return (int8_t)__builtin_neon_vqaddb_s8(__a, __b); }
+__ai int16_t vqaddh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vqaddh_s16(__a, __b); }
+__ai int32_t vqadds_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vqadds_s32(__a, __b); }
+__ai int64_t vqaddd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vqaddd_s64(__a, __b); }
+__ai uint8_t vqaddb_u8(uint8_t __a, uint8_t __b) {
+ return (uint8_t)__builtin_neon_vqaddb_u8(__a, __b); }
+__ai uint16_t vqaddh_u16(uint16_t __a, uint16_t __b) {
+ return (uint16_t)__builtin_neon_vqaddh_u16(__a, __b); }
+__ai uint32_t vqadds_u32(uint32_t __a, uint32_t __b) {
+ return (uint32_t)__builtin_neon_vqadds_u32(__a, __b); }
+__ai uint64_t vqaddd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vqaddd_u64(__a, __b); }
+
+__ai int8_t vqrshlb_s8(int8_t __a, int8_t __b) {
+ return (int8_t)__builtin_neon_vqrshlb_s8(__a, __b); }
+__ai int16_t vqrshlh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vqrshlh_s16(__a, __b); }
+__ai int32_t vqrshls_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vqrshls_s32(__a, __b); }
+__ai int64_t vqrshld_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vqrshld_s64(__a, __b); }
+__ai uint8_t vqrshlb_u8(uint8_t __a, uint8_t __b) {
+ return (uint8_t)__builtin_neon_vqrshlb_u8(__a, __b); }
+__ai uint16_t vqrshlh_u16(uint16_t __a, uint16_t __b) {
+ return (uint16_t)__builtin_neon_vqrshlh_u16(__a, __b); }
+__ai uint32_t vqrshls_u32(uint32_t __a, uint32_t __b) {
+ return (uint32_t)__builtin_neon_vqrshls_u32(__a, __b); }
+__ai uint64_t vqrshld_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vqrshld_u64(__a, __b); }
+
+__ai int8_t vqshlb_s8(int8_t __a, int8_t __b) {
+ return (int8_t)__builtin_neon_vqshlb_s8(__a, __b); }
+__ai int16_t vqshlh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vqshlh_s16(__a, __b); }
+__ai int32_t vqshls_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vqshls_s32(__a, __b); }
+__ai int64_t vqshld_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vqshld_s64(__a, __b); }
+__ai uint8_t vqshlb_u8(uint8_t __a, uint8_t __b) {
+ return (uint8_t)__builtin_neon_vqshlb_u8(__a, __b); }
+__ai uint16_t vqshlh_u16(uint16_t __a, uint16_t __b) {
+ return (uint16_t)__builtin_neon_vqshlh_u16(__a, __b); }
+__ai uint32_t vqshls_u32(uint32_t __a, uint32_t __b) {
+ return (uint32_t)__builtin_neon_vqshls_u32(__a, __b); }
+__ai uint64_t vqshld_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vqshld_u64(__a, __b); }
+
+__ai int8_t vqsubb_s8(int8_t __a, int8_t __b) {
+ return (int8_t)__builtin_neon_vqsubb_s8(__a, __b); }
+__ai int16_t vqsubh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vqsubh_s16(__a, __b); }
+__ai int32_t vqsubs_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vqsubs_s32(__a, __b); }
+__ai int64_t vqsubd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vqsubd_s64(__a, __b); }
+__ai uint8_t vqsubb_u8(uint8_t __a, uint8_t __b) {
+ return (uint8_t)__builtin_neon_vqsubb_u8(__a, __b); }
+__ai uint16_t vqsubh_u16(uint16_t __a, uint16_t __b) {
+ return (uint16_t)__builtin_neon_vqsubh_u16(__a, __b); }
+__ai uint32_t vqsubs_u32(uint32_t __a, uint32_t __b) {
+ return (uint32_t)__builtin_neon_vqsubs_u32(__a, __b); }
+__ai uint64_t vqsubd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vqsubd_u64(__a, __b); }
+
+__ai int64_t vrshld_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vrshld_s64(__a, __b); }
+__ai uint64_t vrshld_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vrshld_u64(__a, __b); }
+
+__ai float64_t vcvtd_f64_s64(int64_t __a) {
+ return (float64_t)__builtin_neon_vcvtd_f64_s64(__a); }
+
+__ai float32_t vcvts_f32_s32(int32_t __a) {
+ return (float32_t)__builtin_neon_vcvts_f32_s32(__a); }
+
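+/* The _n_ conversions treat the integer argument as a fixed-point value
+   with __b fractional bits. */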
+#define vcvts_n_f32_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (float32_t)__builtin_neon_vcvts_n_f32_s32(__a, __b); })
+#define vcvts_n_f32_u32(a, __b) __extension__ ({ \
+ uint32_t __a = (a); \
+ (float32_t)__builtin_neon_vcvts_n_f32_u32(__a, __b); })
+
+#define vcvtd_n_f64_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (float64_t)__builtin_neon_vcvtd_n_f64_s64(__a, __b); })
+#define vcvtd_n_f64_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (float64_t)__builtin_neon_vcvtd_n_f64_u64(__a, __b); })
+
+#define vset_lane_f16(a, b, __c) __extension__ ({ \
+ float16_t __a = (a); float16x4_t __b = (b); \
+ int16_t __a1 = (int16_t)__a;\
+  int16x4_t __b1 = vreinterpret_s16_f16(__b);\
+ int16x4_t __b2 = vset_lane_s16(__a1, __b1, __c);\
+ vreinterpret_f16_s16(__b2); })
+#define vsetq_lane_f16(a, b, __c) __extension__ ({ \
+ float16_t __a = (a); float16x8_t __b = (b); \
+ int16_t __a1 = (int16_t)__a;\
+  int16x8_t __b1 = vreinterpretq_s16_f16(__b);\
+ int16x8_t __b2 = vsetq_lane_s16(__a1, __b1, __c);\
+ vreinterpretq_f16_s16(__b2); })
+
+__ai int64_t vshld_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vshld_s64(__a, __b); }
+__ai uint64_t vshld_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vshld_u64(__a, __b); }
+
+#define vshld_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int64_t)__builtin_neon_vshld_n_s64(__a, __b); })
+#define vshld_n_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (uint64_t)__builtin_neon_vshld_n_u64(__a, __b); })
+
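+/* Shift-left-and-insert: the low __c bits of __a are preserved; the rest is
+   taken from __b shifted left by __c. */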
+#define vslid_n_s64(a, b, __c) __extension__ ({ \
+ int64_t __a = (a); int64_t __b = (b); \
+ (int64_t)__builtin_neon_vslid_n_s64(__a, __b, __c); })
+#define vslid_n_u64(a, b, __c) __extension__ ({ \
+ uint64_t __a = (a); uint64_t __b = (b); \
+ (uint64_t)__builtin_neon_vslid_n_u64(__a, __b, __c); })
+
+__ai int8_t vqabsb_s8(int8_t __a) {
+ return (int8_t)__builtin_neon_vqabsb_s8(__a); }
+__ai int16_t vqabsh_s16(int16_t __a) {
+ return (int16_t)__builtin_neon_vqabsh_s16(__a); }
+__ai int32_t vqabss_s32(int32_t __a) {
+ return (int32_t)__builtin_neon_vqabss_s32(__a); }
+__ai int64_t vqabsd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vqabsd_s64(__a); }
+
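+/* Saturating doubling multiply-accumulate long: widens 2*__b*__c to the next
+   element size and adds it to (or subtracts it from) __a, saturating. */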
+__ai int32_t vqdmlalh_s16(int32_t __a, int16_t __b, int16_t __c) {
+ return (int32_t)__builtin_neon_vqdmlalh_s16(__a, __b, __c); }
+__ai int64_t vqdmlals_s32(int64_t __a, int32_t __b, int32_t __c) {
+ return (int64_t)__builtin_neon_vqdmlals_s32(__a, __b, __c); }
+
+#define vqdmlalh_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32_t __a = (a); int16_t __b = (b); int16x4_t __c = (c); \
+ (int32_t)__builtin_neon_vqdmlalh_lane_s16(__a, __b, __c, __d); })
+#define vqdmlals_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64_t __a = (a); int32_t __b = (b); int32x2_t __c = (c); \
+ (int64_t)__builtin_neon_vqdmlals_lane_s32(__a, __b, __c, __d); })
+
+#define vqdmlalh_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32_t __a = (a); int16_t __b = (b); int16x8_t __c = (c); \
+ (int32_t)__builtin_neon_vqdmlalh_laneq_s16(__a, __b, __c, __d); })
+#define vqdmlals_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64_t __a = (a); int32_t __b = (b); int32x4_t __c = (c); \
+ (int64_t)__builtin_neon_vqdmlals_laneq_s32(__a, __b, __c, __d); })
+
+__ai int32_t vqdmlslh_s16(int32_t __a, int16_t __b, int16_t __c) {
+ return (int32_t)__builtin_neon_vqdmlslh_s16(__a, __b, __c); }
+__ai int64_t vqdmlsls_s32(int64_t __a, int32_t __b, int32_t __c) {
+ return (int64_t)__builtin_neon_vqdmlsls_s32(__a, __b, __c); }
+
+#define vqdmlslh_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32_t __a = (a); int16_t __b = (b); int16x4_t __c = (c); \
+ (int32_t)__builtin_neon_vqdmlslh_lane_s16(__a, __b, __c, __d); })
+#define vqdmlsls_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64_t __a = (a); int32_t __b = (b); int32x2_t __c = (c); \
+ (int64_t)__builtin_neon_vqdmlsls_lane_s32(__a, __b, __c, __d); })
+
+#define vqdmlslh_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32_t __a = (a); int16_t __b = (b); int16x8_t __c = (c); \
+ (int32_t)__builtin_neon_vqdmlslh_laneq_s16(__a, __b, __c, __d); })
+#define vqdmlsls_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64_t __a = (a); int32_t __b = (b); int32x4_t __c = (c); \
+ (int64_t)__builtin_neon_vqdmlsls_laneq_s32(__a, __b, __c, __d); })
+
+__ai int16_t vqdmulhh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vqdmulhh_s16(__a, __b); }
+__ai int32_t vqdmulhs_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vqdmulhs_s32(__a, __b); }
+
+#define vqdmulhh_lane_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x4_t __b = (b); \
+ vqdmulhh_s16(__a, vget_lane_s16(__b, __c)); })
+#define vqdmulhs_lane_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x2_t __b = (b); \
+ vqdmulhs_s32(__a, vget_lane_s32(__b, __c)); })
+
+#define vqdmulhh_laneq_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x8_t __b = (b); \
+ vqdmulhh_s16(__a, vgetq_lane_s16(__b, __c)); })
+#define vqdmulhs_laneq_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x4_t __b = (b); \
+ vqdmulhs_s32(__a, vgetq_lane_s32(__b, __c)); })
+
+__ai int32_t vqdmullh_s16(int16_t __a, int16_t __b) {
+ return (int32_t)__builtin_neon_vqdmullh_s16(__a, __b); }
+__ai int64_t vqdmulls_s32(int32_t __a, int32_t __b) {
+ return (int64_t)__builtin_neon_vqdmulls_s32(__a, __b); }
+
+#define vqdmullh_lane_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x4_t __b = (b); \
+  vqdmullh_s16(__a, vget_lane_s16(__b, __c)); })
+#define vqdmulls_lane_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x2_t __b = (b); \
+  vqdmulls_s32(__a, vget_lane_s32(__b, __c)); })
+
+#define vqdmullh_laneq_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x8_t __b = (b); \
+  vqdmullh_s16(__a, vgetq_lane_s16(__b, __c)); })
+#define vqdmulls_laneq_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x4_t __b = (b); \
+  vqdmulls_s32(__a, vgetq_lane_s32(__b, __c)); })
+
+__ai int8_t vqnegb_s8(int8_t __a) {
+ return (int8_t)__builtin_neon_vqnegb_s8(__a); }
+__ai int16_t vqnegh_s16(int16_t __a) {
+ return (int16_t)__builtin_neon_vqnegh_s16(__a); }
+__ai int32_t vqnegs_s32(int32_t __a) {
+ return (int32_t)__builtin_neon_vqnegs_s32(__a); }
+__ai int64_t vqnegd_s64(int64_t __a) {
+ return (int64_t)__builtin_neon_vqnegd_s64(__a); }
+
+__ai int16_t vqrdmulhh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vqrdmulhh_s16(__a, __b); }
+__ai int32_t vqrdmulhs_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vqrdmulhs_s32(__a, __b); }
+
+#define vqrdmulhh_lane_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x4_t __b = (b); \
+ vqrdmulhh_s16(__a, vget_lane_s16(__b, __c)); })
+#define vqrdmulhs_lane_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x2_t __b = (b); \
+ vqrdmulhs_s32(__a, vget_lane_s32(__b, __c)); })
+
+#define vqrdmulhh_laneq_s16(a, b, __c) __extension__ ({ \
+ int16_t __a = (a); int16x8_t __b = (b); \
+ vqrdmulhh_s16(__a, vgetq_lane_s16(__b, __c)); })
+#define vqrdmulhs_laneq_s32(a, b, __c) __extension__ ({ \
+ int32_t __a = (a); int32x4_t __b = (b); \
+ vqrdmulhs_s32(__a, vgetq_lane_s32(__b, __c)); })
+
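+/* Saturating shift-right-narrow: shift right by __b (with rounding in the
+   rshrn forms), then saturate to the narrower result type. */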
+#define vqrshrnh_n_s16(a, __b) __extension__ ({ \
+ int16_t __a = (a); \
+ (int8_t)__builtin_neon_vqrshrnh_n_s16(__a, __b); })
+#define vqrshrns_n_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (int16_t)__builtin_neon_vqrshrns_n_s32(__a, __b); })
+#define vqrshrnd_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int32_t)__builtin_neon_vqrshrnd_n_s64(__a, __b); })
+#define vqrshrnh_n_u16(a, __b) __extension__ ({ \
+ uint16_t __a = (a); \
+ (uint8_t)__builtin_neon_vqrshrnh_n_u16(__a, __b); })
+#define vqrshrns_n_u32(a, __b) __extension__ ({ \
+ uint32_t __a = (a); \
+ (uint16_t)__builtin_neon_vqrshrns_n_u32(__a, __b); })
+#define vqrshrnd_n_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (uint32_t)__builtin_neon_vqrshrnd_n_u64(__a, __b); })
+
+#define vqrshrunh_n_s16(a, __b) __extension__ ({ \
+ int16_t __a = (a); \
+ (int8_t)__builtin_neon_vqrshrunh_n_s16(__a, __b); })
+#define vqrshruns_n_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (int16_t)__builtin_neon_vqrshruns_n_s32(__a, __b); })
+#define vqrshrund_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int32_t)__builtin_neon_vqrshrund_n_s64(__a, __b); })
+
+#define vqshlub_n_s8(a, __b) __extension__ ({ \
+ int8_t __a = (a); \
+ (int8_t)__builtin_neon_vqshlub_n_s8(__a, __b); })
+#define vqshluh_n_s16(a, __b) __extension__ ({ \
+ int16_t __a = (a); \
+ (int16_t)__builtin_neon_vqshluh_n_s16(__a, __b); })
+#define vqshlus_n_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (int32_t)__builtin_neon_vqshlus_n_s32(__a, __b); })
+#define vqshlud_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int64_t)__builtin_neon_vqshlud_n_s64(__a, __b); })
+
+#define vqshlb_n_s8(a, __b) __extension__ ({ \
+ int8_t __a = (a); \
+ (int8_t)__builtin_neon_vqshlb_n_s8(__a, __b); })
+#define vqshlh_n_s16(a, __b) __extension__ ({ \
+ int16_t __a = (a); \
+ (int16_t)__builtin_neon_vqshlh_n_s16(__a, __b); })
+#define vqshls_n_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (int32_t)__builtin_neon_vqshls_n_s32(__a, __b); })
+#define vqshld_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int64_t)__builtin_neon_vqshld_n_s64(__a, __b); })
+#define vqshlb_n_u8(a, __b) __extension__ ({ \
+ uint8_t __a = (a); \
+ (uint8_t)__builtin_neon_vqshlb_n_u8(__a, __b); })
+#define vqshlh_n_u16(a, __b) __extension__ ({ \
+ uint16_t __a = (a); \
+ (uint16_t)__builtin_neon_vqshlh_n_u16(__a, __b); })
+#define vqshls_n_u32(a, __b) __extension__ ({ \
+ uint32_t __a = (a); \
+ (uint32_t)__builtin_neon_vqshls_n_u32(__a, __b); })
+#define vqshld_n_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (uint64_t)__builtin_neon_vqshld_n_u64(__a, __b); })
+
+#define vqshrnh_n_s16(a, __b) __extension__ ({ \
+ int16_t __a = (a); \
+ (int8_t)__builtin_neon_vqshrnh_n_s16(__a, __b); })
+#define vqshrns_n_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (int16_t)__builtin_neon_vqshrns_n_s32(__a, __b); })
+#define vqshrnd_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int32_t)__builtin_neon_vqshrnd_n_s64(__a, __b); })
+#define vqshrnh_n_u16(a, __b) __extension__ ({ \
+ uint16_t __a = (a); \
+ (uint8_t)__builtin_neon_vqshrnh_n_u16(__a, __b); })
+#define vqshrns_n_u32(a, __b) __extension__ ({ \
+ uint32_t __a = (a); \
+ (uint16_t)__builtin_neon_vqshrns_n_u32(__a, __b); })
+#define vqshrnd_n_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (uint32_t)__builtin_neon_vqshrnd_n_u64(__a, __b); })
+
+#define vqshrunh_n_s16(a, __b) __extension__ ({ \
+ int16_t __a = (a); \
+ (int8_t)__builtin_neon_vqshrunh_n_s16(__a, __b); })
+#define vqshruns_n_s32(a, __b) __extension__ ({ \
+ int32_t __a = (a); \
+ (int16_t)__builtin_neon_vqshruns_n_s32(__a, __b); })
+#define vqshrund_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int32_t)__builtin_neon_vqshrund_n_s64(__a, __b); })
+
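+/* Saturating narrow: vqmovn saturates to the half-width type; vqmovun
+   saturates a signed input to the unsigned half-width range. */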
+__ai int8_t vqmovnh_s16(int16_t __a) {
+ return (int8_t)__builtin_neon_vqmovnh_s16(__a); }
+__ai int16_t vqmovns_s32(int32_t __a) {
+ return (int16_t)__builtin_neon_vqmovns_s32(__a); }
+__ai int32_t vqmovnd_s64(int64_t __a) {
+ return (int32_t)__builtin_neon_vqmovnd_s64(__a); }
+
+__ai int8_t vqmovunh_s16(int16_t __a) {
+ return (int8_t)__builtin_neon_vqmovunh_s16(__a); }
+__ai int16_t vqmovuns_s32(int32_t __a) {
+ return (int16_t)__builtin_neon_vqmovuns_s32(__a); }
+__ai int32_t vqmovund_s64(int64_t __a) {
+ return (int32_t)__builtin_neon_vqmovund_s64(__a); }
+
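+/* Shift-right-and-insert: the high __c bits of __a are preserved; the rest
+   is taken from __b shifted right by __c. */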
+#define vsrid_n_s64(a, b, __c) __extension__ ({ \
+ int64_t __a = (a); int64_t __b = (b); \
+ (int64_t)__builtin_neon_vsrid_n_s64(__a, __b, __c); })
+#define vsrid_n_u64(a, b, __c) __extension__ ({ \
+ uint64_t __a = (a); uint64_t __b = (b); \
+ (uint64_t)__builtin_neon_vsrid_n_u64(__a, __b, __c); })
+
+#define vrshrd_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int64_t)__builtin_neon_vrshrd_n_s64(__a, __b); })
+#define vrshrd_n_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (uint64_t)__builtin_neon_vrshrd_n_u64(__a, __b); })
+
+#define vrsrad_n_s64(a, b, __c) __extension__ ({ \
+ int64_t __a = (a); int64_t __b = (b); \
+ (int64_t)__builtin_neon_vrsrad_n_s64(__a, __b, __c); })
+#define vrsrad_n_u64(a, b, __c) __extension__ ({ \
+ uint64_t __a = (a); uint64_t __b = (b); \
+ (uint64_t)__builtin_neon_vrsrad_n_u64(__a, __b, __c); })
+
+#define vshrd_n_s64(a, __b) __extension__ ({ \
+ int64_t __a = (a); \
+ (int64_t)__builtin_neon_vshrd_n_s64(__a, __b); })
+#define vshrd_n_u64(a, __b) __extension__ ({ \
+ uint64_t __a = (a); \
+ (uint64_t)__builtin_neon_vshrd_n_u64(__a, __b); })
+
+#define vsrad_n_s64(a, b, __c) __extension__ ({ \
+ int64_t __a = (a); int64_t __b = (b); \
+ (int64_t)__builtin_neon_vsrad_n_s64(__a, __b, __c); })
+#define vsrad_n_u64(a, b, __c) __extension__ ({ \
+ uint64_t __a = (a); uint64_t __b = (b); \
+ (uint64_t)__builtin_neon_vsrad_n_u64(__a, __b, __c); })
+
+__ai int64_t vsubd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vsubd_s64(__a, __b); }
+__ai uint64_t vsubd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vsubd_u64(__a, __b); }
+
+__ai int8_t vuqaddb_s8(int8_t __a, int8_t __b) {
+ return (int8_t)__builtin_neon_vuqaddb_s8(__a, __b); }
+__ai int16_t vuqaddh_s16(int16_t __a, int16_t __b) {
+ return (int16_t)__builtin_neon_vuqaddh_s16(__a, __b); }
+__ai int32_t vuqadds_s32(int32_t __a, int32_t __b) {
+ return (int32_t)__builtin_neon_vuqadds_s32(__a, __b); }
+__ai int64_t vuqaddd_s64(int64_t __a, int64_t __b) {
+ return (int64_t)__builtin_neon_vuqaddd_s64(__a, __b); }
+
+__ai float64_t vcvtd_f64_u64(uint64_t __a) {
+ return (float64_t)__builtin_neon_vcvtd_f64_u64(__a); }
+
+__ai float32_t vcvts_f32_u32(uint32_t __a) {
+ return (float32_t)__builtin_neon_vcvts_f32_u32(__a); }
+
+__ai uint8_t vqmovnh_u16(uint16_t __a) {
+ return (uint8_t)__builtin_neon_vqmovnh_u16(__a); }
+__ai uint16_t vqmovns_u32(uint32_t __a) {
+ return (uint16_t)__builtin_neon_vqmovns_u32(__a); }
+__ai uint32_t vqmovnd_u64(uint64_t __a) {
+ return (uint32_t)__builtin_neon_vqmovnd_u64(__a); }
+
+__ai uint8_t vsqaddb_u8(uint8_t __a, uint8_t __b) {
+ return (uint8_t)__builtin_neon_vsqaddb_u8(__a, __b); }
+__ai uint16_t vsqaddh_u16(uint16_t __a, uint16_t __b) {
+ return (uint16_t)__builtin_neon_vsqaddh_u16(__a, __b); }
+__ai uint32_t vsqadds_u32(uint32_t __a, uint32_t __b) {
+ return (uint32_t)__builtin_neon_vsqadds_u32(__a, __b); }
+__ai uint64_t vsqaddd_u64(uint64_t __a, uint64_t __b) {
+ return (uint64_t)__builtin_neon_vsqaddd_u64(__a, __b); }
+
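+/* vdup*_lane/_laneq: extract lane __b of a 64-bit or 128-bit vector as a
+   scalar. */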
+#define vdupb_lane_s8(a, __b) __extension__ ({ \
+ int8x8_t __a = (a); \
+ (int8_t)__builtin_neon_vdupb_lane_i8(__a, __b); })
+#define vduph_lane_s16(a, __b) __extension__ ({ \
+ int16x4_t __a = (a); \
+ (int16_t)__builtin_neon_vduph_lane_i16(__a, __b); })
+#define vdups_lane_s32(a, __b) __extension__ ({ \
+ int32x2_t __a = (a); \
+ (int32_t)__builtin_neon_vdups_lane_i32(__a, __b); })
+#define vdupd_lane_s64(a, __b) __extension__ ({ \
+ int64x1_t __a = (a); \
+ (int64_t)__builtin_neon_vdupd_lane_i64(__a, __b); })
+#define vdups_lane_f32(a, __b) __extension__ ({ \
+ float32x2_t __a = (a); \
+ (float32_t)__builtin_neon_vdups_lane_f32(__a, __b); })
+#define vdupd_lane_f64(a, __b) __extension__ ({ \
+ float64x1_t __a = (a); \
+ (float64_t)__builtin_neon_vdupd_lane_f64(__a, __b); })
+#define vdupb_lane_u8(a, __b) __extension__ ({ \
+ uint8x8_t __a = (a); \
+ (uint8_t)__builtin_neon_vdupb_lane_i8((int8x8_t)__a, __b); })
+#define vduph_lane_u16(a, __b) __extension__ ({ \
+ uint16x4_t __a = (a); \
+ (uint16_t)__builtin_neon_vduph_lane_i16((int16x4_t)__a, __b); })
+#define vdups_lane_u32(a, __b) __extension__ ({ \
+ uint32x2_t __a = (a); \
+ (uint32_t)__builtin_neon_vdups_lane_i32((int32x2_t)__a, __b); })
+#define vdupd_lane_u64(a, __b) __extension__ ({ \
+ uint64x1_t __a = (a); \
+ (uint64_t)__builtin_neon_vdupd_lane_i64((int64x1_t)__a, __b); })
+#define vdupb_lane_p8(a, __b) __extension__ ({ \
+ poly8x8_t __a = (a); \
+ (poly8_t)__builtin_neon_vdupb_lane_i8((int8x8_t)__a, __b); })
+#define vduph_lane_p16(a, __b) __extension__ ({ \
+ poly16x4_t __a = (a); \
+ (poly16_t)__builtin_neon_vduph_lane_i16((int16x4_t)__a, __b); })
+
+#define vdupb_laneq_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ (int8_t)__builtin_neon_vdupb_laneq_i8(__a, __b); })
+#define vduph_laneq_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ (int16_t)__builtin_neon_vduph_laneq_i16(__a, __b); })
+#define vdups_laneq_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ (int32_t)__builtin_neon_vdups_laneq_i32(__a, __b); })
+#define vdupd_laneq_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ (int64_t)__builtin_neon_vdupd_laneq_i64(__a, __b); })
+#define vdups_laneq_f32(a, __b) __extension__ ({ \
+ float32x4_t __a = (a); \
+ (float32_t)__builtin_neon_vdups_laneq_f32(__a, __b); })
+#define vdupd_laneq_f64(a, __b) __extension__ ({ \
+ float64x2_t __a = (a); \
+ (float64_t)__builtin_neon_vdupd_laneq_f64(__a, __b); })
+#define vdupb_laneq_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ (uint8_t)__builtin_neon_vdupb_laneq_i8((int8x16_t)__a, __b); })
+#define vduph_laneq_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ (uint16_t)__builtin_neon_vduph_laneq_i16((int16x8_t)__a, __b); })
+#define vdups_laneq_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ (uint32_t)__builtin_neon_vdups_laneq_i32((int32x4_t)__a, __b); })
+#define vdupd_laneq_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ (uint64_t)__builtin_neon_vdupd_laneq_i64((int64x2_t)__a, __b); })
+#define vdupb_laneq_p8(a, __b) __extension__ ({ \
+ poly8x16_t __a = (a); \
+ (poly8_t)__builtin_neon_vdupb_laneq_i8((int8x16_t)__a, __b); })
+#define vduph_laneq_p16(a, __b) __extension__ ({ \
+ poly16x8_t __a = (a); \
+ (poly16_t)__builtin_neon_vduph_laneq_i16((int16x8_t)__a, __b); })
+
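+/* Note: float64x1_t has a single lane, so the lane index written back here
+   must be 0. */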
+#define vmulx_lane_f64(a, b, __c) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); \
+ float64_t __d1 = vget_lane_f64(__a, 0);\
+ float64_t __e1 = vget_lane_f64(__b, __c);\
+ float64_t __f1 = vmulxd_f64(__d1, __e1);\
+ float64x1_t __g1;\
+ vset_lane_f64(__f1, __g1, __c); })
+
+#define vmulx_laneq_f64(a, b, __c) __extension__ ({ \
+ float64x1_t __a = (a); float64x2_t __b = (b); \
+ float64_t __d1 = vget_lane_f64(__a, 0);\
+ float64_t __e1 = vgetq_lane_f64(__b, __c);\
+ float64_t __f1 = vmulxd_f64(__d1, __e1);\
+ float64x1_t __g1;\
+ vset_lane_f64(__f1, __g1, 0); })
+
+#define vmul_lane_f64(a, b, __c) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); \
+ (float64x1_t)__builtin_neon_vmul_lane_v((int8x8_t)__a, (int8x8_t)__b, __c, 9); })
+
+#define vmul_laneq_f64(a, b, __c) __extension__ ({ \
+ float64x1_t __a = (a); float64x2_t __b = (b); \
+ (float64x1_t)__builtin_neon_vmul_laneq_v((int8x8_t)__a, (int8x16_t)__b, __c, 9); })
+
+__ai float64x1_t vmul_n_f64(float64x1_t __a, float64_t __b) {
+ return (float64x1_t)__builtin_neon_vmul_n_f64(__a, __b); }
+
+#define vset_lane_f64(a, b, __c) __extension__ ({ \
+ float64_t __a = (a); float64x1_t __b = (b); \
+ (float64x1_t)__builtin_neon_vset_lane_f64(__a, __b, __c); })
+#define vsetq_lane_f64(a, b, __c) __extension__ ({ \
+ float64_t __a = (a); float64x2_t __b = (b); \
+ (float64x2_t)__builtin_neon_vsetq_lane_f64(__a, __b, __c); })
+#define vset_lane_p64(a, b, __c) __extension__ ({ \
+ poly64_t __a = (a); poly64x1_t __b = (b); \
+ (poly64x1_t)__builtin_neon_vset_lane_i64(__a, (int64x1_t)__b, __c); })
+#define vsetq_lane_p64(a, b, __c) __extension__ ({ \
+ poly64_t __a = (a); poly64x2_t __b = (b); \
+ (poly64x2_t)__builtin_neon_vsetq_lane_i64(__a, (int64x2_t)__b, __c); })
+
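+/* The _high variants operate on the upper 64-bit half of a 128-bit input. */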
+#define vshll_high_n_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ int8x8_t __a1 = vget_high_s8(__a); \
+ (int16x8_t)vshll_n_s8(__a1, __b); })
+#define vshll_high_n_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ int16x4_t __a1 = vget_high_s16(__a); \
+ (int32x4_t)vshll_n_s16(__a1, __b); })
+#define vshll_high_n_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ int32x2_t __a1 = vget_high_s32(__a); \
+ (int64x2_t)vshll_n_s32(__a1, __b); })
+#define vshll_high_n_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ uint8x8_t __a1 = vget_high_u8(__a); \
+ (uint16x8_t)vshll_n_u8(__a1, __b); })
+#define vshll_high_n_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ uint16x4_t __a1 = vget_high_u16(__a); \
+ (uint32x4_t)vshll_n_u16(__a1, __b); })
+#define vshll_high_n_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ uint32x2_t __a1 = vget_high_u32(__a); \
+ (uint64x2_t)vshll_n_u32(__a1, __b); })
+
+#define vshrn_high_n_s16(a, b, __c) __extension__ ({ \
+  int8x8_t __a = (a); int16x8_t __b = (b); \
+  (int8x16_t)vcombine_s8(__a, vshrn_n_s16(__b, __c)); })
+#define vshrn_high_n_s32(a, b, __c) __extension__ ({ \
+  int16x4_t __a = (a); int32x4_t __b = (b); \
+  (int16x8_t)vcombine_s16(__a, vshrn_n_s32(__b, __c)); })
+#define vshrn_high_n_s64(a, b, __c) __extension__ ({ \
+  int32x2_t __a = (a); int64x2_t __b = (b); \
+  (int32x4_t)vcombine_s32(__a, vshrn_n_s64(__b, __c)); })
+#define vshrn_high_n_u16(a, b, __c) __extension__ ({ \
+  uint8x8_t __a = (a); uint16x8_t __b = (b); \
+  (uint8x16_t)vcombine_u8(__a, vshrn_n_u16(__b, __c)); })
+#define vshrn_high_n_u32(a, b, __c) __extension__ ({ \
+  uint16x4_t __a = (a); uint32x4_t __b = (b); \
+  (uint16x8_t)vcombine_u16(__a, vshrn_n_u32(__b, __c)); })
+#define vshrn_high_n_u64(a, b, __c) __extension__ ({ \
+  uint32x2_t __a = (a); uint64x2_t __b = (b); \
+  (uint32x4_t)vcombine_u32(__a, vshrn_n_u64(__b, __c)); })
+
+#define vsli_n_p64(a, b, __c) __extension__ ({ \
+ poly64x1_t __a = (a); poly64x1_t __b = (b); \
+ (poly64x1_t)__builtin_neon_vsli_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 6); })
+#define vsliq_n_p64(a, b, __c) __extension__ ({ \
+ poly64x2_t __a = (a); poly64x2_t __b = (b); \
+ (poly64x2_t)__builtin_neon_vsliq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 38); })
+
+__ai int8x16_t vqmovun_high_s16(int8x8_t __a, int16x8_t __b) {
+ int8x8_t __a1 = vqmovun_s16(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai int16x8_t vqmovun_high_s32(int16x4_t __a, int32x4_t __b) {
+ int16x4_t __a1 = vqmovun_s32(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai int32x4_t vqmovun_high_s64(int32x2_t __a, int64x2_t __b) {
+ int32x2_t __a1 = vqmovun_s64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
+
+#define vsri_n_p64(a, b, __c) __extension__ ({ \
+ poly64x1_t __a = (a); poly64x1_t __b = (b); \
+ (poly64x1_t)__builtin_neon_vsri_n_v((int8x8_t)__a, (int8x8_t)__b, __c, 6); })
+#define vsriq_n_p64(a, b, __c) __extension__ ({ \
+ poly64x2_t __a = (a); poly64x2_t __b = (b); \
+ (poly64x2_t)__builtin_neon_vsriq_n_v((int8x16_t)__a, (int8x16_t)__b, __c, 38); })
+
+#define vst1q_f64(__a, b) __extension__ ({ \
+ float64x2_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 41); })
+#define vst1_f64(__a, b) __extension__ ({ \
+ float64x1_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 9); })
+#define vst1_p64(__a, b) __extension__ ({ \
+ poly64x1_t __b = (b); \
+ __builtin_neon_vst1_v(__a, (int8x8_t)__b, 6); })
+#define vst1q_p64(__a, b) __extension__ ({ \
+ poly64x2_t __b = (b); \
+ __builtin_neon_vst1q_v(__a, (int8x16_t)__b, 38); })
+
+#define vst1q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 41); })
+#define vst1q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2_t __b = (b); \
+ __builtin_neon_vst1q_lane_v(__a, (int8x16_t)__b, __c, 38); })
+#define vst1_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 9); })
+#define vst1_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1_t __b = (b); \
+ __builtin_neon_vst1_lane_v(__a, (int8x8_t)__b, __c, 6); })
+
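+/* vst1*_x2/_x3/_x4: store 2, 3, or 4 whole vectors to consecutive memory,
+   without interleaving. */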
+#define vst1q_u8_x2(__a, b) __extension__ ({ \
+ uint8x16x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 48); })
+#define vst1q_u16_x2(__a, b) __extension__ ({ \
+ uint16x8x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 49); })
+#define vst1q_u32_x2(__a, b) __extension__ ({ \
+ uint32x4x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 50); })
+#define vst1q_u64_x2(__a, b) __extension__ ({ \
+ uint64x2x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 51); })
+#define vst1q_s8_x2(__a, b) __extension__ ({ \
+ int8x16x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, __b.val[0], __b.val[1], 32); })
+#define vst1q_s16_x2(__a, b) __extension__ ({ \
+ int16x8x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 33); })
+#define vst1q_s32_x2(__a, b) __extension__ ({ \
+ int32x4x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 34); })
+#define vst1q_s64_x2(__a, b) __extension__ ({ \
+ int64x2x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 35); })
+#define vst1q_f16_x2(__a, b) __extension__ ({ \
+ float16x8x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 39); })
+#define vst1q_f32_x2(__a, b) __extension__ ({ \
+ float32x4x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 40); })
+#define vst1q_f64_x2(__a, b) __extension__ ({ \
+ float64x2x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 41); })
+#define vst1q_p8_x2(__a, b) __extension__ ({ \
+ poly8x16x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 36); })
+#define vst1q_p16_x2(__a, b) __extension__ ({ \
+ poly16x8x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 37); })
+#define vst1q_p64_x2(__a, b) __extension__ ({ \
+ poly64x2x2_t __b = (b); \
+ __builtin_neon_vst1q_x2_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 38); })
+#define vst1_u8_x2(__a, b) __extension__ ({ \
+ uint8x8x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 16); })
+#define vst1_u16_x2(__a, b) __extension__ ({ \
+ uint16x4x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 17); })
+#define vst1_u32_x2(__a, b) __extension__ ({ \
+ uint32x2x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 18); })
+#define vst1_u64_x2(__a, b) __extension__ ({ \
+ uint64x1x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 19); })
+#define vst1_s8_x2(__a, b) __extension__ ({ \
+ int8x8x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, __b.val[0], __b.val[1], 0); })
+#define vst1_s16_x2(__a, b) __extension__ ({ \
+ int16x4x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 1); })
+#define vst1_s32_x2(__a, b) __extension__ ({ \
+ int32x2x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 2); })
+#define vst1_s64_x2(__a, b) __extension__ ({ \
+ int64x1x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 3); })
+#define vst1_f16_x2(__a, b) __extension__ ({ \
+ float16x4x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 7); })
+#define vst1_f32_x2(__a, b) __extension__ ({ \
+ float32x2x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 8); })
+#define vst1_f64_x2(__a, b) __extension__ ({ \
+ float64x1x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 9); })
+#define vst1_p8_x2(__a, b) __extension__ ({ \
+ poly8x8x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 4); })
+#define vst1_p16_x2(__a, b) __extension__ ({ \
+ poly16x4x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 5); })
+#define vst1_p64_x2(__a, b) __extension__ ({ \
+ poly64x1x2_t __b = (b); \
+ __builtin_neon_vst1_x2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 6); })
+
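+/* Usage sketch (assuming the matching vld1q_u8_x2 load is declared earlier
+   in this header): uint8x16x2_t __v = vld1q_u8_x2(__src);
+   vst1q_u8_x2(__dst, __v); */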
+#define vst1q_u8_x3(__a, b) __extension__ ({ \
+ uint8x16x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 48); })
+#define vst1q_u16_x3(__a, b) __extension__ ({ \
+ uint16x8x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 49); })
+#define vst1q_u32_x3(__a, b) __extension__ ({ \
+ uint32x4x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 50); })
+#define vst1q_u64_x3(__a, b) __extension__ ({ \
+ uint64x2x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 51); })
+#define vst1q_s8_x3(__a, b) __extension__ ({ \
+ int8x16x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, __b.val[0], __b.val[1], __b.val[2], 32); })
+#define vst1q_s16_x3(__a, b) __extension__ ({ \
+ int16x8x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 33); })
+#define vst1q_s32_x3(__a, b) __extension__ ({ \
+ int32x4x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 34); })
+#define vst1q_s64_x3(__a, b) __extension__ ({ \
+ int64x2x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 35); })
+#define vst1q_f16_x3(__a, b) __extension__ ({ \
+ float16x8x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 39); })
+#define vst1q_f32_x3(__a, b) __extension__ ({ \
+ float32x4x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 40); })
+#define vst1q_f64_x3(__a, b) __extension__ ({ \
+ float64x2x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 41); })
+#define vst1q_p8_x3(__a, b) __extension__ ({ \
+ poly8x16x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 36); })
+#define vst1q_p16_x3(__a, b) __extension__ ({ \
+ poly16x8x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 37); })
+#define vst1q_p64_x3(__a, b) __extension__ ({ \
+ poly64x2x3_t __b = (b); \
+ __builtin_neon_vst1q_x3_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 38); })
+#define vst1_u8_x3(__a, b) __extension__ ({ \
+ uint8x8x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 16); })
+#define vst1_u16_x3(__a, b) __extension__ ({ \
+ uint16x4x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 17); })
+#define vst1_u32_x3(__a, b) __extension__ ({ \
+ uint32x2x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 18); })
+#define vst1_u64_x3(__a, b) __extension__ ({ \
+ uint64x1x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 19); })
+#define vst1_s8_x3(__a, b) __extension__ ({ \
+ int8x8x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, __b.val[0], __b.val[1], __b.val[2], 0); })
+#define vst1_s16_x3(__a, b) __extension__ ({ \
+ int16x4x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 1); })
+#define vst1_s32_x3(__a, b) __extension__ ({ \
+ int32x2x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 2); })
+#define vst1_s64_x3(__a, b) __extension__ ({ \
+ int64x1x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 3); })
+#define vst1_f16_x3(__a, b) __extension__ ({ \
+ float16x4x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 7); })
+#define vst1_f32_x3(__a, b) __extension__ ({ \
+ float32x2x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 8); })
+#define vst1_f64_x3(__a, b) __extension__ ({ \
+ float64x1x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 9); })
+#define vst1_p8_x3(__a, b) __extension__ ({ \
+ poly8x8x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 4); })
+#define vst1_p16_x3(__a, b) __extension__ ({ \
+ poly16x4x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 5); })
+#define vst1_p64_x3(__a, b) __extension__ ({ \
+ poly64x1x3_t __b = (b); \
+ __builtin_neon_vst1_x3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 6); })
+
+#define vst1q_u8_x4(__a, b) __extension__ ({ \
+ uint8x16x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 48); })
+#define vst1q_u16_x4(__a, b) __extension__ ({ \
+ uint16x8x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 49); })
+#define vst1q_u32_x4(__a, b) __extension__ ({ \
+ uint32x4x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 50); })
+#define vst1q_u64_x4(__a, b) __extension__ ({ \
+ uint64x2x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 51); })
+#define vst1q_s8_x4(__a, b) __extension__ ({ \
+ int8x16x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 32); })
+#define vst1q_s16_x4(__a, b) __extension__ ({ \
+ int16x8x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 33); })
+#define vst1q_s32_x4(__a, b) __extension__ ({ \
+ int32x4x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 34); })
+#define vst1q_s64_x4(__a, b) __extension__ ({ \
+ int64x2x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 35); })
+#define vst1q_f16_x4(__a, b) __extension__ ({ \
+ float16x8x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 39); })
+#define vst1q_f32_x4(__a, b) __extension__ ({ \
+ float32x4x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 40); })
+#define vst1q_f64_x4(__a, b) __extension__ ({ \
+ float64x2x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 41); })
+#define vst1q_p8_x4(__a, b) __extension__ ({ \
+ poly8x16x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 36); })
+#define vst1q_p16_x4(__a, b) __extension__ ({ \
+ poly16x8x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 37); })
+#define vst1q_p64_x4(__a, b) __extension__ ({ \
+ poly64x2x4_t __b = (b); \
+ __builtin_neon_vst1q_x4_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 38); })
+#define vst1_u8_x4(__a, b) __extension__ ({ \
+ uint8x8x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 16); })
+#define vst1_u16_x4(__a, b) __extension__ ({ \
+ uint16x4x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 17); })
+#define vst1_u32_x4(__a, b) __extension__ ({ \
+ uint32x2x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 18); })
+#define vst1_u64_x4(__a, b) __extension__ ({ \
+ uint64x1x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 19); })
+#define vst1_s8_x4(__a, b) __extension__ ({ \
+ int8x8x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], 0); })
+#define vst1_s16_x4(__a, b) __extension__ ({ \
+ int16x4x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 1); })
+#define vst1_s32_x4(__a, b) __extension__ ({ \
+ int32x2x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 2); })
+#define vst1_s64_x4(__a, b) __extension__ ({ \
+ int64x1x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 3); })
+#define vst1_f16_x4(__a, b) __extension__ ({ \
+ float16x4x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 7); })
+#define vst1_f32_x4(__a, b) __extension__ ({ \
+ float32x2x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 8); })
+#define vst1_f64_x4(__a, b) __extension__ ({ \
+ float64x1x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 9); })
+#define vst1_p8_x4(__a, b) __extension__ ({ \
+ poly8x8x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 4); })
+#define vst1_p16_x4(__a, b) __extension__ ({ \
+ poly16x4x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 5); })
+#define vst1_p64_x4(__a, b) __extension__ ({ \
+ poly64x1x4_t __b = (b); \
+ __builtin_neon_vst1_x4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 6); })
+
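+/* vst2/vst3/vst4: interleaving stores of 2-, 3-, or 4-element structures;
+   the _lane forms store the single structure at lane __c. */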
+#define vst2q_u64(__a, b) __extension__ ({ \
+ uint64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 51); })
+#define vst2q_s64(__a, b) __extension__ ({ \
+ int64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 35); })
+#define vst2q_f64(__a, b) __extension__ ({ \
+ float64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 41); })
+#define vst2_f64(__a, b) __extension__ ({ \
+ float64x1x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 9); })
+#define vst2_p64(__a, b) __extension__ ({ \
+ poly64x1x2_t __b = (b); \
+ __builtin_neon_vst2_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], 6); })
+#define vst2q_p64(__a, b) __extension__ ({ \
+ poly64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], 38); })
+
+#define vst2q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 48); })
+#define vst2q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 51); })
+#define vst2q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, __b.val[0], __b.val[1], __c, 32); })
+#define vst2q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 35); })
+#define vst2q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 41); })
+#define vst2q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 36); })
+#define vst2q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2x2_t __b = (b); \
+ __builtin_neon_vst2q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], __c, 38); })
+#define vst2_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 19); })
+#define vst2_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 3); })
+#define vst2_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 9); })
+#define vst2_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1x2_t __b = (b); \
+ __builtin_neon_vst2_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], __c, 6); })
+
+#define vst3q_u64(__a, b) __extension__ ({ \
+ uint64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 51); })
+#define vst3q_s64(__a, b) __extension__ ({ \
+ int64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 35); })
+#define vst3q_f64(__a, b) __extension__ ({ \
+ float64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 41); })
+#define vst3_f64(__a, b) __extension__ ({ \
+ float64x1x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 9); })
+#define vst3_p64(__a, b) __extension__ ({ \
+ poly64x1x3_t __b = (b); \
+ __builtin_neon_vst3_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], 6); })
+#define vst3q_p64(__a, b) __extension__ ({ \
+ poly64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], 38); })
+
+#define vst3q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 48); })
+#define vst3q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 51); })
+#define vst3q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 32); })
+#define vst3q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 35); })
+#define vst3q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 41); })
+#define vst3q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 36); })
+#define vst3q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2x3_t __b = (b); \
+ __builtin_neon_vst3q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], __c, 38); })
+#define vst3_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 19); })
+#define vst3_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 3); })
+#define vst3_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 9); })
+#define vst3_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1x3_t __b = (b); \
+ __builtin_neon_vst3_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], __c, 6); })
+
+#define vst4q_u64(__a, b) __extension__ ({ \
+ uint64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 51); })
+#define vst4q_s64(__a, b) __extension__ ({ \
+ int64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 35); })
+#define vst4q_f64(__a, b) __extension__ ({ \
+ float64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 41); })
+#define vst4_f64(__a, b) __extension__ ({ \
+ float64x1x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 9); })
+#define vst4_p64(__a, b) __extension__ ({ \
+ poly64x1x4_t __b = (b); \
+ __builtin_neon_vst4_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], 6); })
+#define vst4q_p64(__a, b) __extension__ ({ \
+ poly64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], 38); })
+
+#define vst4q_lane_u8(__a, b, __c) __extension__ ({ \
+ uint8x16x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 48); })
+#define vst4q_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 51); })
+#define vst4q_lane_s8(__a, b, __c) __extension__ ({ \
+ int8x16x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 32); })
+#define vst4q_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 35); })
+#define vst4q_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 41); })
+#define vst4q_lane_p8(__a, b, __c) __extension__ ({ \
+ poly8x16x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 36); })
+#define vst4q_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x2x4_t __b = (b); \
+ __builtin_neon_vst4q_lane_v(__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], __c, 38); })
+#define vst4_lane_u64(__a, b, __c) __extension__ ({ \
+ uint64x1x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 19); })
+#define vst4_lane_s64(__a, b, __c) __extension__ ({ \
+ int64x1x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 3); })
+#define vst4_lane_f64(__a, b, __c) __extension__ ({ \
+ float64x1x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 9); })
+#define vst4_lane_p64(__a, b, __c) __extension__ ({ \
+ poly64x1x4_t __b = (b); \
+ __builtin_neon_vst4_lane_v(__a, (int8x8_t)__b.val[0], (int8x8_t)__b.val[1], (int8x8_t)__b.val[2], (int8x8_t)__b.val[3], __c, 6); })
+
+__ai float64x1_t vsub_f64(float64x1_t __a, float64x1_t __b) {
+ return __a - __b; }
+__ai float64x2_t vsubq_f64(float64x2_t __a, float64x2_t __b) {
+ return __a - __b; }
+
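+/* SUQADD/USQADD: saturating add where the addend is treated as having the
+   opposite signedness of the accumulator. */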
+__ai int8x8_t vuqadd_s8(int8x8_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vuqadd_v(__a, __b, 0); }
+__ai int16x4_t vuqadd_s16(int16x4_t __a, int16x4_t __b) {
+ return (int16x4_t)__builtin_neon_vuqadd_v((int8x8_t)__a, (int8x8_t)__b, 1); }
+__ai int32x2_t vuqadd_s32(int32x2_t __a, int32x2_t __b) {
+ return (int32x2_t)__builtin_neon_vuqadd_v((int8x8_t)__a, (int8x8_t)__b, 2); }
+__ai int64x1_t vuqadd_s64(int64x1_t __a, int64x1_t __b) {
+ return (int64x1_t)__builtin_neon_vuqadd_v((int8x8_t)__a, (int8x8_t)__b, 3); }
+__ai int8x16_t vuqaddq_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vuqaddq_v(__a, __b, 32); }
+__ai int16x8_t vuqaddq_s16(int16x8_t __a, int16x8_t __b) {
+ return (int16x8_t)__builtin_neon_vuqaddq_v((int8x16_t)__a, (int8x16_t)__b, 33); }
+__ai int32x4_t vuqaddq_s32(int32x4_t __a, int32x4_t __b) {
+ return (int32x4_t)__builtin_neon_vuqaddq_v((int8x16_t)__a, (int8x16_t)__b, 34); }
+__ai int64x2_t vuqaddq_s64(int64x2_t __a, int64x2_t __b) {
+ return (int64x2_t)__builtin_neon_vuqaddq_v((int8x16_t)__a, (int8x16_t)__b, 35); }
+
+__ai uint8x8_t vsqadd_u8(uint8x8_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 16); }
+__ai uint16x4_t vsqadd_u16(uint16x4_t __a, uint16x4_t __b) {
+ return (uint16x4_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 17); }
+__ai uint32x2_t vsqadd_u32(uint32x2_t __a, uint32x2_t __b) {
+ return (uint32x2_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 18); }
+__ai uint64x1_t vsqadd_u64(uint64x1_t __a, uint64x1_t __b) {
+ return (uint64x1_t)__builtin_neon_vsqadd_v((int8x8_t)__a, (int8x8_t)__b, 19); }
+__ai uint8x16_t vsqaddq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai uint16x8_t vsqaddq_u16(uint16x8_t __a, uint16x8_t __b) {
+ return (uint16x8_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 49); }
+__ai uint32x4_t vsqaddq_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+__ai uint64x2_t vsqaddq_u64(uint64x2_t __a, uint64x2_t __b) {
+ return (uint64x2_t)__builtin_neon_vsqaddq_v((int8x16_t)__a, (int8x16_t)__b, 51); }
+
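+/* vabal_high: widening absolute-difference-accumulate over the upper halves
+   of __b and __c. */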
+__ai int16x8_t vabal_high_s8(int16x8_t __a, int8x16_t __b, int8x16_t __c) {
+ return vabal_s8(__a, vget_high_s8(__b), vget_high_s8(__c)); }
+__ai int32x4_t vabal_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
+ return vabal_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
+__ai int64x2_t vabal_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
+ return vabal_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
+__ai uint16x8_t vabal_high_u8(uint16x8_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return vabal_u8(__a, vget_high_u8(__b), vget_high_u8(__c)); }
+__ai uint32x4_t vabal_high_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vabal_u16(__a, vget_high_u16(__b), vget_high_u16(__c)); }
+__ai uint64x2_t vabal_high_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vabal_u32(__a, vget_high_u32(__b), vget_high_u32(__c)); }
+
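+/* vaddhn_high: add, keep the high half of each sum, and pack the narrowed
+   result into the upper half of the return value. */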
+__ai int8x16_t vaddhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return vcombine_s8(__a, vaddhn_s16(__b, __c)); }
+__ai int16x8_t vaddhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return vcombine_s16(__a, vaddhn_s32(__b, __c)); }
+__ai int32x4_t vaddhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
+ return vcombine_s32(__a, vaddhn_s64(__b, __c)); }
+__ai uint8x16_t vaddhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vcombine_u8(__a, vaddhn_u16(__b, __c)); }
+__ai uint16x8_t vaddhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vcombine_u16(__a, vaddhn_u32(__b, __c)); }
+__ai uint32x4_t vaddhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
+ return vcombine_u32(__a, vaddhn_u64(__b, __c)); }
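+/* vaddhn_high_*: add __b and __c, keep the high half of each widened sum,
+ * and pack the narrowed lanes into the upper half of the result; __a is
+ * carried through as the lower half via vcombine. */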
+
+__ai int16_t vaddlv_s8(int8x8_t __a) {
+ return (int16_t)__builtin_neon_vaddlv_s8(__a); }
+__ai int32_t vaddlv_s16(int16x4_t __a) {
+ return (int32_t)__builtin_neon_vaddlv_s16(__a); }
+__ai int64_t vaddlv_s32(int32x2_t __a) {
+ return (int64_t)__builtin_neon_vaddlv_s32(__a); }
+__ai uint16_t vaddlv_u8(uint8x8_t __a) {
+ return (uint16_t)__builtin_neon_vaddlv_u8((int8x8_t)__a); }
+__ai uint32_t vaddlv_u16(uint16x4_t __a) {
+ return (uint32_t)__builtin_neon_vaddlv_u16((int16x4_t)__a); }
+__ai uint64_t vaddlv_u32(uint32x2_t __a) {
+ return (uint64_t)__builtin_neon_vaddlv_u32((int32x2_t)__a); }
+__ai int16_t vaddlvq_s8(int8x16_t __a) {
+ return (int16_t)__builtin_neon_vaddlvq_s8(__a); }
+__ai int32_t vaddlvq_s16(int16x8_t __a) {
+ return (int32_t)__builtin_neon_vaddlvq_s16(__a); }
+__ai int64_t vaddlvq_s32(int32x4_t __a) {
+ return (int64_t)__builtin_neon_vaddlvq_s32(__a); }
+__ai uint16_t vaddlvq_u8(uint8x16_t __a) {
+ return (uint16_t)__builtin_neon_vaddlvq_u8((int8x16_t)__a); }
+__ai uint32_t vaddlvq_u16(uint16x8_t __a) {
+ return (uint32_t)__builtin_neon_vaddlvq_u16((int16x8_t)__a); }
+__ai uint64_t vaddlvq_u32(uint32x4_t __a) {
+ return (uint64_t)__builtin_neon_vaddlvq_u32((int32x4_t)__a); }
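+/* vaddlv_*: across-vector reduction summing every lane into one scalar of
+ * twice the element width (AArch64 ADDLV/UADDLV). For example:
+ *   int16_t s = vaddlv_s8(v);  // s = v[0] + ... + v[7], widened to 16 bits
+ */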
+
+__ai int16x8_t vaddl_high_s8(int8x16_t __a, int8x16_t __b) {
+ return vmovl_high_s8(__a) + vmovl_high_s8(__b); }
+__ai int32x4_t vaddl_high_s16(int16x8_t __a, int16x8_t __b) {
+ return vmovl_high_s16(__a) + vmovl_high_s16(__b); }
+__ai int64x2_t vaddl_high_s32(int32x4_t __a, int32x4_t __b) {
+ return vmovl_high_s32(__a) + vmovl_high_s32(__b); }
+__ai uint16x8_t vaddl_high_u8(uint8x16_t __a, uint8x16_t __b) {
+ return vmovl_high_u8(__a) + vmovl_high_u8(__b); }
+__ai uint32x4_t vaddl_high_u16(uint16x8_t __a, uint16x8_t __b) {
+ return vmovl_high_u16(__a) + vmovl_high_u16(__b); }
+__ai uint64x2_t vaddl_high_u32(uint32x4_t __a, uint32x4_t __b) {
+ return vmovl_high_u32(__a) + vmovl_high_u32(__b); }
+
+__ai int8_t vaddv_s8(int8x8_t __a) {
+ return (int8_t)__builtin_neon_vaddv_s8(__a); }
+__ai int16_t vaddv_s16(int16x4_t __a) {
+ return (int16_t)__builtin_neon_vaddv_s16(__a); }
+__ai int32_t vaddv_s32(int32x2_t __a) {
+ return (int32_t)__builtin_neon_vaddv_s32(__a); }
+__ai float32_t vaddv_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vaddv_f32(__a); }
+__ai uint8_t vaddv_u8(uint8x8_t __a) {
+ return (uint8_t)__builtin_neon_vaddv_u8((int8x8_t)__a); }
+__ai uint16_t vaddv_u16(uint16x4_t __a) {
+ return (uint16_t)__builtin_neon_vaddv_u16((int16x4_t)__a); }
+__ai uint32_t vaddv_u32(uint32x2_t __a) {
+ return (uint32_t)__builtin_neon_vaddv_u32((int32x2_t)__a); }
+__ai int8_t vaddvq_s8(int8x16_t __a) {
+ return (int8_t)__builtin_neon_vaddvq_s8(__a); }
+__ai int16_t vaddvq_s16(int16x8_t __a) {
+ return (int16_t)__builtin_neon_vaddvq_s16(__a); }
+__ai int32_t vaddvq_s32(int32x4_t __a) {
+ return (int32_t)__builtin_neon_vaddvq_s32(__a); }
+__ai uint8_t vaddvq_u8(uint8x16_t __a) {
+ return (uint8_t)__builtin_neon_vaddvq_u8((int8x16_t)__a); }
+__ai uint16_t vaddvq_u16(uint16x8_t __a) {
+ return (uint16_t)__builtin_neon_vaddvq_u16((int16x8_t)__a); }
+__ai uint32_t vaddvq_u32(uint32x4_t __a) {
+ return (uint32_t)__builtin_neon_vaddvq_u32((int32x4_t)__a); }
+__ai float32_t vaddvq_f32(float32x4_t __a) {
+ return (float32_t)__builtin_neon_vaddvq_f32(__a); }
+__ai float64_t vaddvq_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vaddvq_f64(__a); }
+__ai int64_t vaddvq_s64(int64x2_t __a) {
+ return (int64_t)__builtin_neon_vaddvq_s64(__a); }
+__ai uint64_t vaddvq_u64(uint64x2_t __a) {
+ return (uint64_t)__builtin_neon_vaddvq_u64((int64x2_t)__a); }
+
+__ai int16x8_t vaddw_high_s8(int16x8_t __a, int8x16_t __b) {
+ return __a + vmovl_high_s8(__b); }
+__ai int32x4_t vaddw_high_s16(int32x4_t __a, int16x8_t __b) {
+ return __a + vmovl_high_s16(__b); }
+__ai int64x2_t vaddw_high_s32(int64x2_t __a, int32x4_t __b) {
+ return __a + vmovl_high_s32(__b); }
+__ai uint16x8_t vaddw_high_u8(uint16x8_t __a, uint8x16_t __b) {
+ return __a + vmovl_high_u8(__b); }
+__ai uint32x4_t vaddw_high_u16(uint32x4_t __a, uint16x8_t __b) {
+ return __a + vmovl_high_u16(__b); }
+__ai uint64x2_t vaddw_high_u32(uint64x2_t __a, uint32x4_t __b) {
+ return __a + vmovl_high_u32(__b); }
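+/* vaddl_high_* / vaddw_high_*: widening adds that consume the high halves of
+ * 128-bit inputs directly, avoiding a separate vget_high_*. Roughly:
+ *   vaddl_high_s8(a, b) == vaddl_s8(vget_high_s8(a), vget_high_s8(b))
+ */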
+
+__ai float32x2_t vcvtx_f32_f64(float64x2_t __a) {
+ return (float32x2_t)__builtin_neon_vcvtx_f32_v((int8x16_t)__a, 41); }
+
+__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __a, float64x2_t __b) {
+ float32x2_t __a1 = vcvtx_f32_f64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
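+/* vcvtx_f32_f64 is FCVTXN: double-to-float narrowing with round-to-odd,
+ * which avoids double rounding if the result is rounded again later.
+ * vcvtx_high_f32_f64 packs the narrowed lanes into the upper half of __a. */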
+
+__ai float32x2_t vcvt_f32_f64(float64x2_t __a) {
+ return (float32x2_t)__builtin_neon_vcvt_f32_f64((int8x16_t)__a, 41); }
+
+__ai float64x1_t vcvt_f64_s64(int64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vcvt_f64_v((int8x8_t)__a, 3); }
+__ai float64x1_t vcvt_f64_u64(uint64x1_t __a) {
+ return (float64x1_t)__builtin_neon_vcvt_f64_v((int8x8_t)__a, 19); }
+__ai float64x2_t vcvtq_f64_s64(int64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vcvtq_f64_v((int8x16_t)__a, 35); }
+__ai float64x2_t vcvtq_f64_u64(uint64x2_t __a) {
+ return (float64x2_t)__builtin_neon_vcvtq_f64_v((int8x16_t)__a, 51); }
+
+__ai float64x2_t vcvt_f64_f32(float32x2_t __a) {
+ return (float64x2_t)__builtin_neon_vcvt_f64_f32((int8x8_t)__a, 41); }
+
+__ai float16x8_t vcvt_high_f16_f32(float16x4_t __a, float32x4_t __b) {
+ float16x4_t __a1 = vcvt_f16_f32(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
+
+__ai float32x4_t vcvt_high_f32_f16(float16x8_t __a) {
+ float16x4_t __a1 = vget_high_f16(__a);
+ return vcvt_f32_f16(__a1); }
+
+__ai float32x4_t vcvt_high_f32_f64(float32x2_t __a, float64x2_t __b) {
+ float32x2_t __a1 = vcvt_f32_f64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
+
+__ai float64x2_t vcvt_high_f64_f32(float32x4_t __a) {
+ float32x2_t __a1 = vget_high_f32(__a);
+ return vcvt_f64_f32(__a1); }
+
+__ai int64x1_t vcvt_s64_f64(float64x1_t __a) {
+ return (int64x1_t)__builtin_neon_vcvt_s64_v((int8x8_t)__a, 3); }
+__ai int64x2_t vcvtq_s64_f64(float64x2_t __a) {
+ return (int64x2_t)__builtin_neon_vcvtq_s64_v((int8x16_t)__a, 35); }
+
+__ai uint64x1_t vcvt_u64_f64(float64x1_t __a) {
+ return (uint64x1_t)__builtin_neon_vcvt_u64_v((int8x8_t)__a, 19); }
+__ai uint64x2_t vcvtq_u64_f64(float64x2_t __a) {
+ return (uint64x2_t)__builtin_neon_vcvtq_u64_v((int8x16_t)__a, 51); }
+
+#define vdup_lane_f16(a, __b) __extension__ ({ \
+ float16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_lane_f64(a, __b) __extension__ ({ \
+ float64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdupq_lane_f16(a, __b) __extension__ ({ \
+ float16x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_lane_f64(a, __b) __extension__ ({ \
+ float64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_lane_p64(a, __b) __extension__ ({ \
+ poly64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdupq_lane_p64(a, __b) __extension__ ({ \
+ poly64x1_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+
+#define vdup_laneq_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdup_laneq_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_laneq_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_laneq_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdup_laneq_p8(a, __b) __extension__ ({ \
+ poly8x16_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdup_laneq_p16(a, __b) __extension__ ({ \
+ poly16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_laneq_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdup_laneq_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_laneq_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_laneq_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdup_laneq_f16(a, __b) __extension__ ({ \
+ float16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdup_laneq_f32(a, __b) __extension__ ({ \
+ float32x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_laneq_f64(a, __b) __extension__ ({ \
+ float64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdupq_laneq_s8(a, __b) __extension__ ({ \
+ int8x16_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_s16(a, __b) __extension__ ({ \
+ int16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_s32(a, __b) __extension__ ({ \
+ int32x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdupq_laneq_s64(a, __b) __extension__ ({ \
+ int64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdupq_laneq_p8(a, __b) __extension__ ({ \
+ poly8x16_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_p16(a, __b) __extension__ ({ \
+ poly16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_u8(a, __b) __extension__ ({ \
+ uint8x16_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_u16(a, __b) __extension__ ({ \
+ uint16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_u32(a, __b) __extension__ ({ \
+ uint32x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdupq_laneq_u64(a, __b) __extension__ ({ \
+ uint64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdupq_laneq_f16(a, __b) __extension__ ({ \
+ float16x8_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b, __b, __b, __b, __b); })
+#define vdupq_laneq_f32(a, __b) __extension__ ({ \
+ float32x4_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b, __b, __b); })
+#define vdupq_laneq_f64(a, __b) __extension__ ({ \
+ float64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
+#define vdup_laneq_p64(a, __b) __extension__ ({ \
+ poly64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b); })
+#define vdupq_laneq_p64(a, __b) __extension__ ({ \
+ poly64x2_t __a = (a); \
+ __builtin_shufflevector(__a, __a, __b, __b); })
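+/* vdup_lane_* / vdup_laneq_* broadcast one lane to every lane of the result;
+ * the "laneq" forms index a 128-bit source. These are compile-time shuffles,
+ * so the lane number must be a constant expression, e.g.
+ *   float32x4_t r = vdupq_laneq_f32(v, 3);
+ */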
+
+#define vext_f64(a, b, __c) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); \
+ (float64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 9); })
+#define vextq_f64(a, b, __c) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); \
+ (float64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 41); })
+#define vext_p64(a, b, __c) __extension__ ({ \
+ poly64x1_t __a = (a); poly64x1_t __b = (b); \
+ (poly64x1_t)__builtin_neon_vext_v((int8x8_t)__a, (int8x8_t)__b, __c, 6); })
+#define vextq_p64(a, b, __c) __extension__ ({ \
+ poly64x2_t __a = (a); poly64x2_t __b = (b); \
+ (poly64x2_t)__builtin_neon_vextq_v((int8x16_t)__a, (int8x16_t)__b, __c, 38); })
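+/* vext_* / vextq_*: extract a vector from the concatenation (__a:__b)
+ * starting at constant lane __c -- a sliding window across two registers,
+ * commonly used for shifting data between adjacent vectors. */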
+
+#define vfma_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
+ (float32x2_t)__builtin_neon_vfma_lane_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, __d, 8); })
+#define vfma_lane_f64(a, b, c, __d) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); float64x1_t __c = (c); \
+ (float64x1_t)__builtin_neon_vfma_lane_v((int8x8_t)__a, (int8x8_t)__b, (int8x8_t)__c, __d, 9); })
+#define vfmaq_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
+ (float32x4_t)__builtin_neon_vfmaq_lane_v((int8x16_t)__a, (int8x16_t)__b, (int8x8_t)__c, __d, 40); })
+#define vfmaq_lane_f64(a, b, c, __d) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); float64x1_t __c = (c); \
+ (float64x2_t)__builtin_neon_vfmaq_lane_v((int8x16_t)__a, (int8x16_t)__b, (int8x8_t)__c, __d, 41); })
+
+#define vfma_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
+ (float32x2_t)__builtin_neon_vfma_laneq_v((int8x8_t)__a, (int8x8_t)__b, (int8x16_t)__c, __d, 8); })
+#define vfma_laneq_f64(a, b, c, __d) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); float64x2_t __c = (c); \
+ (float64x1_t)__builtin_neon_vfma_laneq_v((int8x8_t)__a, (int8x8_t)__b, (int8x16_t)__c, __d, 9); })
+#define vfmaq_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
+ (float32x4_t)__builtin_neon_vfmaq_laneq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, __d, 40); })
+#define vfmaq_laneq_f64(a, b, c, __d) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); float64x2_t __c = (c); \
+ (float64x2_t)__builtin_neon_vfmaq_laneq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, __d, 41); })
+
+#define vfms_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x2_t __c = (c); \
+ float32x2_t __a1 = __a; \
+ float32x2_t __b1 = __b; \
+ float32x2_t __c1 = __c; \
+ vfma_lane_f32(__a1, __b1, -__c1, __d); })
+#define vfms_lane_f64(a, b, c, __d) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); float64x1_t __c = (c); \
+ float64x1_t __a1 = __a; \
+ float64x1_t __b1 = __b; \
+ float64x1_t __c1 = __c; \
+ vfma_lane_f64(__a1, __b1, -__c1, __d); })
+#define vfmsq_lane_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x2_t __c = (c); \
+ float32x4_t __a1 = __a; \
+ float32x4_t __b1 = __b; \
+ float32x2_t __c1 = __c; \
+ vfmaq_lane_f32(__a1, __b1, -__c1, __d); })
+#define vfmsq_lane_f64(a, b, c, __d) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); float64x1_t __c = (c); \
+ float64x2_t __a1 = __a; \
+ float64x2_t __b1 = __b; \
+ float64x1_t __c1 = __c; \
+ vfmaq_lane_f64(__a1, __b1, -__c1, __d); })
+
+#define vfms_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
+ float32x2_t __a1 = __a; \
+ float32x2_t __b1 = __b; \
+ float32x4_t __c1 = __c; \
+ vfma_laneq_f32(__a1, __b1, -__c1, __d); })
+#define vfms_laneq_f64(a, b, c, __d) __extension__ ({ \
+ float64x1_t __a = (a); float64x1_t __b = (b); float64x2_t __c = (c); \
+ float64x1_t __a1 = __a; \
+ float64x1_t __b1 = __b; \
+ float64x2_t __c1 = __c; \
+ vfma_laneq_f64(__a1, __b1, -__c1, __d); })
+#define vfmsq_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
+ float32x4_t __a1 = __a; \
+ float32x4_t __b1 = __b; \
+ float32x4_t __c1 = __c; \
+ vfmaq_laneq_f32(__a1, __b1, -__c1, __d); })
+#define vfmsq_laneq_f64(a, b, c, __d) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); float64x2_t __c = (c); \
+ float64x2_t __a1 = __a; \
+ float64x2_t __b1 = __b; \
+ float64x2_t __c1 = __c; \
+ vfmaq_laneq_f64(__a1, __b1, -__c1, __d); })
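+/* The vfms*_lane(q) forms are expressed as vfma with a negated multiplicand
+ * (-__c), so they still contract to a single fused multiply-subtract; __d
+ * selects the lane of the multiplier and must be a constant. */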
+
+__ai float64x1_t vget_high_f64(float64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1); }
+__ai poly64x1_t vget_high_p64(poly64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 1); }
+
+__ai float64x1_t vget_low_f64(float64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 0); }
+__ai poly64x1_t vget_low_p64(poly64x2_t __a) {
+ return __builtin_shufflevector(__a, __a, 0); }
+
+__ai int8_t vmaxv_s8(int8x8_t __a) {
+ return (int8_t)__builtin_neon_vmaxv_s8(__a); }
+__ai int16_t vmaxv_s16(int16x4_t __a) {
+ return (int16_t)__builtin_neon_vmaxv_s16(__a); }
+__ai int32_t vmaxv_s32(int32x2_t __a) {
+ return (int32_t)__builtin_neon_vmaxv_s32(__a); }
+__ai float32_t vmaxv_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vmaxv_f32(__a); }
+__ai uint8_t vmaxv_u8(uint8x8_t __a) {
+ return (uint8_t)__builtin_neon_vmaxv_u8((int8x8_t)__a); }
+__ai uint16_t vmaxv_u16(uint16x4_t __a) {
+ return (uint16_t)__builtin_neon_vmaxv_u16((int16x4_t)__a); }
+__ai uint32_t vmaxv_u32(uint32x2_t __a) {
+ return (uint32_t)__builtin_neon_vmaxv_u32((int32x2_t)__a); }
+__ai int8_t vmaxvq_s8(int8x16_t __a) {
+ return (int8_t)__builtin_neon_vmaxvq_s8(__a); }
+__ai int16_t vmaxvq_s16(int16x8_t __a) {
+ return (int16_t)__builtin_neon_vmaxvq_s16(__a); }
+__ai int32_t vmaxvq_s32(int32x4_t __a) {
+ return (int32_t)__builtin_neon_vmaxvq_s32(__a); }
+__ai uint8_t vmaxvq_u8(uint8x16_t __a) {
+ return (uint8_t)__builtin_neon_vmaxvq_u8((int8x16_t)__a); }
+__ai uint16_t vmaxvq_u16(uint16x8_t __a) {
+ return (uint16_t)__builtin_neon_vmaxvq_u16((int16x8_t)__a); }
+__ai uint32_t vmaxvq_u32(uint32x4_t __a) {
+ return (uint32_t)__builtin_neon_vmaxvq_u32((int32x4_t)__a); }
+__ai float32_t vmaxvq_f32(float32x4_t __a) {
+ return (float32_t)__builtin_neon_vmaxvq_f32(__a); }
+__ai float64_t vmaxvq_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vmaxvq_f64(__a); }
+
+__ai int8_t vminv_s8(int8x8_t __a) {
+ return (int8_t)__builtin_neon_vminv_s8(__a); }
+__ai int16_t vminv_s16(int16x4_t __a) {
+ return (int16_t)__builtin_neon_vminv_s16(__a); }
+__ai int32_t vminv_s32(int32x2_t __a) {
+ return (int32_t)__builtin_neon_vminv_s32(__a); }
+__ai float32_t vminv_f32(float32x2_t __a) {
+ return (float32_t)__builtin_neon_vminv_f32(__a); }
+__ai uint8_t vminv_u8(uint8x8_t __a) {
+ return (uint8_t)__builtin_neon_vminv_u8((int8x8_t)__a); }
+__ai uint16_t vminv_u16(uint16x4_t __a) {
+ return (uint16_t)__builtin_neon_vminv_u16((int16x4_t)__a); }
+__ai uint32_t vminv_u32(uint32x2_t __a) {
+ return (uint32_t)__builtin_neon_vminv_u32((int32x2_t)__a); }
+__ai int8_t vminvq_s8(int8x16_t __a) {
+ return (int8_t)__builtin_neon_vminvq_s8(__a); }
+__ai int16_t vminvq_s16(int16x8_t __a) {
+ return (int16_t)__builtin_neon_vminvq_s16(__a); }
+__ai int32_t vminvq_s32(int32x4_t __a) {
+ return (int32_t)__builtin_neon_vminvq_s32(__a); }
+__ai uint8_t vminvq_u8(uint8x16_t __a) {
+ return (uint8_t)__builtin_neon_vminvq_u8((int8x16_t)__a); }
+__ai uint16_t vminvq_u16(uint16x8_t __a) {
+ return (uint16_t)__builtin_neon_vminvq_u16((int16x8_t)__a); }
+__ai uint32_t vminvq_u32(uint32x4_t __a) {
+ return (uint32_t)__builtin_neon_vminvq_u32((int32x4_t)__a); }
+__ai float32_t vminvq_f32(float32x4_t __a) {
+ return (float32_t)__builtin_neon_vminvq_f32(__a); }
+__ai float64_t vminvq_f64(float64x2_t __a) {
+ return (float64_t)__builtin_neon_vminvq_f64(__a); }
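+/* vmaxv_* / vminv_*: horizontal reductions returning the maximum or minimum
+ * lane as a scalar (AArch64 SMAXV/UMAXV/FMAXV and friends), e.g.
+ *   uint8_t m = vmaxvq_u8(v);  // largest of the 16 lanes
+ */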
+
+__ai int16x8_t vmlal_high_s8(int16x8_t __a, int8x16_t __b, int8x16_t __c) {
+ return vmlal_s8(__a, vget_high_s8(__b), vget_high_s8(__c)); }
+__ai int32x4_t vmlal_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
+ return vmlal_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
+__ai int64x2_t vmlal_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
+ return vmlal_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
+__ai uint16x8_t vmlal_high_u8(uint16x8_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return vmlal_u8(__a, vget_high_u8(__b), vget_high_u8(__c)); }
+__ai uint32x4_t vmlal_high_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vmlal_u16(__a, vget_high_u16(__b), vget_high_u16(__c)); }
+__ai uint64x2_t vmlal_high_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vmlal_u32(__a, vget_high_u32(__b), vget_high_u32(__c)); }
+
+#define vmlal_high_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
+ __a + vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_high_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
+ __a + vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlal_high_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
+ __a + vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_high_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
+ __a + vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vmlal_high_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
+ __a + vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_high_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
+ __a + vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlal_high_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
+ __a + vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_high_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
+ __a + vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vmlal_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
+ return vmlal_n_s16(__a, vget_high_s16(__b), __c); }
+__ai int64x2_t vmlal_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
+ return vmlal_n_s32(__a, vget_high_s32(__b), __c); }
+__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __a, uint16x8_t __b, uint16_t __c) {
+ return vmlal_n_u16(__a, vget_high_u16(__b), __c); }
+__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __a, uint32x4_t __b, uint32_t __c) {
+ return vmlal_n_u32(__a, vget_high_u32(__b), __c); }
+
+#define vmlal_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
+ __a + vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
+ __a + vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlal_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
+ __a + vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlal_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
+ __a + vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vmla_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmla_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmla_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmla_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmla_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlaq_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlaq_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlaq_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlaq_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlaq_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
+ __a + (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+
+__ai int16x8_t vmlsl_high_s8(int16x8_t __a, int8x16_t __b, int8x16_t __c) {
+ return vmlsl_s8(__a, vget_high_s8(__b), vget_high_s8(__c)); }
+__ai int32x4_t vmlsl_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
+ return vmlsl_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
+__ai int64x2_t vmlsl_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
+ return vmlsl_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
+__ai uint16x8_t vmlsl_high_u8(uint16x8_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return vmlsl_u8(__a, vget_high_u8(__b), vget_high_u8(__c)); }
+__ai uint32x4_t vmlsl_high_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vmlsl_u16(__a, vget_high_u16(__b), vget_high_u16(__c)); }
+__ai uint64x2_t vmlsl_high_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vmlsl_u32(__a, vget_high_u32(__b), vget_high_u32(__c)); }
+
+#define vmlsl_high_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
+ __a - vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_high_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
+ __a - vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlsl_high_lane_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x4_t __c = (c); \
+ __a - vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_high_lane_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x2_t __c = (c); \
+ __a - vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vmlsl_high_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
+ __a - vmull_s16(vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_high_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
+ __a - vmull_s32(vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlsl_high_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
+ __a - vmull_u16(vget_high_u16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_high_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
+ __a - vmull_u32(vget_high_u32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vmlsl_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
+ return vmlsl_n_s16(__a, vget_high_s16(__b), __c); }
+__ai int64x2_t vmlsl_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
+ return vmlsl_n_s32(__a, vget_high_s32(__b), __c); }
+__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __a, uint16x8_t __b, uint16_t __c) {
+ return vmlsl_n_u16(__a, vget_high_u16(__b), __c); }
+__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __a, uint32x4_t __b, uint32_t __c) {
+ return vmlsl_n_u32(__a, vget_high_u32(__b), __c); }
+
+#define vmlsl_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
+ __a - vmull_s16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
+ __a - vmull_s32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlsl_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
+ __a - vmull_u16(__b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsl_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint64x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
+ __a - vmull_u32(__b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vmls_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int16x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmls_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int32x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmls_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x4_t __b = (b); uint16x8_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmls_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x2_t __b = (b); uint32x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmls_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); float32x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d)); })
+#define vmlsq_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlsq_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsq_laneq_u16(a, b, c, __d) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); uint16x8_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d, __d, __d, __d, __d)); })
+#define vmlsq_laneq_u32(a, b, c, __d) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); uint32x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vmlsq_laneq_f32(a, b, c, __d) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); float32x4_t __c = (c); \
+ __a - (__b * __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+
+#define vmull_high_lane_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x4_t __b = (b); \
+ vmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_high_lane_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x2_t __b = (b); \
+ vmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vmull_high_lane_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x4_t __b = (b); \
+ vmull_u16(vget_high_u16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_high_lane_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x2_t __b = (b); \
+ vmull_u32(vget_high_u32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
+
+#define vmull_high_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ vmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_high_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ vmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vmull_high_laneq_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ vmull_u16(vget_high_u16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_high_laneq_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ vmull_u32(vget_high_u32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
+
+__ai int32x4_t vmull_high_n_s16(int16x8_t __a, int16_t __b) {
+ return vmull_n_s16(vget_high_s16(__a), __b); }
+__ai int64x2_t vmull_high_n_s32(int32x4_t __a, int32_t __b) {
+ return vmull_n_s32(vget_high_s32(__a), __b); }
+__ai uint32x4_t vmull_high_n_u16(uint16x8_t __a, uint16_t __b) {
+ return vmull_n_u16(vget_high_u16(__a), __b); }
+__ai uint64x2_t vmull_high_n_u32(uint32x4_t __a, uint32_t __b) {
+ return vmull_n_u32(vget_high_u32(__a), __b); }
+
+#define vmull_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x8_t __b = (b); \
+ vmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x4_t __b = (b); \
+ vmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vmull_laneq_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x8_t __b = (b); \
+ vmull_u16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmull_laneq_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x4_t __b = (b); \
+ vmull_u32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+
+#define vmulx_lane_f32(a, b, __c) __extension__ ({ \
+ float32x2_t __a = (a); float32x2_t __b = (b); \
+ vmulx_f32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vmulxq_lane_f32(a, b, __c) __extension__ ({ \
+ float32x4_t __a = (a); float32x2_t __b = (b); \
+ vmulxq_f32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmulxq_lane_f64(a, b, __c) __extension__ ({ \
+ float64x2_t __a = (a); float64x1_t __b = (b); \
+ vmulxq_f64(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+
+#define vmulx_laneq_f32(a, b, __c) __extension__ ({ \
+ float32x2_t __a = (a); float32x4_t __b = (b); \
+ vmulx_f32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vmulxq_laneq_f32(a, b, __c) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); \
+ vmulxq_f32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vmulxq_laneq_f64(a, b, __c) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); \
+ vmulxq_f64(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
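+/* vmulx_* is FMULX: an ordinary multiply except that (0 x infinity) returns
+ * +/-2.0 instead of NaN, which keeps Newton-Raphson style reciprocal
+ * refinements well-defined. The lane forms above take a constant index. */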
+
+#define vmul_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x8_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmul_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+#define vmul_laneq_f32(a, b, __c) __extension__ ({ \
+ float32x2_t __a = (a); float32x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+#define vmul_laneq_u16(a, b, __c) __extension__ ({ \
+ uint16x4_t __a = (a); uint16x8_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmul_laneq_u32(a, b, __c) __extension__ ({ \
+ uint32x2_t __a = (a); uint32x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+#define vmulq_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
+#define vmulq_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmulq_laneq_f32(a, b, __c) __extension__ ({ \
+ float32x4_t __a = (a); float32x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmulq_laneq_u16(a, b, __c) __extension__ ({ \
+ uint16x8_t __a = (a); uint16x8_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c); })
+#define vmulq_laneq_u32(a, b, __c) __extension__ ({ \
+ uint32x4_t __a = (a); uint32x4_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c, __c, __c); })
+#define vmulq_laneq_f64(a, b, __c) __extension__ ({ \
+ float64x2_t __a = (a); float64x2_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+
+#define vmulq_lane_f64(a, b, __c) __extension__ ({ \
+ float64x2_t __a = (a); float64x1_t __b = (b); \
+ __a * __builtin_shufflevector(__b, __b, __c, __c); })
+
+__ai float64x2_t vmulq_n_f64(float64x2_t __a, float64_t __b) {
+ return __a * (float64x2_t){ __b, __b }; }
+
+__ai int32x4_t vqdmlal_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
+ return vqdmlal_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
+__ai int64x2_t vqdmlal_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
+ return vqdmlal_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
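+/* vqdmlal_high_* (and vqdmlsl_high_* below): saturating doubling widening
+ * multiply-accumulate/subtract on the high halves, composed from the 64-bit
+ * vqdmlal/vqdmlsl primitives plus vget_high_*. */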
+
+#define vqdmlal_high_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
+ vqdmlal_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlal_high_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
+ vqdmlal_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vqdmlal_high_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
+ vqdmlal_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlal_high_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
+ vqdmlal_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
+ return vqdmlal_n_s16(__a, vget_high_s16(__b), __c); }
+__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
+ return vqdmlal_n_s32(__a, vget_high_s32(__b), __c); }
+
+#define vqdmlal_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
+ vqdmlal_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlal_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
+ vqdmlal_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vqdmlsl_high_s16(int32x4_t __a, int16x8_t __b, int16x8_t __c) {
+ return vqdmlsl_s16(__a, vget_high_s16(__b), vget_high_s16(__c)); }
+__ai int64x2_t vqdmlsl_high_s32(int64x2_t __a, int32x4_t __b, int32x4_t __c) {
+ return vqdmlsl_s32(__a, vget_high_s32(__b), vget_high_s32(__c)); }
+
+#define vqdmlsl_high_lane_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x4_t __c = (c); \
+ vqdmlsl_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlsl_high_lane_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x2_t __c = (c); \
+ vqdmlsl_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vqdmlsl_high_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x8_t __b = (b); int16x8_t __c = (c); \
+ vqdmlsl_s16(__a, vget_high_s16(__b), __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlsl_high_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x4_t __b = (b); int32x4_t __c = (c); \
+ vqdmlsl_s32(__a, vget_high_s32(__b), __builtin_shufflevector(__c, __c, __d, __d)); })
+
+__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __a, int16x8_t __b, int16_t __c) {
+ return vqdmlsl_n_s16(__a, vget_high_s16(__b), __c); }
+__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __a, int32x4_t __b, int32_t __c) {
+ return vqdmlsl_n_s32(__a, vget_high_s32(__b), __c); }
+
+#define vqdmlsl_laneq_s16(a, b, c, __d) __extension__ ({ \
+ int32x4_t __a = (a); int16x4_t __b = (b); int16x8_t __c = (c); \
+ vqdmlsl_s16(__a, __b, __builtin_shufflevector(__c, __c, __d, __d, __d, __d)); })
+#define vqdmlsl_laneq_s32(a, b, c, __d) __extension__ ({ \
+ int64x2_t __a = (a); int32x2_t __b = (b); int32x4_t __c = (c); \
+ vqdmlsl_s32(__a, __b, __builtin_shufflevector(__c, __c, __d, __d)); })
+
+#define vqdmulh_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x8_t __b = (b); \
+ vqdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqdmulh_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x4_t __b = (b); \
+ vqdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vqdmulhq_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ vqdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
+#define vqdmulhq_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ vqdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+
+__ai int32x4_t vqdmull_high_s16(int16x8_t __a, int16x8_t __b) {
+ return vqdmull_s16(vget_high_s16(__a), vget_high_s16(__b)); }
+__ai int64x2_t vqdmull_high_s32(int32x4_t __a, int32x4_t __b) {
+ return vqdmull_s32(vget_high_s32(__a), vget_high_s32(__b)); }
+
+#define vqdmull_high_lane_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x4_t __b = (b); \
+ vqdmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqdmull_high_lane_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x2_t __b = (b); \
+ vqdmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
+
+#define vqdmull_high_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ vqdmull_s16(vget_high_s16(__a), __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqdmull_high_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ vqdmull_s32(vget_high_s32(__a), __builtin_shufflevector(__b, __b, __c, __c)); })
+
+__ai int32x4_t vqdmull_high_n_s16(int16x8_t __a, int16_t __b) {
+ return vqdmull_n_s16(vget_high_s16(__a), __b); }
+__ai int64x2_t vqdmull_high_n_s32(int32x4_t __a, int32_t __b) {
+ return vqdmull_n_s32(vget_high_s32(__a), __b); }
+
+#define vqdmull_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x8_t __b = (b); \
+ vqdmull_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqdmull_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x4_t __b = (b); \
+ vqdmull_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+
+#define vqrdmulh_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x4_t __a = (a); int16x8_t __b = (b); \
+ vqrdmulh_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+#define vqrdmulh_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x2_t __a = (a); int32x4_t __b = (b); \
+ vqrdmulh_s32(__a, __builtin_shufflevector(__b, __b, __c, __c)); })
+#define vqrdmulhq_laneq_s16(a, b, __c) __extension__ ({ \
+ int16x8_t __a = (a); int16x8_t __b = (b); \
+ vqrdmulhq_s16(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c, __c, __c, __c, __c)); })
+#define vqrdmulhq_laneq_s32(a, b, __c) __extension__ ({ \
+ int32x4_t __a = (a); int32x4_t __b = (b); \
+ vqrdmulhq_s32(__a, __builtin_shufflevector(__b, __b, __c, __c, __c, __c)); })
+
+__ai uint8x8_t vqtbl1_u8(uint8x16_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqtbl1_v((int8x16_t)__a, (int8x8_t)__b, 16); }
+__ai int8x8_t vqtbl1_s8(int8x16_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqtbl1_v(__a, __b, 0); }
+__ai poly8x8_t vqtbl1_p8(poly8x16_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vqtbl1_v((int8x16_t)__a, (int8x8_t)__b, 4); }
+__ai uint8x16_t vqtbl1q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqtbl1q_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+__ai int8x16_t vqtbl1q_s8(int8x16_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqtbl1q_v(__a, __b, 32); }
+__ai poly8x16_t vqtbl1q_p8(poly8x16_t __a, uint8x16_t __b) {
+ return (poly8x16_t)__builtin_neon_vqtbl1q_v((int8x16_t)__a, (int8x16_t)__b, 36); }
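+/* vqtbl*_*: full 128-bit table lookups (AArch64 TBL). An out-of-range index
+ * byte yields 0 in the corresponding result lane. Sketch:
+ *   uint8x8_t r = vqtbl1_u8(table16, idx8);  // r[i] = table16[idx8[i]]
+ */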
+
+__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqtbl2_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x8_t)__b, 16); }
+__ai int8x8_t vqtbl2_s8(int8x16x2_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqtbl2_v(__a.val[0], __a.val[1], __b, 0); }
+__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vqtbl2_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x8_t)__b, 4); }
+__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqtbl2q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__b, 48); }
+__ai int8x16_t vqtbl2q_s8(int8x16x2_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqtbl2q_v(__a.val[0], __a.val[1], __b, 32); }
+__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __a, uint8x16_t __b) {
+ return (poly8x16_t)__builtin_neon_vqtbl2q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__b, 36); }
+
+__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqtbl3_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x8_t)__b, 16); }
+__ai int8x8_t vqtbl3_s8(int8x16x3_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqtbl3_v(__a.val[0], __a.val[1], __a.val[2], __b, 0); }
+__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vqtbl3_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x8_t)__b, 4); }
+__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqtbl3q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__b, 48); }
+__ai int8x16_t vqtbl3q_s8(int8x16x3_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqtbl3q_v(__a.val[0], __a.val[1], __a.val[2], __b, 32); }
+__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __a, uint8x16_t __b) {
+ return (poly8x16_t)__builtin_neon_vqtbl3q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__b, 36); }
+
+__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __a, uint8x8_t __b) {
+ return (uint8x8_t)__builtin_neon_vqtbl4_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x8_t)__b, 16); }
+__ai int8x8_t vqtbl4_s8(int8x16x4_t __a, int8x8_t __b) {
+ return (int8x8_t)__builtin_neon_vqtbl4_v(__a.val[0], __a.val[1], __a.val[2], __a.val[3], __b, 0); }
+__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __a, uint8x8_t __b) {
+ return (poly8x8_t)__builtin_neon_vqtbl4_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x8_t)__b, 4); }
+__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vqtbl4q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x16_t)__b, 48); }
+__ai int8x16_t vqtbl4q_s8(int8x16x4_t __a, int8x16_t __b) {
+ return (int8x16_t)__builtin_neon_vqtbl4q_v(__a.val[0], __a.val[1], __a.val[2], __a.val[3], __b, 32); }
+__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __a, uint8x16_t __b) {
+ return (poly8x16_t)__builtin_neon_vqtbl4q_v((int8x16_t)__a.val[0], (int8x16_t)__a.val[1], (int8x16_t)__a.val[2], (int8x16_t)__a.val[3], (int8x16_t)__b, 36); }
+
+__ai uint8x8_t vqtbx1_u8(uint8x8_t __a, uint8x16_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vqtbx1_v((int8x8_t)__a, (int8x16_t)__b, (int8x8_t)__c, 16); }
+__ai int8x8_t vqtbx1_s8(int8x8_t __a, int8x16_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vqtbx1_v(__a, __b, __c, 0); }
+__ai poly8x8_t vqtbx1_p8(poly8x8_t __a, poly8x16_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vqtbx1_v((int8x8_t)__a, (int8x16_t)__b, (int8x8_t)__c, 4); }
+__ai uint8x16_t vqtbx1q_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) {
+ return (uint8x16_t)__builtin_neon_vqtbx1q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 48); }
+__ai int8x16_t vqtbx1q_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c) {
+ return (int8x16_t)__builtin_neon_vqtbx1q_v(__a, __b, __c, 32); }
+__ai poly8x16_t vqtbx1q_p8(poly8x16_t __a, poly8x16_t __b, uint8x16_t __c) {
+ return (poly8x16_t)__builtin_neon_vqtbx1q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 36); }
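+/* vqtbx*_*: TBX variants of the lookups above; an out-of-range index leaves
+ * the corresponding lane of the destination (__a) unchanged rather than
+ * zeroing it, which allows chaining lookups across larger tables. */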
+
+__ai uint8x8_t vqtbx2_u8(uint8x8_t __a, uint8x16x2_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vqtbx2_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x8_t)__c, 16); }
+__ai int8x8_t vqtbx2_s8(int8x8_t __a, int8x16x2_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vqtbx2_v(__a, __b.val[0], __b.val[1], __c, 0); }
+__ai poly8x8_t vqtbx2_p8(poly8x8_t __a, poly8x16x2_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vqtbx2_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x8_t)__c, 4); }
+__ai uint8x16_t vqtbx2q_u8(uint8x16_t __a, uint8x16x2_t __b, uint8x16_t __c) {
+ return (uint8x16_t)__builtin_neon_vqtbx2q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__c, 48); }
+__ai int8x16_t vqtbx2q_s8(int8x16_t __a, int8x16x2_t __b, int8x16_t __c) {
+ return (int8x16_t)__builtin_neon_vqtbx2q_v(__a, __b.val[0], __b.val[1], __c, 32); }
+__ai poly8x16_t vqtbx2q_p8(poly8x16_t __a, poly8x16x2_t __b, uint8x16_t __c) {
+ return (poly8x16_t)__builtin_neon_vqtbx2q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__c, 36); }
+
+__ai uint8x8_t vqtbx3_u8(uint8x8_t __a, uint8x16x3_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vqtbx3_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x8_t)__c, 16); }
+__ai int8x8_t vqtbx3_s8(int8x8_t __a, int8x16x3_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vqtbx3_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 0); }
+__ai poly8x8_t vqtbx3_p8(poly8x8_t __a, poly8x16x3_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vqtbx3_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x8_t)__c, 4); }
+__ai uint8x16_t vqtbx3q_u8(uint8x16_t __a, uint8x16x3_t __b, uint8x16_t __c) {
+ return (uint8x16_t)__builtin_neon_vqtbx3q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__c, 48); }
+__ai int8x16_t vqtbx3q_s8(int8x16_t __a, int8x16x3_t __b, int8x16_t __c) {
+ return (int8x16_t)__builtin_neon_vqtbx3q_v(__a, __b.val[0], __b.val[1], __b.val[2], __c, 32); }
+__ai poly8x16_t vqtbx3q_p8(poly8x16_t __a, poly8x16x3_t __b, uint8x16_t __c) {
+ return (poly8x16_t)__builtin_neon_vqtbx3q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__c, 36); }
+
+__ai uint8x8_t vqtbx4_u8(uint8x8_t __a, uint8x16x4_t __b, uint8x8_t __c) {
+ return (uint8x8_t)__builtin_neon_vqtbx4_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x8_t)__c, 16); }
+__ai int8x8_t vqtbx4_s8(int8x8_t __a, int8x16x4_t __b, int8x8_t __c) {
+ return (int8x8_t)__builtin_neon_vqtbx4_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 0); }
+__ai poly8x8_t vqtbx4_p8(poly8x8_t __a, poly8x16x4_t __b, uint8x8_t __c) {
+ return (poly8x8_t)__builtin_neon_vqtbx4_v((int8x8_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x8_t)__c, 4); }
+__ai uint8x16_t vqtbx4q_u8(uint8x16_t __a, uint8x16x4_t __b, uint8x16_t __c) {
+ return (uint8x16_t)__builtin_neon_vqtbx4q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x16_t)__c, 48); }
+__ai int8x16_t vqtbx4q_s8(int8x16_t __a, int8x16x4_t __b, int8x16_t __c) {
+ return (int8x16_t)__builtin_neon_vqtbx4q_v(__a, __b.val[0], __b.val[1], __b.val[2], __b.val[3], __c, 32); }
+__ai poly8x16_t vqtbx4q_p8(poly8x16_t __a, poly8x16x4_t __b, uint8x16_t __c) {
+ return (poly8x16_t)__builtin_neon_vqtbx4q_v((int8x16_t)__a, (int8x16_t)__b.val[0], (int8x16_t)__b.val[1], (int8x16_t)__b.val[2], (int8x16_t)__b.val[3], (int8x16_t)__c, 36); }
+
+__ai int8x16_t vraddhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return vcombine_s8(__a, vraddhn_s16(__b, __c)); }
+__ai int16x8_t vraddhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return vcombine_s16(__a, vraddhn_s32(__b, __c)); }
+__ai int32x4_t vraddhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
+ return vcombine_s32(__a, vraddhn_s64(__b, __c)); }
+__ai uint8x16_t vraddhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vcombine_u8(__a, vraddhn_u16(__b, __c)); }
+__ai uint16x8_t vraddhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vcombine_u16(__a, vraddhn_u32(__b, __c)); }
+__ai uint32x4_t vraddhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
+ return vcombine_u32(__a, vraddhn_u64(__b, __c)); }
+
+__ai int8x16_t vrsubhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return vcombine_s8(__a, vrsubhn_s16(__b, __c)); }
+__ai int16x8_t vrsubhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return vcombine_s16(__a, vrsubhn_s32(__b, __c)); }
+__ai int32x4_t vrsubhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
+ return vcombine_s32(__a, vrsubhn_s64(__b, __c)); }
+__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vcombine_u8(__a, vrsubhn_u16(__b, __c)); }
+__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vcombine_u16(__a, vrsubhn_u32(__b, __c)); }
+__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
+ return vcombine_u32(__a, vrsubhn_u64(__b, __c)); }
+
+__ai int8x16_t vsubhn_high_s16(int8x8_t __a, int16x8_t __b, int16x8_t __c) {
+ return vcombine_s8(__a, vsubhn_s16(__b, __c)); }
+__ai int16x8_t vsubhn_high_s32(int16x4_t __a, int32x4_t __b, int32x4_t __c) {
+ return vcombine_s16(__a, vsubhn_s32(__b, __c)); }
+__ai int32x4_t vsubhn_high_s64(int32x2_t __a, int64x2_t __b, int64x2_t __c) {
+ return vcombine_s32(__a, vsubhn_s64(__b, __c)); }
+__ai uint8x16_t vsubhn_high_u16(uint8x8_t __a, uint16x8_t __b, uint16x8_t __c) {
+ return vcombine_u8(__a, vsubhn_u16(__b, __c)); }
+__ai uint16x8_t vsubhn_high_u32(uint16x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return vcombine_u16(__a, vsubhn_u32(__b, __c)); }
+__ai uint32x4_t vsubhn_high_u64(uint32x2_t __a, uint64x2_t __b, uint64x2_t __c) {
+ return vcombine_u32(__a, vsubhn_u64(__b, __c)); }
+
+__ai int16x8_t vsubl_high_s8(int8x16_t __a, int8x16_t __b) {
+ return vmovl_high_s8(__a) - vmovl_high_s8(__b); }
+__ai int32x4_t vsubl_high_s16(int16x8_t __a, int16x8_t __b) {
+ return vmovl_high_s16(__a) - vmovl_high_s16(__b); }
+__ai int64x2_t vsubl_high_s32(int32x4_t __a, int32x4_t __b) {
+ return vmovl_high_s32(__a) - vmovl_high_s32(__b); }
+__ai uint16x8_t vsubl_high_u8(uint8x16_t __a, uint8x16_t __b) {
+ return vmovl_high_u8(__a) - vmovl_high_u8(__b); }
+__ai uint32x4_t vsubl_high_u16(uint16x8_t __a, uint16x8_t __b) {
+ return vmovl_high_u16(__a) - vmovl_high_u16(__b); }
+__ai uint64x2_t vsubl_high_u32(uint32x4_t __a, uint32x4_t __b) {
+ return vmovl_high_u32(__a) - vmovl_high_u32(__b); }
+
+__ai int16x8_t vsubw_high_s8(int16x8_t __a, int8x16_t __b) {
+ return __a - vmovl_high_s8(__b); }
+__ai int32x4_t vsubw_high_s16(int32x4_t __a, int16x8_t __b) {
+ return __a - vmovl_high_s16(__b); }
+__ai int64x2_t vsubw_high_s32(int64x2_t __a, int32x4_t __b) {
+ return __a - vmovl_high_s32(__b); }
+__ai uint16x8_t vsubw_high_u8(uint16x8_t __a, uint8x16_t __b) {
+ return __a - vmovl_high_u8(__b); }
+__ai uint32x4_t vsubw_high_u16(uint32x4_t __a, uint16x8_t __b) {
+ return __a - vmovl_high_u16(__b); }
+__ai uint64x2_t vsubw_high_u32(uint64x2_t __a, uint32x4_t __b) {
+ return __a - vmovl_high_u32(__b); }
+
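+/* The widening *_high forms act on the upper halves of their 128-bit
+ * operands: vsubl_high_s8(a, b) is equivalent to
+ * vsubl_s8(vget_high_s8(a), vget_high_s8(b)), while vsubw_high_s8(a, b)
+ * subtracts only the widened top half of __b from __a. */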
+__ai int8x8_t vtrn1_s8(int8x8_t __a, int8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
+__ai int16x4_t vtrn1_s16(int16x4_t __a, int16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
+__ai int32x2_t vtrn1_s32(int32x2_t __a, int32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai uint8x8_t vtrn1_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
+__ai uint16x4_t vtrn1_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
+__ai uint32x2_t vtrn1_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai float32x2_t vtrn1_f32(float32x2_t __a, float32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai poly8x8_t vtrn1_p8(poly8x8_t __a, poly8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
+__ai poly16x4_t vtrn1_p16(poly16x4_t __a, poly16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
+__ai int8x16_t vtrn1q_s8(int8x16_t __a, int8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); }
+__ai int16x8_t vtrn1q_s16(int16x8_t __a, int16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
+__ai int32x4_t vtrn1q_s32(int32x4_t __a, int32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
+__ai int64x2_t vtrn1q_s64(int64x2_t __a, int64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai uint8x16_t vtrn1q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); }
+__ai uint16x8_t vtrn1q_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
+__ai uint32x4_t vtrn1q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
+__ai uint64x2_t vtrn1q_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai float32x4_t vtrn1q_f32(float32x4_t __a, float32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 2, 6); }
+__ai float64x2_t vtrn1q_f64(float64x2_t __a, float64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai poly8x16_t vtrn1q_p8(poly8x16_t __a, poly8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); }
+__ai poly16x8_t vtrn1q_p16(poly16x8_t __a, poly16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 2, 10, 4, 12, 6, 14); }
+__ai poly64x2_t vtrn1q_p64(poly64x2_t __a, poly64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+
+__ai int8x8_t vtrn2_s8(int8x8_t __a, int8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
+__ai int16x4_t vtrn2_s16(int16x4_t __a, int16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
+__ai int32x2_t vtrn2_s32(int32x2_t __a, int32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai uint8x8_t vtrn2_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
+__ai uint16x4_t vtrn2_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
+__ai uint32x2_t vtrn2_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai float32x2_t vtrn2_f32(float32x2_t __a, float32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai poly8x8_t vtrn2_p8(poly8x8_t __a, poly8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
+__ai poly16x4_t vtrn2_p16(poly16x4_t __a, poly16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
+__ai int8x16_t vtrn2q_s8(int8x16_t __a, int8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); }
+__ai int16x8_t vtrn2q_s16(int16x8_t __a, int16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
+__ai int32x4_t vtrn2q_s32(int32x4_t __a, int32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
+__ai int64x2_t vtrn2q_s64(int64x2_t __a, int64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai uint8x16_t vtrn2q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); }
+__ai uint16x8_t vtrn2q_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
+__ai uint32x4_t vtrn2q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
+__ai uint64x2_t vtrn2q_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai float32x4_t vtrn2q_f32(float32x4_t __a, float32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 5, 3, 7); }
+__ai float64x2_t vtrn2q_f64(float64x2_t __a, float64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai poly8x16_t vtrn2q_p8(poly8x16_t __a, poly8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); }
+__ai poly16x8_t vtrn2q_p16(poly16x8_t __a, poly16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 9, 3, 11, 5, 13, 7, 15); }
+__ai poly64x2_t vtrn2q_p64(poly64x2_t __a, poly64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+
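+/* vtrn1/vtrn2 implement the AArch64 TRN1/TRN2 transposes: the
+ * __builtin_shufflevector indices select lanes from the concatenation
+ * { __a, __b }. For the 8-lane forms above:
+ *
+ *   vtrn1_s8(a, b) == { a0, b0, a2, b2, a4, b4, a6, b6 }
+ *   vtrn2_s8(a, b) == { a1, b1, a3, b3, a5, b5, a7, b7 }
+ */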
+__ai int8x8_t vuzp1_s8(int8x8_t __a, int8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
+__ai int16x4_t vuzp1_s16(int16x4_t __a, int16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
+__ai int32x2_t vuzp1_s32(int32x2_t __a, int32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai uint8x8_t vuzp1_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
+__ai uint16x4_t vuzp1_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
+__ai uint32x2_t vuzp1_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai float32x2_t vuzp1_f32(float32x2_t __a, float32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai poly8x8_t vuzp1_p8(poly8x8_t __a, poly8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
+__ai poly16x4_t vuzp1_p16(poly16x4_t __a, poly16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
+__ai int8x16_t vuzp1q_s8(int8x16_t __a, int8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); }
+__ai int16x8_t vuzp1q_s16(int16x8_t __a, int16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
+__ai int32x4_t vuzp1q_s32(int32x4_t __a, int32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
+__ai int64x2_t vuzp1q_s64(int64x2_t __a, int64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai uint8x16_t vuzp1q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); }
+__ai uint16x8_t vuzp1q_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
+__ai uint32x4_t vuzp1q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
+__ai uint64x2_t vuzp1q_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai float32x4_t vuzp1q_f32(float32x4_t __a, float32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6); }
+__ai float64x2_t vuzp1q_f64(float64x2_t __a, float64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai poly8x16_t vuzp1q_p8(poly8x16_t __a, poly8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); }
+__ai poly16x8_t vuzp1q_p16(poly16x8_t __a, poly16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2, 4, 6, 8, 10, 12, 14); }
+__ai poly64x2_t vuzp1q_p64(poly64x2_t __a, poly64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+
+__ai int8x8_t vuzp2_s8(int8x8_t __a, int8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
+__ai int16x4_t vuzp2_s16(int16x4_t __a, int16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
+__ai int32x2_t vuzp2_s32(int32x2_t __a, int32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai uint8x8_t vuzp2_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
+__ai uint16x4_t vuzp2_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
+__ai uint32x2_t vuzp2_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai float32x2_t vuzp2_f32(float32x2_t __a, float32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai poly8x8_t vuzp2_p8(poly8x8_t __a, poly8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
+__ai poly16x4_t vuzp2_p16(poly16x4_t __a, poly16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
+__ai int8x16_t vuzp2q_s8(int8x16_t __a, int8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); }
+__ai int16x8_t vuzp2q_s16(int16x8_t __a, int16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
+__ai int32x4_t vuzp2q_s32(int32x4_t __a, int32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
+__ai int64x2_t vuzp2q_s64(int64x2_t __a, int64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai uint8x16_t vuzp2q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); }
+__ai uint16x8_t vuzp2q_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
+__ai uint32x4_t vuzp2q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
+__ai uint64x2_t vuzp2q_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai float32x4_t vuzp2q_f32(float32x4_t __a, float32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7); }
+__ai float64x2_t vuzp2q_f64(float64x2_t __a, float64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai poly8x16_t vuzp2q_p8(poly8x16_t __a, poly8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); }
+__ai poly16x8_t vuzp2q_p16(poly16x8_t __a, poly16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3, 5, 7, 9, 11, 13, 15); }
+__ai poly64x2_t vuzp2q_p64(poly64x2_t __a, poly64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+
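+/* vuzp1/vuzp2 de-interleave: vuzp1 keeps the even-indexed lanes of
+ * { __a, __b } and vuzp2 the odd-indexed ones:
+ *
+ *   vuzp1_s8(a, b) == { a0, a2, a4, a6, b0, b2, b4, b6 }
+ *   vuzp2_s8(a, b) == { a1, a3, a5, a7, b1, b3, b5, b7 }
+ */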
+__ai int8x8_t vzip1_s8(int8x8_t __a, int8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
+__ai int16x4_t vzip1_s16(int16x4_t __a, int16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
+__ai int32x2_t vzip1_s32(int32x2_t __a, int32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai uint8x8_t vzip1_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
+__ai uint16x4_t vzip1_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
+__ai uint32x2_t vzip1_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai float32x2_t vzip1_f32(float32x2_t __a, float32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai poly8x8_t vzip1_p8(poly8x8_t __a, poly8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
+__ai poly16x4_t vzip1_p16(poly16x4_t __a, poly16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
+__ai int8x16_t vzip1q_s8(int8x16_t __a, int8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); }
+__ai int16x8_t vzip1q_s16(int16x8_t __a, int16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
+__ai int32x4_t vzip1q_s32(int32x4_t __a, int32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
+__ai int64x2_t vzip1q_s64(int64x2_t __a, int64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai uint8x16_t vzip1q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); }
+__ai uint16x8_t vzip1q_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
+__ai uint32x4_t vzip1q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
+__ai uint64x2_t vzip1q_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai float32x4_t vzip1q_f32(float32x4_t __a, float32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); }
+__ai float64x2_t vzip1q_f64(float64x2_t __a, float64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+__ai poly8x16_t vzip1q_p8(poly8x16_t __a, poly8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); }
+__ai poly16x8_t vzip1q_p16(poly16x8_t __a, poly16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 8, 1, 9, 2, 10, 3, 11); }
+__ai poly64x2_t vzip1q_p64(poly64x2_t __a, poly64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 0, 2); }
+
+__ai int8x8_t vzip2_s8(int8x8_t __a, int8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
+__ai int16x4_t vzip2_s16(int16x4_t __a, int16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
+__ai int32x2_t vzip2_s32(int32x2_t __a, int32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai uint8x8_t vzip2_u8(uint8x8_t __a, uint8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
+__ai uint16x4_t vzip2_u16(uint16x4_t __a, uint16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
+__ai uint32x2_t vzip2_u32(uint32x2_t __a, uint32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai float32x2_t vzip2_f32(float32x2_t __a, float32x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai poly8x8_t vzip2_p8(poly8x8_t __a, poly8x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
+__ai poly16x4_t vzip2_p16(poly16x4_t __a, poly16x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
+__ai int8x16_t vzip2q_s8(int8x16_t __a, int8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); }
+__ai int16x8_t vzip2q_s16(int16x8_t __a, int16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
+__ai int32x4_t vzip2q_s32(int32x4_t __a, int32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
+__ai int64x2_t vzip2q_s64(int64x2_t __a, int64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai uint8x16_t vzip2q_u8(uint8x16_t __a, uint8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); }
+__ai uint16x8_t vzip2q_u16(uint16x8_t __a, uint16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
+__ai uint32x4_t vzip2q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
+__ai uint64x2_t vzip2q_u64(uint64x2_t __a, uint64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai float32x4_t vzip2q_f32(float32x4_t __a, float32x4_t __b) {
+ return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); }
+__ai float64x2_t vzip2q_f64(float64x2_t __a, float64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+__ai poly8x16_t vzip2q_p8(poly8x16_t __a, poly8x16_t __b) {
+ return __builtin_shufflevector(__a, __b, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); }
+__ai poly16x8_t vzip2q_p16(poly16x8_t __a, poly16x8_t __b) {
+ return __builtin_shufflevector(__a, __b, 4, 12, 5, 13, 6, 14, 7, 15); }
+__ai poly64x2_t vzip2q_p64(poly64x2_t __a, poly64x2_t __b) {
+ return __builtin_shufflevector(__a, __b, 1, 3); }
+
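+/* vzip1/vzip2 interleave the low and high halves of their operands:
+ *
+ *   vzip1_s8(a, b) == { a0, b0, a1, b1, a2, b2, a3, b3 }
+ *   vzip2_s8(a, b) == { a4, b4, a5, b5, a6, b6, a7, b7 }
+ */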
+__ai int8x16_t vmovn_high_s16(int8x8_t __a, int16x8_t __b) {
+ int8x8_t __a1 = vmovn_s16(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai int16x8_t vmovn_high_s32(int16x4_t __a, int32x4_t __b) {
+ int16x4_t __a1 = vmovn_s32(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai int32x4_t vmovn_high_s64(int32x2_t __a, int64x2_t __b) {
+ int32x2_t __a1 = vmovn_s64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
+__ai uint8x16_t vmovn_high_u16(uint8x8_t __a, uint16x8_t __b) {
+ uint8x8_t __a1 = vmovn_u16(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); }
+__ai uint16x8_t vmovn_high_u32(uint16x4_t __a, uint32x4_t __b) {
+ uint16x4_t __a1 = vmovn_u32(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3, 4, 5, 6, 7); }
+__ai uint32x4_t vmovn_high_u64(uint32x2_t __a, uint64x2_t __b) {
+ uint32x2_t __a1 = vmovn_u64(__b);
+ return __builtin_shufflevector(__a, __a1, 0, 1, 2, 3); }
+
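+/* vmovn_high_* truncate each lane of __b to half width and return the
+ * result in the upper half of the output, with __a in the lower half:
+ *
+ *   int8x16_t r = vmovn_high_s16(lo, wide);  // r == { lo, truncated(wide) }
+ */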
+#ifdef __ARM_FEATURE_CRYPTO
+__ai uint8x16_t vaesdq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vaesdq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+
+__ai uint8x16_t vaeseq_u8(uint8x16_t __a, uint8x16_t __b) {
+ return (uint8x16_t)__builtin_neon_vaeseq_v((int8x16_t)__a, (int8x16_t)__b, 48); }
+
+__ai uint8x16_t vaesimcq_u8(uint8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vaesimcq_v((int8x16_t)__a, 48); }
+
+__ai uint8x16_t vaesmcq_u8(uint8x16_t __a) {
+ return (uint8x16_t)__builtin_neon_vaesmcq_v((int8x16_t)__a, 48); }
+
+__ai uint32x4_t vsha1cq_u32(uint32x4_t __a, uint32_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha1cq_u32((int32x4_t)__a, __b, (int32x4_t)__c); }
+
+__ai uint32_t vsha1h_u32(uint32_t __a) {
+ return (uint32_t)__builtin_neon_vsha1h_u32(__a); }
+
+__ai uint32x4_t vsha1mq_u32(uint32x4_t __a, uint32_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha1mq_u32((int32x4_t)__a, __b, (int32x4_t)__c); }
+
+__ai uint32x4_t vsha1pq_u32(uint32x4_t __a, uint32_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha1pq_u32((int32x4_t)__a, __b, (int32x4_t)__c); }
+
+__ai uint32x4_t vsha1su0q_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha1su0q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
+
+__ai uint32x4_t vsha1su1q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vsha1su1q_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
+__ai uint32x4_t vsha256hq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha256hq_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
+
+__ai uint32x4_t vsha256h2q_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha256h2q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
+
+__ai uint32x4_t vsha256su0q_u32(uint32x4_t __a, uint32x4_t __b) {
+ return (uint32x4_t)__builtin_neon_vsha256su0q_v((int8x16_t)__a, (int8x16_t)__b, 50); }
+
+__ai uint32x4_t vsha256su1q_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) {
+ return (uint32x4_t)__builtin_neon_vsha256su1q_v((int8x16_t)__a, (int8x16_t)__b, (int8x16_t)__c, 50); }
+
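+/* A minimal sketch of one SHA-256 message-schedule update built from the
+ * intrinsics above (w0..w3 are illustrative message-word vectors, not
+ * part of this header):
+ *
+ *   w0 = vsha256su1q_u32(vsha256su0q_u32(w0, w1), w2, w3);
+ */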
+#endif
+
+#endif
+
+#undef __ai
+
+#endif /* __ARM_NEON_H */
diff --git a/renderscript/clang-include/avx2intrin.h b/renderscript/clang-include/avx2intrin.h
index 1887fc8..9574469 100644
--- a/renderscript/clang-include/avx2intrin.h
+++ b/renderscript/clang-include/avx2intrin.h
@@ -753,9 +753,9 @@ _mm256_broadcastsd_pd(__m128d __X)
}
static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
-_mm_broadcastsi128_si256(__m128i const *__a)
+_mm256_broadcastsi128_si256(__m128i __X)
{
- return (__m256i)__builtin_ia32_vbroadcastsi256(__a);
+ return (__m256i)__builtin_ia32_vbroadcastsi256(__X);
}
#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
@@ -1061,7 +1061,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
__m128i __a = (a); \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m128i __i = (i); \
__m128i __mask = (mask); \
(__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \
@@ -1069,7 +1069,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
__m256i __a = (a); \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m128i __i = (i); \
__m256i __mask = (mask); \
(__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \
@@ -1077,7 +1077,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
__m128i __a = (a); \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m128i __i = (i); \
__m128i __mask = (mask); \
(__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \
@@ -1085,7 +1085,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
__m256i __a = (a); \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m256i __i = (i); \
__m256i __mask = (mask); \
(__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \
@@ -1176,28 +1176,28 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
(__v4si)_mm_set1_epi32(-1), (s)); })
#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m128i __i = (i); \
(__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_setzero_si128(), \
(const __v2di *)__m, (__v4si)__i, \
(__v2di)_mm_set1_epi64x(-1), (s)); })
#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m128i __i = (i); \
(__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_setzero_si256(), \
(const __v4di *)__m, (__v4si)__i, \
(__v4di)_mm256_set1_epi64x(-1), (s)); })
#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m128i __i = (i); \
(__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_setzero_si128(), \
(const __v2di *)__m, (__v2di)__i, \
(__v2di)_mm_set1_epi64x(-1), (s)); })
#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
- int const *__m = (m); \
+ long long const *__m = (m); \
__m256i __i = (i); \
(__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_setzero_si256(), \
(const __v4di *)__m, (__v4di)__i, \
diff --git a/renderscript/clang-include/avxintrin.h b/renderscript/clang-include/avxintrin.h
index 50454f2..141c4d9 100644
--- a/renderscript/clang-include/avxintrin.h
+++ b/renderscript/clang-include/avxintrin.h
@@ -435,21 +435,21 @@ static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi32(__m256i __a, int const __imm)
{
__v8si __b = (__v8si)__a;
- return __b[__imm];
+ return __b[__imm & 7];
}
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi16(__m256i __a, int const __imm)
{
__v16hi __b = (__v16hi)__a;
- return __b[__imm];
+ return __b[__imm & 15];
}
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi8(__m256i __a, int const __imm)
{
__v32qi __b = (__v32qi)__a;
- return __b[__imm];
+ return __b[__imm & 31];
}
#ifdef __x86_64__
@@ -457,7 +457,7 @@ static __inline long long __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi64(__m256i __a, const int __imm)
{
__v4di __b = (__v4di)__a;
- return __b[__imm];
+ return __b[__imm & 3];
}
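+/* Masking the selector (here with & 3, and with & 7/15/31 above) keeps a
+ * constant out-of-range index from reading past the vector: the selector
+ * is effectively taken modulo the element count. */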
#endif
diff --git a/renderscript/clang-include/emmintrin.h b/renderscript/clang-include/emmintrin.h
index f965dce..b3f8569 100644
--- a/renderscript/clang-include/emmintrin.h
+++ b/renderscript/clang-include/emmintrin.h
@@ -826,7 +826,9 @@ _mm_xor_si128(__m128i __a, __m128i __b)
}
#define _mm_slli_si128(a, count) __extension__ ({ \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \
__m128i __a = (a); \
+ _Pragma("clang diagnostic pop"); \
(__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -891,7 +893,9 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
#define _mm_srli_si128(a, count) __extension__ ({ \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \
__m128i __a = (a); \
+ _Pragma("clang diagnostic pop"); \
(__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); })
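+/* The _Pragma push/pop pairs added above temporarily disable -Wshadow so
+ * that expanding these macros in a scope that already declares a
+ * variable named __a does not warn. */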
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -1214,6 +1218,14 @@ _mm_stream_si32(int *__p, int __a)
__builtin_ia32_movnti(__p, __a);
}
+#ifdef __x86_64__
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_si64(long long *__p, long long __a)
+{
+ __builtin_ia32_movnti64(__p, __a);
+}
+#endif
+
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_clflush(void const *__p)
{
@@ -1254,7 +1266,7 @@ static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_extract_epi16(__m128i __a, int __imm)
{
__v8hi __b = (__v8hi)__a;
- return (unsigned short)__b[__imm];
+ return (unsigned short)__b[__imm & 7];
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -1272,20 +1284,26 @@ _mm_movemask_epi8(__m128i __a)
}
#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \
__m128i __a = (a); \
+ _Pragma("clang diagnostic pop"); \
(__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \
(imm) & 0x3, ((imm) & 0xc) >> 2, \
((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \
__m128i __a = (a); \
+ _Pragma("clang diagnostic pop"); \
(__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
(imm) & 0x3, ((imm) & 0xc) >> 2, \
((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
4, 5, 6, 7); })
#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \
__m128i __a = (a); \
+ _Pragma("clang diagnostic pop"); \
(__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
0, 1, 2, 3, \
4 + (((imm) & 0x03) >> 0), \
@@ -1348,7 +1366,7 @@ _mm_movepi64_pi64(__m128i __a)
}
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_movpi64_pi64(__m64 __a)
+_mm_movpi64_epi64(__m64 __a)
{
return (__m128i){ (long long)__a, 0 };
}
@@ -1378,8 +1396,10 @@ _mm_movemask_pd(__m128d __a)
}
#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \
__m128d __a = (a); \
__m128d __b = (b); \
+ _Pragma("clang diagnostic pop"); \
__builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); })
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
diff --git a/renderscript/clang-include/f16cintrin.h b/renderscript/clang-include/f16cintrin.h
index a6d7812..f3614c0 100644
--- a/renderscript/clang-include/f16cintrin.h
+++ b/renderscript/clang-include/f16cintrin.h
@@ -1,6 +1,6 @@
-/*===---- f16cintrin.h - F16C intrinsics ---------------------------------===
+/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
*
- * Permission is hereby granted, free of charge, to any person obtaining __a copy
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
diff --git a/renderscript/clang-include/immintrin.h b/renderscript/clang-include/immintrin.h
index fea7c3b..15d6e05 100644
--- a/renderscript/clang-include/immintrin.h
+++ b/renderscript/clang-include/immintrin.h
@@ -111,4 +111,8 @@ _xtest(void)
}
#endif
+#ifdef __SHA__
+#include <shaintrin.h>
+#endif
+
#endif /* __IMMINTRIN_H */
diff --git a/renderscript/clang-include/limits.h b/renderscript/clang-include/limits.h
index ecd09a4..91bd404 100644
--- a/renderscript/clang-include/limits.h
+++ b/renderscript/clang-include/limits.h
@@ -87,8 +87,10 @@
#define CHAR_MAX __SCHAR_MAX__
#endif
-/* C99 5.2.4.2.1: Added long long. */
-#if __STDC_VERSION__ >= 199901
+/* C99 5.2.4.2.1: Added long long.
+ C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
+ */
+#if __STDC_VERSION__ >= 199901 || __cplusplus >= 201103L
#undef LLONG_MIN
#undef LLONG_MAX
diff --git a/renderscript/clang-include/shaintrin.h b/renderscript/clang-include/shaintrin.h
new file mode 100644
index 0000000..66ed055
--- /dev/null
+++ b/renderscript/clang-include/shaintrin.h
@@ -0,0 +1,74 @@
+/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <shaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SHAINTRIN_H
+#define __SHAINTRIN_H
+
+#if !defined (__SHA__)
+# error "SHA instructions not enabled"
+#endif
+
+#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
+ __builtin_ia32_sha1rnds4((V1), (V2), (M)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
+{
+ return __builtin_ia32_sha1nexte(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return __builtin_ia32_sha1msg1(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return __builtin_ia32_sha1msg2(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
+{
+ return __builtin_ia32_sha256rnds2(__X, __Y, __Z);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
+{
+ return __builtin_ia32_sha256msg1(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
+{
+ return __builtin_ia32_sha256msg2(__X, __Y);
+}
+
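+/* A minimal sketch of an SHA-256 message-schedule update using these
+ * intrinsics (msg0..msg3 are illustrative __m128i locals, not part of
+ * this header; _mm_add_epi32 and _mm_alignr_epi8 come from emmintrin.h
+ * and tmmintrin.h):
+ *
+ *   msg0 = _mm_sha256msg1_epu32(msg0, msg1);
+ *   msg0 = _mm_add_epi32(msg0, _mm_alignr_epi8(msg3, msg2, 4));
+ *   msg0 = _mm_sha256msg2_epu32(msg0, msg3);
+ */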
+#endif /* __SHAINTRIN_H */
diff --git a/renderscript/clang-include/smmintrin.h b/renderscript/clang-include/smmintrin.h
index 498f6f0..53b3ccb 100644
--- a/renderscript/clang-include/smmintrin.h
+++ b/renderscript/clang-include/smmintrin.h
@@ -197,7 +197,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
#define _mm_extract_ps(X, N) (__extension__ \
({ union { int __i; float __f; } __t; \
__v4sf __a = (__v4sf)(X); \
- __t.__f = __a[N]; \
+ __t.__f = __a[(N) & 3]; \
__t.__i;}))
/* Miscellaneous insert and extract macros. */
@@ -215,14 +215,14 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- __a[(N)] = (I); \
+ __a[(N) & 15] = (I); \
__a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- __a[(N)] = (I); \
+ __a[(N) & 3] = (I); \
__a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[(N)] = (I); \
+ __a[(N) & 1] = (I); \
__a;}))
#endif /* __x86_64__ */
@@ -230,12 +230,13 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
* as a zero extended value, so it is unsigned.
*/
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- (unsigned char)__a[(N)];}))
+ (int)(unsigned char) \
+ __a[(N) & 15];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- (unsigned)__a[(N)];}))
+ __a[(N) & 3];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[(N)];}))
+ __a[(N) & 1];}))
#endif /* __x86_64 */
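+/* As in the insert macros above, each (N) & mask reduction keeps the
+ * selector within the vector's element count, so a constant out-of-range
+ * index cannot access past the end of the vector. */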
/* SSE4 128-bit Packed Integer Comparisons. */
diff --git a/renderscript/clang-include/tbmintrin.h b/renderscript/clang-include/tbmintrin.h
new file mode 100644
index 0000000..f95e34f
--- /dev/null
+++ b/renderscript/clang-include/tbmintrin.h
@@ -0,0 +1,158 @@
+/*===---- tbmintrin.h - TBM intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __TBM__
+#error "TBM instruction set is not enabled"
+#endif
+
+#ifndef __X86INTRIN_H
+#error "Never use <tbmintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __TBMINTRIN_H
+#define __TBMINTRIN_H
+
+#define __bextri_u32(a, b) (__builtin_ia32_bextri_u32((a), (b)))
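+/* Bits 7:0 of the control operand select the starting bit and bits 15:8
+ * the field length, e.g. __bextri_u32(0xF0, 0x0404) == 0xF (extract four
+ * bits starting at bit 4). */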
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blcfill_u32(unsigned int a)
+{
+ return a & (a + 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blci_u32(unsigned int a)
+{
+ return a | ~(a + 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blcic_u32(unsigned int a)
+{
+ return ~a & (a + 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blcmsk_u32(unsigned int a)
+{
+ return a ^ (a + 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blcs_u32(unsigned int a)
+{
+ return a | (a + 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsfill_u32(unsigned int a)
+{
+ return a | (a - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsic_u32(unsigned int a)
+{
+ return ~a | (a - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__t1mskc_u32(unsigned int a)
+{
+ return ~a | (a + 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__tzmsk_u32(unsigned int a)
+{
+ return ~a & (a - 1);
+}
+
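+/* Worked example: for a = 0x58 (0b01011000),
+ *   __blsfill_u32(a) == 0x5F  (a | (a - 1): set every bit below the
+ *                              lowest set bit)
+ *   __tzmsk_u32(a)   == 0x07  (~a & (a - 1): mask of the trailing zeros)
+ */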
+#ifdef __x86_64__
+#define __bextri_u64(a, b) (__builtin_ia32_bextri_u64((a), (int)(b)))
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blcfill_u64(unsigned long long a)
+{
+ return a & (a + 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blci_u64(unsigned long long a)
+{
+ return a | ~(a + 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blcic_u64(unsigned long long a)
+{
+ return ~a & (a + 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blcmsk_u64(unsigned long long a)
+{
+ return a ^ (a + 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blcs_u64(unsigned long long a)
+{
+ return a | (a + 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blsfill_u64(unsigned long long a)
+{
+ return a | (a - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__blsic_u64(unsigned long long a)
+{
+ return ~a | (a - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__t1mskc_u64(unsigned long long a)
+{
+ return ~a | (a + 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__,
+ __nodebug__))
+__tzmsk_u64(unsigned long long a)
+{
+ return ~a & (a - 1);
+}
+#endif
+
+#endif /* __TBMINTRIN_H */
diff --git a/renderscript/clang-include/unwind.h b/renderscript/clang-include/unwind.h
index e94fd70..685c1df 100644
--- a/renderscript/clang-include/unwind.h
+++ b/renderscript/clang-include/unwind.h
@@ -27,8 +27,8 @@
#define __CLANG_UNWIND_H
#if __has_include_next(<unwind.h>)
-/* Darwin and libunwind provide an unwind.h. If that's available, use
- * it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+/* Darwin (from 11.x on) and libunwind provide an unwind.h. If that's available,
+ * use it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
* so define that around the include.*/
# ifndef _GNU_SOURCE
# define _SHOULD_UNDEFINE_GNU_SOURCE
@@ -66,7 +66,17 @@ extern "C" {
#pragma GCC visibility push(default)
#endif
+typedef uintptr_t _Unwind_Word;
+typedef intptr_t _Unwind_Sword;
+typedef uintptr_t _Unwind_Ptr;
+typedef uintptr_t _Unwind_Internal_Ptr;
+typedef uint64_t _Unwind_Exception_Class;
+
+typedef intptr_t _sleb128_t;
+typedef uintptr_t _uleb128_t;
+
struct _Unwind_Context;
+struct _Unwind_Exception;
typedef enum {
_URC_NO_REASON = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
@@ -81,8 +91,43 @@ typedef enum {
_URC_CONTINUE_UNWIND = 8
} _Unwind_Reason_Code;
+typedef enum {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8,
+ _UA_END_OF_STACK = 16 /* gcc extension to C++ ABI */
+} _Unwind_Action;
+
+typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
+ struct _Unwind_Exception *);
+
+struct _Unwind_Exception {
+ _Unwind_Exception_Class exception_class;
+ _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ _Unwind_Word private_1;
+ _Unwind_Word private_2;
+ /* The Itanium ABI requires that _Unwind_Exception objects are "double-word
+ * aligned". GCC has interpreted this to mean "use the maximum useful
+ * alignment for the target"; so do we. */
+} __attribute__((__aligned__));
-#ifdef __arm__
+typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)(int, _Unwind_Action,
+ _Unwind_Exception_Class,
+ struct _Unwind_Exception *,
+ struct _Unwind_Context *,
+ void *);
+
+typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn)(
+ int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *);
+typedef _Unwind_Personality_Fn __personality_routine;
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
+ void *);
+
+#if defined(__arm__) && !defined(__APPLE__)
typedef enum {
_UVRSC_CORE = 0, /* integer register */
@@ -111,14 +156,116 @@ _Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context,
_Unwind_VRS_DataRepresentation __representation,
void *__valuep);
+_Unwind_VRS_Result _Unwind_VRS_Set(struct _Unwind_Context *__context,
+ _Unwind_VRS_RegClass __regclass,
+ uint32_t __regno,
+ _Unwind_VRS_DataRepresentation __representation,
+ void *__valuep);
+
+static __inline__
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *__context, int __index) {
+ _Unwind_Word __value;
+ _Unwind_VRS_Get(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+ return __value;
+}
+
+static __inline__
+void _Unwind_SetGR(struct _Unwind_Context *__context, int __index,
+ _Unwind_Word __value) {
+ _Unwind_VRS_Set(__context, _UVRSC_CORE, __index, _UVRSD_UINT32, &__value);
+}
+
+static __inline__
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *__context) {
+ _Unwind_Word __ip = _Unwind_GetGR(__context, 15);
+ return __ip & ~(_Unwind_Word)(0x1); /* Remove thumb mode bit. */
+}
+
+static __inline__
+void _Unwind_SetIP(struct _Unwind_Context *__context, _Unwind_Word __value) {
+ _Unwind_Word __thumb_mode_bit = _Unwind_GetGR(__context, 15) & 0x1;
+ _Unwind_SetGR(__context, 15, __value | __thumb_mode_bit);
+}
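+/* On ARM, r15 is the program counter and its low bit encodes Thumb
+ * state, so the inline helpers above strip that bit when reading the IP
+ * and preserve it when writing. */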
+#else
+_Unwind_Word _Unwind_GetGR(struct _Unwind_Context *, int);
+void _Unwind_SetGR(struct _Unwind_Context *, int, _Unwind_Word);
+
+_Unwind_Word _Unwind_GetIP(struct _Unwind_Context *);
+void _Unwind_SetIP(struct _Unwind_Context *, _Unwind_Word);
+#endif
+
+
+_Unwind_Word _Unwind_GetIPInfo(struct _Unwind_Context *, int *);
+
+_Unwind_Word _Unwind_GetCFA(struct _Unwind_Context *);
+
+void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context *);
+
+_Unwind_Ptr _Unwind_GetRegionStart(struct _Unwind_Context *);
+
+/* DWARF EH functions; currently not available on Darwin/ARM */
+#if !defined(__APPLE__) || !defined(__arm__)
+
+_Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_DeleteException(struct _Unwind_Exception *);
+void _Unwind_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+#endif
+
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
+
+/* setjmp(3)/longjmp(3) stuff */
+typedef struct SjLj_Function_Context *_Unwind_FunctionContext_t;
+
+void _Unwind_SjLj_Register(_Unwind_FunctionContext_t);
+void _Unwind_SjLj_Unregister(_Unwind_FunctionContext_t);
+_Unwind_Reason_Code _Unwind_SjLj_RaiseException(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind(struct _Unwind_Exception *,
+ _Unwind_Stop_Fn, void *);
+void _Unwind_SjLj_Resume(struct _Unwind_Exception *);
+_Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow(struct _Unwind_Exception *);
+
+void *_Unwind_FindEnclosingFunction(void *);
+
+#ifdef __APPLE__
+
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *)
+ __attribute__((unavailable));
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *)
+ __attribute__((unavailable));
+
+/* Darwin-specific functions */
+void __register_frame(const void *);
+void __deregister_frame(const void *);
+
+struct dwarf_eh_bases {
+ uintptr_t tbase;
+ uintptr_t dbase;
+ uintptr_t func;
+};
+void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *);
+
+void __register_frame_info_bases(const void *, void *, void *, void *)
+ __attribute__((unavailable));
+void __register_frame_info(const void *, void *) __attribute__((unavailable));
+void __register_frame_info_table_bases(const void *, void*, void *, void *)
+ __attribute__((unavailable));
+void __register_frame_info_table(const void *, void *)
+ __attribute__((unavailable));
+void __register_frame_table(const void *) __attribute__((unavailable));
+void __deregister_frame_info(const void *) __attribute__((unavailable));
+void __deregister_frame_info_bases(const void *)__attribute__((unavailable));
+
#else
-uintptr_t _Unwind_GetIP(struct _Unwind_Context* __context);
+_Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *);
+_Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *);
#endif
-typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context*, void*);
-_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void*);
#ifndef HIDE_EXPORTS
#pragma GCC visibility pop
diff --git a/renderscript/clang-include/x86intrin.h b/renderscript/clang-include/x86intrin.h
index 94fbe2f..399016f 100644
--- a/renderscript/clang-include/x86intrin.h
+++ b/renderscript/clang-include/x86intrin.h
@@ -66,6 +66,10 @@
#include <xopintrin.h>
#endif
+#ifdef __TBM__
+#include <tbmintrin.h>
+#endif
+
#ifdef __F16C__
#include <f16cintrin.h>
#endif
diff --git a/renderscript/include/rs_core_math.rsh b/renderscript/include/rs_core_math.rsh
index 2b7c362..8fe6ad2 100644
--- a/renderscript/include/rs_core_math.rsh
+++ b/renderscript/include/rs_core_math.rsh
@@ -14,7939 +14,8099 @@
* limitations under the License.
*/
+// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
+
#ifndef __rs_core_math_rsh__
#define __rs_core_math_rsh__
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to float2
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(float2);
+extern uchar __attribute__((const, overloadable))abs(char value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to float3
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(float3);
+extern uchar2 __attribute__((const, overloadable))abs(char2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to float4
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(float4);
+extern uchar3 __attribute__((const, overloadable))abs(char3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to float2
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(double2);
+extern uchar4 __attribute__((const, overloadable))abs(char4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to float3
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(double3);
+extern ushort __attribute__((const, overloadable))abs(short value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to float4
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(double4);
+extern ushort2 __attribute__((const, overloadable))abs(short2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to float2
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(char2);
+extern ushort3 __attribute__((const, overloadable))abs(short3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to float3
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(char3);
+extern ushort4 __attribute__((const, overloadable))abs(short4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to float4
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(char4);
+extern uint __attribute__((const, overloadable))abs(int value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to float2
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(uchar2);
+extern uint2 __attribute__((const, overloadable))abs(int2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to float3
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(uchar3);
+extern uint3 __attribute__((const, overloadable))abs(int3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to float4
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(uchar4);
+extern uint4 __attribute__((const, overloadable))abs(int4 value);
#endif
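+// Example (illustrative, not part of the generated declarations):
+//   char4 v = {-1, 2, -3, 4};
+//   uchar4 m = abs(v);  // m == {1, 2, 3, 4}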
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to float2
+ * acos
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(short2);
+extern float __attribute__((const, overloadable))acos(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to float3
+ * acos
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(short3);
+extern float2 __attribute__((const, overloadable))acos(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to float4
+ * acos
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(short4);
+extern float3 __attribute__((const, overloadable))acos(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to float2
+ * acos
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(ushort2);
+extern float4 __attribute__((const, overloadable))acos(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to float3
+ * acosh
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(ushort3);
+extern float __attribute__((const, overloadable))acosh(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to float4
+ * acosh
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(ushort4);
+extern float2 __attribute__((const, overloadable))acosh(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to float2
+ * acosh
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(int2);
+extern float3 __attribute__((const, overloadable))acosh(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to float3
+ * acosh
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(int3);
+extern float4 __attribute__((const, overloadable))acosh(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to float4
+ * acospi
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(int4);
+extern float __attribute__((const, overloadable))acospi(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to float2
+ * acospi
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(uint2);
+extern float2 __attribute__((const, overloadable))acospi(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to float3
+ * acospi
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(uint3);
+extern float3 __attribute__((const, overloadable))acospi(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to float4
+ * acospi
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(uint4);
+extern float4 __attribute__((const, overloadable))acospi(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to float2
+ * asin
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(long2);
+extern float __attribute__((const, overloadable))asin(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to float3
+ * asin
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(long3);
+extern float2 __attribute__((const, overloadable))asin(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to float4
+ * asin
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(long4);
+extern float3 __attribute__((const, overloadable))asin(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to float2
+ * asin
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))convert_float2(ulong2);
+extern float4 __attribute__((const, overloadable))asin(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to float3
+ * asinh
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))convert_float3(ulong3);
+extern float __attribute__((const, overloadable))asinh(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to float4
+ * asinh
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))convert_float4(ulong4);
+extern float2 __attribute__((const, overloadable))asinh(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to double2
+ * asinh
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(float2);
+extern float3 __attribute__((const, overloadable))asinh(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to double3
+ * asinh
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(float3);
+extern float4 __attribute__((const, overloadable))asinh(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to double4
+ * Return the inverse sine divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(float4);
+extern float __attribute__((const, overloadable))asinpi(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to double2
+ * Return the inverse sine divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(double2);
+extern float2 __attribute__((const, overloadable))asinpi(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to double3
+ * Return the inverse sine divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(double3);
+extern float3 __attribute__((const, overloadable))asinpi(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to double4
+ * Return the inverse sine divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(double4);
+extern float4 __attribute__((const, overloadable))asinpi(float4);
#endif
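
For orientation, a minimal sketch of what the pi-scaled inverse-trig overloads
compute. The kernel name is hypothetical, and the usual .rs preamble
(#pragma version(1), #pragma rs java_package_name(...)) is assumed:

/* asinpi(x) equals asin(x) / M_PI for x in [-1, 1]; the same relation
 * holds for acospi and atanpi. The dedicated overload avoids the
 * explicit division by M_PI. */
float2 __attribute__((kernel)) to_half_turns(float2 x) {
    return asinpi(clamp(x, -1.f, 1.f));
}
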
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to double2
+ * Return the inverse tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(char2);
+extern float __attribute__((const, overloadable))atan(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to double3
+ * Return the inverse tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(char3);
+extern float2 __attribute__((const, overloadable))atan(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to double4
+ * Return the inverse tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(char4);
+extern float3 __attribute__((const, overloadable))atan(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to double2
+ * Return the inverse tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(uchar2);
+extern float4 __attribute__((const, overloadable))atan(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to double3
+ * Return the inverse tangent of y / x.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(uchar3);
+extern float __attribute__((const, overloadable))atan2(float y, float x);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to double4
+ * Return the inverse tangent of y / x.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(uchar4);
+extern float2 __attribute__((const, overloadable))atan2(float2 y, float2 x);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to double2
+ * Return the inverse tangent of y / x.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(short2);
+extern float3 __attribute__((const, overloadable))atan2(float3 y, float3 x);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to double3
+ * Return the inverse tangent of y / x.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(short3);
+extern float4 __attribute__((const, overloadable))atan2(float4 y, float4 x);
#endif
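
A small usage sketch for atan2 (hypothetical helper name):

/* Angle of a 2-D vector measured from the +x axis, in radians.
 * Unlike atan(v.y / v.x), atan2 resolves the quadrant and handles
 * v.x == 0. */
static float vec_angle(float2 v) {
    return atan2(v.y, v.x);
}
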
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to double4
+ * Return the inverse tangent of y / x, divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(short4);
+extern float __attribute__((const, overloadable))atan2pi(float y, float x);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to double2
+ * Return the inverse tangent of y / x, divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(ushort2);
+extern float2 __attribute__((const, overloadable))atan2pi(float2 y, float2 x);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to double3
+ * Return the inverse tangent of y / x, divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(ushort3);
+extern float3 __attribute__((const, overloadable))atan2pi(float3 y, float3 x);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to double4
+ * Return the inverse tangent of y / x, divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(ushort4);
+extern float4 __attribute__((const, overloadable))atan2pi(float4 y, float4 x);
#endif
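
The pi-scaled variant maps the same angle into half-turns (hypothetical
helper name):

/* atan2pi(y, x) equals atan2(y, x) / M_PI, so the result lies in
 * [-1, 1]. */
static float vec_angle_half_turns(float2 v) {
    return atan2pi(v.y, v.x);
}
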
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to double2
+ * Return the inverse hyperbolic tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(int2);
+extern float __attribute__((const, overloadable))atanh(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to double3
+ * Return the inverse hyperbolic tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(int3);
+extern float2 __attribute__((const, overloadable))atanh(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to double4
+ * Return the inverse hyperbolic tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(int4);
+extern float3 __attribute__((const, overloadable))atanh(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to double2
+ * Return the inverse hyperbolic tangent.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(uint2);
+extern float4 __attribute__((const, overloadable))atanh(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to double3
+ * Return the inverse tangent divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(uint3);
+extern float __attribute__((const, overloadable))atanpi(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to double4
+ * Return the inverse tangent divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(uint4);
+extern float2 __attribute__((const, overloadable))atanpi(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to double2
+ * Return the inverse tangent divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(long2);
+extern float3 __attribute__((const, overloadable))atanpi(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to double3
+ * Return the inverse tangent divided by PI.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(long3);
+extern float4 __attribute__((const, overloadable))atanpi(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to double4
+ * Return the cube root.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(long4);
+extern float __attribute__((const, overloadable))cbrt(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to double2
+ * Return the cube root.
*
* Supported by API versions 9 and newer.
*/
-extern double2 __attribute__((const, overloadable))convert_double2(ulong2);
+extern float2 __attribute__((const, overloadable))cbrt(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to double3
+ * Return the cube root.
*
* Supported by API versions 9 and newer.
*/
-extern double3 __attribute__((const, overloadable))convert_double3(ulong3);
+extern float3 __attribute__((const, overloadable))cbrt(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to double4
+ * Return the cube root.
*
* Supported by API versions 9 and newer.
*/
-extern double4 __attribute__((const, overloadable))convert_double4(ulong4);
+extern float4 __attribute__((const, overloadable))cbrt(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to char2
+ * Return the smallest integer not less than a value.
*
* Supported by API versions 9 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(float2);
+extern float __attribute__((const, overloadable))ceil(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to char3
+ * Return the smallest integer not less than a value.
*
* Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(float3);
+extern float2 __attribute__((const, overloadable))ceil(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to char4
+ * Return the smallest integer not less than a value.
*
* Supported by API versions 9 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(float4);
+extern float3 __attribute__((const, overloadable))ceil(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to char2
+ * Return the smallest integer not less than a value.
*
* Supported by API versions 9 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(double2);
+extern float4 __attribute__((const, overloadable))ceil(float4);
#endif
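
A one-line sketch of component-wise ceil (hypothetical helper and parameter
names):

/* Round scaled coordinates up to the next integer grid line,
 * independently in each component. */
static float2 snap_up(float2 uv, float2 dims) {
    return ceil(uv * dims);
}
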
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to char3
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(double3);
+extern float __attribute__((const, overloadable))clamp(float value, float min_value, float max_value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to char4
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(double4);
+extern float2 __attribute__((const, overloadable))clamp(float2 value, float2 min_value, float2 max_value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to char2
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(char2);
+extern float3 __attribute__((const, overloadable))clamp(float3 value, float3 min_value, float3 max_value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to char3
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(char3);
+extern float4 __attribute__((const, overloadable))clamp(float4 value, float4 min_value, float4 max_value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to char4
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(char4);
+extern float2 __attribute__((const, overloadable))clamp(float2 value, float min_value, float max_value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to char2
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(uchar2);
+extern float3 __attribute__((const, overloadable))clamp(float3 value, float min_value, float max_value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to char3
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
*
* Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(uchar3);
+extern float4 __attribute__((const, overloadable))clamp(float4 value, float min_value, float max_value);
#endif
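
The float clamp overloads above accept either matching-vector or scalar
bounds; a typical saturate looks like this (hypothetical kernel name):

/* Scalar bounds broadcast across all four components. */
float4 __attribute__((kernel)) saturate(float4 in) {
    return clamp(in, 0.f, 1.f);
}
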
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uchar4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(uchar4);
+extern char __attribute__((const, overloadable))clamp(char value, char min_value, char max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from short2 to char2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(short2);
+extern char2 __attribute__((const, overloadable))clamp(char2 value, char2 min_value, char2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from short3 to char3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(short3);
+extern char3 __attribute__((const, overloadable))clamp(char3 value, char3 min_value, char3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from short4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(short4);
+extern char4 __attribute__((const, overloadable))clamp(char4 value, char4 min_value, char4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ushort2 to char2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(ushort2);
+extern uchar __attribute__((const, overloadable))clamp(uchar value, uchar min_value, uchar max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ushort3 to char3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(ushort3);
+extern uchar2 __attribute__((const, overloadable))clamp(uchar2 value, uchar2 min_value, uchar2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ushort4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(ushort4);
+extern uchar3 __attribute__((const, overloadable))clamp(uchar3 value, uchar3 min_value, uchar3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from int2 to char2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(int2);
+extern uchar4 __attribute__((const, overloadable))clamp(uchar4 value, uchar4 min_value, uchar4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from int3 to char3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(int3);
+extern short __attribute__((const, overloadable))clamp(short value, short min_value, short max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from int4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(int4);
+extern short2 __attribute__((const, overloadable))clamp(short2 value, short2 min_value, short2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uint2 to char2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(uint2);
+extern short3 __attribute__((const, overloadable))clamp(short3 value, short3 min_value, short3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uint3 to char3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(uint3);
+extern short4 __attribute__((const, overloadable))clamp(short4 value, short4 min_value, short4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uint4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(uint4);
+extern ushort __attribute__((const, overloadable))clamp(ushort value, ushort min_value, ushort max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from long2 to char2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(long2);
+extern ushort2 __attribute__((const, overloadable))clamp(ushort2 value, ushort2 min_value, ushort2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from long3 to char3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(long3);
+extern ushort3 __attribute__((const, overloadable))clamp(ushort3 value, ushort3 min_value, ushort3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from long4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(long4);
+extern ushort4 __attribute__((const, overloadable))clamp(ushort4 value, ushort4 min_value, ushort4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ulong2 to char2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char2 __attribute__((const, overloadable))convert_char2(ulong2);
+extern int __attribute__((const, overloadable))clamp(int value, int min_value, int max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ulong3 to char3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char3 __attribute__((const, overloadable))convert_char3(ulong3);
+extern int2 __attribute__((const, overloadable))clamp(int2 value, int2 min_value, int2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ulong4 to char4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern char4 __attribute__((const, overloadable))convert_char4(ulong4);
+extern int3 __attribute__((const, overloadable))clamp(int3 value, int3 min_value, int3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from float2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(float2);
+extern int4 __attribute__((const, overloadable))clamp(int4 value, int4 min_value, int4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from float3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(float3);
+extern uint __attribute__((const, overloadable))clamp(uint value, uint min_value, uint max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from float4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(float4);
+extern uint2 __attribute__((const, overloadable))clamp(uint2 value, uint2 min_value, uint2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from double2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(double2);
+extern uint3 __attribute__((const, overloadable))clamp(uint3 value, uint3 min_value, uint3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from double3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(double3);
+extern uint4 __attribute__((const, overloadable))clamp(uint4 value, uint4 min_value, uint4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from double4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(double4);
+extern long __attribute__((const, overloadable))clamp(long value, long min_value, long max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from char2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(char2);
+extern long2 __attribute__((const, overloadable))clamp(long2 value, long2 min_value, long2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from char3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(char3);
+extern long3 __attribute__((const, overloadable))clamp(long3 value, long3 min_value, long3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from char4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(char4);
+extern long4 __attribute__((const, overloadable))clamp(long4 value, long4 min_value, long4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uchar2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(uchar2);
+extern ulong __attribute__((const, overloadable))clamp(ulong value, ulong min_value, ulong max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uchar3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(uchar3);
+extern ulong2 __attribute__((const, overloadable))clamp(ulong2 value, ulong2 min_value, ulong2 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uchar4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(uchar4);
+extern ulong3 __attribute__((const, overloadable))clamp(ulong3 value, ulong3 min_value, ulong3 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from short2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(short2);
+extern ulong4 __attribute__((const, overloadable))clamp(ulong4 value, ulong4 min_value, ulong4 max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from short3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(short3);
+extern char2 __attribute__((const, overloadable))clamp(char2 value, char min_value, char max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from short4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(short4);
+extern char3 __attribute__((const, overloadable))clamp(char3 value, char min_value, char max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ushort2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(ushort2);
+extern char4 __attribute__((const, overloadable))clamp(char4 value, char min_value, char max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ushort3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(ushort3);
+extern uchar2 __attribute__((const, overloadable))clamp(uchar2 value, uchar min_value, uchar max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ushort4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(ushort4);
+extern uchar3 __attribute__((const, overloadable))clamp(uchar3 value, uchar min_value, uchar max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from int2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(int2);
+extern uchar4 __attribute__((const, overloadable))clamp(uchar4 value, uchar min_value, uchar max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from int3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(int3);
+extern short2 __attribute__((const, overloadable))clamp(short2 value, short min_value, short max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from int4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(int4);
+extern short3 __attribute__((const, overloadable))clamp(short3 value, short min_value, short max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uint2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(uint2);
+extern short4 __attribute__((const, overloadable))clamp(short4 value, short min_value, short max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uint3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(uint3);
+extern ushort2 __attribute__((const, overloadable))clamp(ushort2 value, ushort min_value, ushort max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from uint4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(uint4);
+extern ushort3 __attribute__((const, overloadable))clamp(ushort3 value, ushort min_value, ushort max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from long2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(long2);
+extern ushort4 __attribute__((const, overloadable))clamp(ushort4 value, ushort min_value, ushort max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from long3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(long3);
+extern int2 __attribute__((const, overloadable))clamp(int2 value, int min_value, int max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from long4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(long4);
+extern int3 __attribute__((const, overloadable))clamp(int3 value, int min_value, int max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ulong2 to uchar2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))convert_uchar2(ulong2);
+extern int4 __attribute__((const, overloadable))clamp(int4 value, int min_value, int max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ulong3 to uchar3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))convert_uchar3(ulong3);
+extern uint2 __attribute__((const, overloadable))clamp(uint2 value, uint min_value, uint max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from ulong4 to uchar4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))convert_uchar4(ulong4);
+extern uint3 __attribute__((const, overloadable))clamp(uint3 value, uint min_value, uint max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from float2 to short2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(float2);
+extern uint4 __attribute__((const, overloadable))clamp(uint4 value, uint min_value, uint max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from float3 to short3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(float3);
+extern long2 __attribute__((const, overloadable))clamp(long2 value, long min_value, long max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from float4 to short4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(float4);
+extern long3 __attribute__((const, overloadable))clamp(long3 value, long min_value, long max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from double2 to short2
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(double2);
+extern long4 __attribute__((const, overloadable))clamp(long4 value, long min_value, long max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from double3 to short3
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(double3);
+extern ulong2 __attribute__((const, overloadable))clamp(ulong2 value, ulong min_value, ulong max_value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/*
- * Component wise conversion from double4 to short4
+ * Clamp a value to a specified high and low bound.
*
- * Supported by API versions 9 and newer.
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(double4);
+extern ulong3 __attribute__((const, overloadable))clamp(ulong3 value, ulong min_value, ulong max_value);
+#endif
+#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+/*
+ * Clamp a value to a specified high and low bound.
+ *
+ * @param value Value to be clamped. Supports 1, 2, 3, and 4 components.
+ * @param min_value Lower bound, must be scalar or matching vector.
+ * @param max_value Upper bound, must match the type of min_value.
+ *
+ * Supported by API versions 19 and newer.
+ */
+extern ulong4 __attribute__((const, overloadable))clamp(ulong4 value, ulong min_value, ulong max_value);
#endif
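
The integer clamp overloads are new in API 19, so scripts that may run
against older targets should guard their use (hypothetical helper name):

#if (defined(RS_VERSION) && (RS_VERSION >= 19))
/* Component-wise min(max(p, lo), hi) on integer vectors. */
static int3 clamp_to_grid(int3 p, int3 lo, int3 hi) {
    return clamp(p, lo, hi);
}
#endif
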
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(char2);
+extern char __attribute__((const, overloadable))clz(char value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(char3);
+extern char2 __attribute__((const, overloadable))clz(char2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(char4);
+extern char3 __attribute__((const, overloadable))clz(char3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(uchar2);
+extern char4 __attribute__((const, overloadable))clz(char4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(uchar3);
+extern uchar __attribute__((const, overloadable))clz(uchar value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(uchar4);
+extern uchar2 __attribute__((const, overloadable))clz(uchar2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(short2);
+extern uchar3 __attribute__((const, overloadable))clz(uchar3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(short3);
+extern uchar4 __attribute__((const, overloadable))clz(uchar4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(short4);
+extern short __attribute__((const, overloadable))clz(short value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(ushort2);
+extern short2 __attribute__((const, overloadable))clz(short2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(ushort3);
+extern short3 __attribute__((const, overloadable))clz(short3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(ushort4);
+extern short4 __attribute__((const, overloadable))clz(short4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(int2);
+extern ushort __attribute__((const, overloadable))clz(ushort value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(int3);
+extern ushort2 __attribute__((const, overloadable))clz(ushort2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(int4);
+extern ushort3 __attribute__((const, overloadable))clz(ushort3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(uint2);
+extern ushort4 __attribute__((const, overloadable))clz(ushort4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(uint3);
+extern int __attribute__((const, overloadable))clz(int value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(uint4);
+extern int2 __attribute__((const, overloadable))clz(int2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(long2);
+extern int3 __attribute__((const, overloadable))clz(int3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(long3);
+extern int4 __attribute__((const, overloadable))clz(int4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(long4);
+extern uint __attribute__((const, overloadable))clz(uint value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to short2
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))convert_short2(ulong2);
+extern uint2 __attribute__((const, overloadable))clz(uint2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to short3
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))convert_short3(ulong3);
+extern uint3 __attribute__((const, overloadable))clz(uint3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to short4
+ * Return the number of leading 0-bits in a value.
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))convert_short4(ulong4);
+extern uint4 __attribute__((const, overloadable))clz(uint4 value);
#endif
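
The hunk above adds clz overloads for every 8-, 16- and 32-bit integer type, scalar and vector; the count is taken against the width of the argument's element type, and the result has the same type as the input. A minimal sketch of those semantics, assuming the usual .rs preamble (#pragma version(1) and #pragma rs java_package_name(...)) and that rs_core_math.rsh is pulled in through rs_core.rsh; clz(0) returning the full bit width is an assumption here, not something this header documents:

static void clz_demo(void) {
    /* The same numeric value yields different counts at different widths. */
    uchar c = 1;               /* 0000 0001b */
    uint  i = 1;
    uchar c_bits = clz(c);     /* 7: seven leading 0-bits in an 8-bit value   */
    uint  i_bits = clz(i);     /* 31: thirty-one leading 0-bits in 32 bits    */

    /* Vector overloads count per component. */
    int4 v = {1, 2, 0x7fffffff, 0};
    int4 n = clz(v);           /* {31, 30, 1, 32} -- last lane assumes clz(0) == bit width */
}
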
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to ushort2
+ * Component wise conversion from float2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(float2);
+extern float2 __attribute__((const, overloadable))convert_float2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to ushort3
+ * Component wise conversion from float3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(float3);
+extern float3 __attribute__((const, overloadable))convert_float3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to ushort4
+ * Component wise conversion from float4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(float4);
+extern float4 __attribute__((const, overloadable))convert_float4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to ushort2
+ * Component wise conversion from double2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(double2);
+extern float2 __attribute__((const, overloadable))convert_float2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to ushort3
+ * Component wise conversion from double3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(double3);
+extern float3 __attribute__((const, overloadable))convert_float3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to ushort4
+ * Component wise conversion from double4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(double4);
+extern float4 __attribute__((const, overloadable))convert_float4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to ushort2
+ * Component wise conversion from char2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(char2);
+extern float2 __attribute__((const, overloadable))convert_float2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to ushort3
+ * Component wise conversion from char3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(char3);
+extern float3 __attribute__((const, overloadable))convert_float3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to ushort4
+ * Component wise conversion from char4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(char4);
+extern float4 __attribute__((const, overloadable))convert_float4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to ushort2
+ * Component wise conversion from uchar2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(uchar2);
+extern float2 __attribute__((const, overloadable))convert_float2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to ushort3
+ * Component wise conversion from uchar3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(uchar3);
+extern float3 __attribute__((const, overloadable))convert_float3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to ushort4
+ * Component wise conversion from uchar4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(uchar4);
+extern float4 __attribute__((const, overloadable))convert_float4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to ushort2
+ * Component wise conversion from short2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(short2);
+extern float2 __attribute__((const, overloadable))convert_float2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to ushort3
+ * Component wise conversion from short3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(short3);
+extern float3 __attribute__((const, overloadable))convert_float3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to ushort4
+ * Component wise conversion from short4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(short4);
+extern float4 __attribute__((const, overloadable))convert_float4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to ushort2
+ * Component wise conversion from ushort2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(ushort2);
+extern float2 __attribute__((const, overloadable))convert_float2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to ushort3
+ * Component wise conversion from ushort3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(ushort3);
+extern float3 __attribute__((const, overloadable))convert_float3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to ushort4
+ * Component wise conversion from ushort4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(ushort4);
+extern float4 __attribute__((const, overloadable))convert_float4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to ushort2
+ * Component wise conversion from int2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(int2);
+extern float2 __attribute__((const, overloadable))convert_float2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to ushort3
+ * Component wise conversion from int3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(int3);
+extern float3 __attribute__((const, overloadable))convert_float3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to ushort4
+ * Component wise conversion from int4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(int4);
+extern float4 __attribute__((const, overloadable))convert_float4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to ushort2
+ * Component wise conversion from uint2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(uint2);
+extern float2 __attribute__((const, overloadable))convert_float2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to ushort3
+ * Component wise conversion from uint3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(uint3);
+extern float3 __attribute__((const, overloadable))convert_float3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to ushort4
+ * Component wise conversion from uint4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(uint4);
+extern float4 __attribute__((const, overloadable))convert_float4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to ushort2
+ * Component wise conversion from long2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(long2);
+extern float2 __attribute__((const, overloadable))convert_float2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to ushort3
+ * Component wise conversion from long3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(long3);
+extern float3 __attribute__((const, overloadable))convert_float3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to ushort4
+ * Component wise conversion from long4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(long4);
+extern float4 __attribute__((const, overloadable))convert_float4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to ushort2
+ * Component wise conversion from ulong2 to float2
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))convert_ushort2(ulong2);
+extern float2 __attribute__((const, overloadable))convert_float2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to ushort3
+ * Component wise conversion from ulong3 to float3
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))convert_ushort3(ulong3);
+extern float3 __attribute__((const, overloadable))convert_float3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to ushort4
+ * Component wise conversion from ulong4 to float4
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))convert_ushort4(ulong4);
+extern float4 __attribute__((const, overloadable))convert_float4(ulong4);
#endif
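
convert_float2/3/4 accept every 2-, 3- and 4-component numeric vector type and convert per component, which for integer sources behaves like a C cast to float in each lane. One detail worth keeping in mind: a 32-bit int does not always fit exactly in a float's 24-bit significand. A sketch (the rounding result shown is the usual round-to-nearest behavior, assumed rather than documented here):

static void to_float_demo(void) {
    ushort2 raw = {0, 65535};
    float2  f   = convert_float2(raw);  /* {0.0f, 65535.0f}: 16-bit values convert exactly */

    int2   big = {16777217, -16777217}; /* 2^24 + 1 is not representable as a float */
    float2 g   = convert_float2(big);   /* assumed {16777216.0f, -16777216.0f} after rounding */
}
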
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to int2
+ * Component wise conversion from float2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(float2);
+extern double2 __attribute__((const, overloadable))convert_double2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to int3
+ * Component wise conversion from float3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(float3);
+extern double3 __attribute__((const, overloadable))convert_double3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to int4
+ * Component wise conversion from float4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(float4);
+extern double4 __attribute__((const, overloadable))convert_double4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to int2
+ * Component wise conversion from double2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(double2);
+extern double2 __attribute__((const, overloadable))convert_double2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to int3
+ * Component wise conversion from double3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(double3);
+extern double3 __attribute__((const, overloadable))convert_double3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to int4
+ * Component wise conversion from double4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(double4);
+extern double4 __attribute__((const, overloadable))convert_double4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to int2
+ * Component wise conversion from char2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(char2);
+extern double2 __attribute__((const, overloadable))convert_double2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to int3
+ * Component wise conversion from char3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(char3);
+extern double3 __attribute__((const, overloadable))convert_double3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to int4
+ * Component wise conversion from char4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(char4);
+extern double4 __attribute__((const, overloadable))convert_double4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to int2
+ * Component wise conversion from uchar2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(uchar2);
+extern double2 __attribute__((const, overloadable))convert_double2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to int3
+ * Component wise conversion from uchar3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(uchar3);
+extern double3 __attribute__((const, overloadable))convert_double3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to int4
+ * Component wise conversion from uchar4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(uchar4);
+extern double4 __attribute__((const, overloadable))convert_double4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to int2
+ * Component wise conversion from short2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(short2);
+extern double2 __attribute__((const, overloadable))convert_double2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to int3
+ * Component wise conversion from short3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(short3);
+extern double3 __attribute__((const, overloadable))convert_double3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to int4
+ * Component wise conversion from short4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(short4);
+extern double4 __attribute__((const, overloadable))convert_double4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to int2
+ * Component wise conversion from ushort2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(ushort2);
+extern double2 __attribute__((const, overloadable))convert_double2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to int3
+ * Component wise conversion from ushort3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(ushort3);
+extern double3 __attribute__((const, overloadable))convert_double3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to int4
+ * Component wise conversion from ushort4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(ushort4);
+extern double4 __attribute__((const, overloadable))convert_double4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to int2
+ * Component wise conversion from int2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(int2);
+extern double2 __attribute__((const, overloadable))convert_double2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to int3
+ * Component wise conversion from int3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(int3);
+extern double3 __attribute__((const, overloadable))convert_double3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to int4
+ * Component wise conversion from int4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(int4);
+extern double4 __attribute__((const, overloadable))convert_double4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to int2
+ * Component wise conversion from uint2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(uint2);
+extern double2 __attribute__((const, overloadable))convert_double2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to int3
+ * Component wise conversion from uint3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(uint3);
+extern double3 __attribute__((const, overloadable))convert_double3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to int4
+ * Component wise conversion from uint4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(uint4);
+extern double4 __attribute__((const, overloadable))convert_double4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to int2
+ * Component wise conversion from long2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(long2);
+extern double2 __attribute__((const, overloadable))convert_double2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to int3
+ * Component wise conversion from long3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(long3);
+extern double3 __attribute__((const, overloadable))convert_double3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to int4
+ * Component wise conversion from long4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(long4);
+extern double4 __attribute__((const, overloadable))convert_double4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to int2
+ * Component wise conversion from ulong2 to double2
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))convert_int2(ulong2);
+extern double2 __attribute__((const, overloadable))convert_double2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to int3
+ * Component wise conversion from ulong3 to double3
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))convert_int3(ulong3);
+extern double3 __attribute__((const, overloadable))convert_double3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to int4
+ * Component wise conversion from ulong4 to double4
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))convert_int4(ulong4);
+extern double4 __attribute__((const, overloadable))convert_double4(ulong4);
#endif
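
The convert_double2/3/4 overloads mirror the float family with a wider target: conversion from float, and from any integer type up to 32 bits, is exact, since a double's 53-bit significand holds all of those values; only the long and ulong sources can lose precision. A short sketch of the exact cases:

static void to_double_demo(void) {
    float2  f = {1.5f, 2097151.25f};
    double2 d = convert_double2(f);     /* widening; every float value is exactly representable */

    int2    i = {-2147483647, 2147483647};
    double2 e = convert_double2(i);     /* exact: 32-bit ints fit in a 53-bit significand */
}
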
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to uint2
+ * Component wise conversion from float2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(float2);
+extern char2 __attribute__((const, overloadable))convert_char2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to uint3
+ * Component wise conversion from float3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(float3);
+extern char3 __attribute__((const, overloadable))convert_char3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to uint4
+ * Component wise conversion from float4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(float4);
+extern char4 __attribute__((const, overloadable))convert_char4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to uint2
+ * Component wise conversion from double2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(double2);
+extern char2 __attribute__((const, overloadable))convert_char2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to uint3
+ * Component wise conversion from double3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(double3);
+extern char3 __attribute__((const, overloadable))convert_char3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to uint4
+ * Component wise conversion from double4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(double4);
+extern char4 __attribute__((const, overloadable))convert_char4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to uint2
+ * Component wise conversion from char2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(char2);
+extern char2 __attribute__((const, overloadable))convert_char2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to uint3
+ * Component wise conversion from char3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(char3);
+extern char3 __attribute__((const, overloadable))convert_char3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to uint4
+ * Component wise conversion from char4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(char4);
+extern char4 __attribute__((const, overloadable))convert_char4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to uint2
+ * Component wise conversion from uchar2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(uchar2);
+extern char2 __attribute__((const, overloadable))convert_char2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to uint3
+ * Component wise conversion from uchar3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(uchar3);
+extern char3 __attribute__((const, overloadable))convert_char3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to uint4
+ * Component wise conversion from uchar4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(uchar4);
+extern char4 __attribute__((const, overloadable))convert_char4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to uint2
+ * Component wise conversion from short2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(short2);
+extern char2 __attribute__((const, overloadable))convert_char2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to uint3
+ * Component wise conversion from short3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(short3);
+extern char3 __attribute__((const, overloadable))convert_char3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to uint4
+ * Component wise conversion from short4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(short4);
+extern char4 __attribute__((const, overloadable))convert_char4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to uint2
+ * Component wise conversion from ushort2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(ushort2);
+extern char2 __attribute__((const, overloadable))convert_char2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to uint3
+ * Component wise conversion from ushort3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(ushort3);
+extern char3 __attribute__((const, overloadable))convert_char3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to uint4
+ * Component wise conversion from ushort4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(ushort4);
+extern char4 __attribute__((const, overloadable))convert_char4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to uint2
+ * Component wise conversion from int2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(int2);
+extern char2 __attribute__((const, overloadable))convert_char2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to uint3
+ * Component wise conversion from int3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(int3);
+extern char3 __attribute__((const, overloadable))convert_char3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to uint4
+ * Component wise conversion from int4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(int4);
+extern char4 __attribute__((const, overloadable))convert_char4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to uint2
+ * Component wise conversion from uint2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(uint2);
+extern char2 __attribute__((const, overloadable))convert_char2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to uint3
+ * Component wise conversion from uint3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(uint3);
+extern char3 __attribute__((const, overloadable))convert_char3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to uint4
+ * Component wise conversion from uint4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(uint4);
+extern char4 __attribute__((const, overloadable))convert_char4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to uint2
+ * Component wise conversion from long2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(long2);
+extern char2 __attribute__((const, overloadable))convert_char2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to uint3
+ * Component wise conversion from long3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(long3);
+extern char3 __attribute__((const, overloadable))convert_char3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to uint4
+ * Component wise conversion from long4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(long4);
+extern char4 __attribute__((const, overloadable))convert_char4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to uint2
+ * Component wise conversion from ulong2 to char2
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))convert_uint2(ulong2);
+extern char2 __attribute__((const, overloadable))convert_char2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to uint3
+ * Component wise conversion from ulong3 to char3
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))convert_uint3(ulong3);
+extern char3 __attribute__((const, overloadable))convert_char3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to uint4
+ * Component wise conversion from ulong4 to char4
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))convert_uint4(ulong4);
+extern char4 __attribute__((const, overloadable))convert_char4(ulong4);
#endif
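
Narrowing conversions such as convert_char2/3/4 keep only the low 8 bits of each component; nothing in this header saturates. On the two's-complement targets RenderScript builds for, the wrap shown below is what a C cast would produce (assumed, since the header itself only promises a "component wise conversion"):

static void to_char_demo(void) {
    short2 s = {127, 128};
    char2  c = convert_char2(s);        /* {127, -128}: 128 wraps, it is not clamped to 127 */
}
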
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to long2
+ * Component wise conversion from float2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(float2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to long3
+ * Component wise conversion from float3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(float3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to long4
+ * Component wise conversion from float4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(float4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to long2
+ * Component wise conversion from double2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(double2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to long3
+ * Component wise conversion from double3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(double3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to long4
+ * Component wise conversion from double4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(double4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to long2
+ * Component wise conversion from char2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(char2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to long3
+ * Component wise conversion from char3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(char3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to long4
+ * Component wise conversion from char4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(char4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to long2
+ * Component wise conversion from uchar2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(uchar2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to long3
+ * Component wise conversion from uchar3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(uchar3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to long4
+ * Component wise conversion from uchar4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(uchar4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to long2
+ * Component wise conversion from short2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(short2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to long3
+ * Component wise conversion from short3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(short3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to long4
+ * Component wise conversion from short4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(short4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to long2
+ * Component wise conversion from ushort2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(ushort2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to long3
+ * Component wise conversion from ushort3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(ushort3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to long4
+ * Component wise conversion from ushort4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(ushort4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to long2
+ * Component wise conversion from int2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(int2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to long3
+ * Component wise conversion from int3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(int3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to long4
+ * Component wise conversion from int4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(int4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to long2
+ * Component wise conversion from uint2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(uint2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to long3
+ * Component wise conversion from uint3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(uint3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to long4
+ * Component wise conversion from uint4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(uint4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to long2
+ * Component wise conversion from long2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(long2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to long3
+ * Component wise conversion from long3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(long3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to long4
+ * Component wise conversion from long4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(long4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to long2
+ * Component wise conversion from ulong2 to uchar2
*
* Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))convert_long2(ulong2);
+extern uchar2 __attribute__((const, overloadable))convert_uchar2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to long3
+ * Component wise conversion from ulong3 to uchar3
*
* Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))convert_long3(ulong3);
+extern uchar3 __attribute__((const, overloadable))convert_uchar3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to long4
+ * Component wise conversion from ulong4 to uchar4
*
* Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))convert_long4(ulong4);
+extern uchar4 __attribute__((const, overloadable))convert_uchar4(ulong4);
#endif
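
A common use of convert_uchar4 is packing a float4 color into bytes. Because these converts do not clamp, the scale-and-clamp step has to happen first; the sketch below leans on the clamp overloads declared elsewhere in this same header:

static uchar4 pack_color(float4 color) {
    /* Clamp to [0, 1] before scaling, since out-of-range floats would not saturate. */
    return convert_uchar4(clamp(color, 0.0f, 1.0f) * 255.0f);
}
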
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float2 to ulong2
+ * Component wise conversion from float2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(float2);
+extern short2 __attribute__((const, overloadable))convert_short2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float3 to ulong3
+ * Component wise conversion from float3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(float3);
+extern short3 __attribute__((const, overloadable))convert_short3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from float4 to ulong4
+ * Component wise conversion from float4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(float4);
+extern short4 __attribute__((const, overloadable))convert_short4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double2 to ulong2
+ * Component wise conversion from double2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(double2);
+extern short2 __attribute__((const, overloadable))convert_short2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double3 to ulong3
+ * Component wise conversion from double3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(double3);
+extern short3 __attribute__((const, overloadable))convert_short3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from double4 to ulong4
+ * Component wise conversion from double4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(double4);
+extern short4 __attribute__((const, overloadable))convert_short4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char2 to ulong2
+ * Component wise conversion from char2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(char2);
+extern short2 __attribute__((const, overloadable))convert_short2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char3 to ulong3
+ * Component wise conversion from char3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(char3);
+extern short3 __attribute__((const, overloadable))convert_short3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from char4 to ulong4
+ * Component wise conversion from char4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(char4);
+extern short4 __attribute__((const, overloadable))convert_short4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar2 to ulong2
+ * Component wise conversion from uchar2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(uchar2);
+extern short2 __attribute__((const, overloadable))convert_short2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar3 to ulong3
+ * Component wise conversion from uchar3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(uchar3);
+extern short3 __attribute__((const, overloadable))convert_short3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uchar4 to ulong4
+ * Component wise conversion from uchar4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(uchar4);
+extern short4 __attribute__((const, overloadable))convert_short4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short2 to ulong2
+ * Component wise conversion from short2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(short2);
+extern short2 __attribute__((const, overloadable))convert_short2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short3 to ulong3
+ * Component wise conversion from short3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(short3);
+extern short3 __attribute__((const, overloadable))convert_short3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from short4 to ulong4
+ * Component wise conversion from short4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(short4);
+extern short4 __attribute__((const, overloadable))convert_short4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort2 to ulong2
+ * Component wise conversion from ushort2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(ushort2);
+extern short2 __attribute__((const, overloadable))convert_short2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort3 to ulong3
+ * Component wise conversion from ushort3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(ushort3);
+extern short3 __attribute__((const, overloadable))convert_short3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ushort4 to ulong4
+ * Component wise conversion from ushort4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(ushort4);
+extern short4 __attribute__((const, overloadable))convert_short4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int2 to ulong2
+ * Component wise conversion from int2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(int2);
+extern short2 __attribute__((const, overloadable))convert_short2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int3 to ulong3
+ * Component wise conversion from int3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(int3);
+extern short3 __attribute__((const, overloadable))convert_short3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from int4 to ulong4
+ * Component wise conversion from int4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(int4);
+extern short4 __attribute__((const, overloadable))convert_short4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint2 to ulong2
+ * Component wise conversion from uint2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(uint2);
+extern short2 __attribute__((const, overloadable))convert_short2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint3 to ulong3
+ * Component wise conversion from uint3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(uint3);
+extern short3 __attribute__((const, overloadable))convert_short3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from uint4 to ulong4
+ * Component wise conversion from uint4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(uint4);
+extern short4 __attribute__((const, overloadable))convert_short4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long2 to ulong2
+ * Component wise conversion from long2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(long2);
+extern short2 __attribute__((const, overloadable))convert_short2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long3 to ulong3
+ * Component wise conversion from long3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(long3);
+extern short3 __attribute__((const, overloadable))convert_short3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from long4 to ulong4
+ * Component wise conversion from long4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(long4);
+extern short4 __attribute__((const, overloadable))convert_short4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong2 to ulong2
+ * Component wise conversion from ulong2 to short2
*
* Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))convert_ulong2(ulong2);
+extern short2 __attribute__((const, overloadable))convert_short2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong3 to ulong3
+ * Component wise conversion from ulong3 to short3
*
* Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))convert_ulong3(ulong3);
+extern short3 __attribute__((const, overloadable))convert_short3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Component wise conversion from ulong4 to ulong4
+ * Component wise conversion from ulong4 to short4
*
* Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))convert_ulong4(ulong4);
+extern short4 __attribute__((const, overloadable))convert_short4(ulong4);
#endif
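
The convert_short* overloads above behave like a per-component C cast: each lane of the source vector is converted independently, with no saturation. A minimal sketch of their use (the values and variable names are illustrative, not from this header):

    // Narrow a float2 to short2, lane by lane; as with a C cast,
    // the fractional part is discarded (truncation toward zero).
    float2 f = {1.9f, -2.9f};
    short2 s = convert_short2(f);   // s.x == 1, s.y == -2
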
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acos
+ * Component wise conversion from float2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))acos(float);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acos
+ * Component wise conversion from float3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))acos(float2);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acos
+ * Component wise conversion from float4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))acos(float3);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acos
+ * Component wise conversion from double2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))acos(float4);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acosh
+ * Component wise conversion from double3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))acosh(float);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acosh
+ * Component wise conversion from double4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))acosh(float2);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acosh
+ * Component wise conversion from char2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))acosh(float3);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acosh
+ * Component wise conversion from char3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))acosh(float4);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acospi
+ * Component wise conversion from char4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))acospi(float);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acospi
+ * Component wise conversion from uchar2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))acospi(float2);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acospi
+ * Component wise conversion from uchar3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))acospi(float3);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * acospi
+ * Component wise conversion from uchar4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))acospi(float4);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asin
+ * Component wise conversion from short2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))asin(float);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asin
+ * Component wise conversion from short3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))asin(float2);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asin
+ * Component wise conversion from short4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))asin(float3);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asin
+ * Component wise conversion from ushort2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))asin(float4);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asinh
+ * Component wise conversion from ushort3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))asinh(float);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asinh
+ * Component wise conversion from ushort4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))asinh(float2);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asinh
+ * Component wise conversion from int2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))asinh(float3);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * asinh
+ * Component wise conversion from int3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))asinh(float4);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse sine divided by PI.
+ * Component wise conversion from int4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))asinpi(float);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse sine divided by PI.
+ * Component wise conversion from uint2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))asinpi(float2);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse sine divided by PI.
+ * Component wise conversion from uint3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))asinpi(float3);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse sine divided by PI.
+ * Component wise conversion from uint4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))asinpi(float4);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent.
+ * Component wise conversion from long2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))atan(float);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent.
+ * Component wise conversion from long3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))atan(float2);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent.
+ * Component wise conversion from long4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))atan(float3);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent.
+ * Component wise conversion from ulong2 to ushort2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))atan(float4);
+extern ushort2 __attribute__((const, overloadable))convert_ushort2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x.
+ * Component wise conversion from ulong3 to ushort3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))atan2(float y, float x);
+extern ushort3 __attribute__((const, overloadable))convert_ushort3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x.
+ * Component wise conversion from ulong4 to ushort4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))atan2(float2 y, float2 x);
+extern ushort4 __attribute__((const, overloadable))convert_ushort4(ulong4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x.
+ * Component wise conversion from float2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))atan2(float3 y, float3 x);
+extern int2 __attribute__((const, overloadable))convert_int2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x.
+ * Component wise conversion from float3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))atan2(float4 y, float4 x);
+extern int3 __attribute__((const, overloadable))convert_int3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse hyperbolic tangent.
+ * Component wise conversion from float4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))atanh(float);
+extern int4 __attribute__((const, overloadable))convert_int4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse hyperbolic tangent.
+ * Component wise conversion from double2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))atanh(float2);
+extern int2 __attribute__((const, overloadable))convert_int2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse hyperbolic tangent.
+ * Component wise conversion from double3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))atanh(float3);
+extern int3 __attribute__((const, overloadable))convert_int3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse hyperbolic tangent.
+ * Component wise conversion from double4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))atanh(float4);
+extern int4 __attribute__((const, overloadable))convert_int4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent divided by PI.
+ * Component wise conversion from char2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))atanpi(float);
+extern int2 __attribute__((const, overloadable))convert_int2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent divided by PI.
+ * Component wise conversion from char3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))atanpi(float2);
+extern int3 __attribute__((const, overloadable))convert_int3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent divided by PI.
+ * Component wise conversion from char4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))atanpi(float3);
+extern int4 __attribute__((const, overloadable))convert_int4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent divided by PI.
+ * Component wise conversion from uchar2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))atanpi(float4);
+extern int2 __attribute__((const, overloadable))convert_int2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x, divided by PI.
+ * Component wise conversion from uchar3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))atan2pi(float y, float x);
+extern int3 __attribute__((const, overloadable))convert_int3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x, divided by PI.
+ * Component wise conversion from uchar4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))atan2pi(float2 y, float2 x);
+extern int4 __attribute__((const, overloadable))convert_int4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x, divided by PI.
+ * Component wise conversion from short2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))atan2pi(float3 y, float3 x);
+extern int2 __attribute__((const, overloadable))convert_int2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the inverse tangent of y / x, divided by PI.
+ * Component wise conversion from short3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))atan2pi(float4 y, float4 x);
+extern int3 __attribute__((const, overloadable))convert_int3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cube root.
+ * Component wise conversion from short4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))cbrt(float);
+extern int4 __attribute__((const, overloadable))convert_int4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cube root.
+ * Component wise conversion from ushort2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))cbrt(float2);
+extern int2 __attribute__((const, overloadable))convert_int2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cube root.
+ * Component wise conversion from ushort3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))cbrt(float3);
+extern int3 __attribute__((const, overloadable))convert_int3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cube root.
+ * Component wise conversion from ushort4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))cbrt(float4);
+extern int4 __attribute__((const, overloadable))convert_int4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not less than a value.
+ * Component wise conversion from int2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))ceil(float);
+extern int2 __attribute__((const, overloadable))convert_int2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not less than a value.
+ * Component wise conversion from int3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))ceil(float2);
+extern int3 __attribute__((const, overloadable))convert_int3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not less than a value.
+ * Component wise conversion from int4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))ceil(float3);
+extern int4 __attribute__((const, overloadable))convert_int4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not less than a value.
+ * Component wise conversion from uint2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))ceil(float4);
+extern int2 __attribute__((const, overloadable))convert_int2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Copy the sign bit from y to x.
+ * Component wise conversion from uint3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))copysign(float x, float y);
+extern int3 __attribute__((const, overloadable))convert_int3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Copy the sign bit from y to x.
+ * Component wise conversion from uint4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))copysign(float2 x, float2 y);
+extern int4 __attribute__((const, overloadable))convert_int4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Copy the sign bit from y to x.
+ * Component wise conversion from long2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))copysign(float3 x, float3 y);
+extern int2 __attribute__((const, overloadable))convert_int2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Copy the sign bit from y to x.
+ * Component wise conversion from long3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))copysign(float4 x, float4 y);
+extern int3 __attribute__((const, overloadable))convert_int3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine.
+ * Component wise conversion from long4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))cos(float);
+extern int4 __attribute__((const, overloadable))convert_int4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine.
+ * Component wise conversion from ulong2 to int2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))cos(float2);
+extern int2 __attribute__((const, overloadable))convert_int2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine.
+ * Component wise conversion from ulong3 to int3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))cos(float3);
+extern int3 __attribute__((const, overloadable))convert_int3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine.
+ * Component wise conversion from ulong4 to int4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))cos(float4);
+extern int4 __attribute__((const, overloadable))convert_int4(ulong4);
#endif
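
Widening with convert_int4 is the usual way to do arithmetic on uchar4 pixel data without intermediate overflow. A hedged sketch (the pixel values are illustrative; convert_uchar4 is the matching narrowing overload declared elsewhere in this header):

    // Average two pixels at int precision, then narrow back to 8 bits.
    uchar4 a = {250, 10, 0, 255};
    uchar4 b = {240, 30, 0, 255};
    int4 sum = convert_int4(a) + convert_int4(b);   // 490 would overflow a uchar lane
    uchar4 avg = convert_uchar4(sum / 2);           // {245, 20, 0, 255}
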
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hypebolic cosine.
+ * Component wise conversion from float2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))cosh(float);
+extern uint2 __attribute__((const, overloadable))convert_uint2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hypebolic cosine.
+ * Component wise conversion from float3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))cosh(float2);
+extern uint3 __attribute__((const, overloadable))convert_uint3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hypebolic cosine.
+ * Component wise conversion from float4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))cosh(float3);
+extern uint4 __attribute__((const, overloadable))convert_uint4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hypebolic cosine.
+ * Component wise conversion from double2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))cosh(float4);
+extern uint2 __attribute__((const, overloadable))convert_uint2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine of the value * PI.
+ * Component wise conversion from double3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))cospi(float);
+extern uint3 __attribute__((const, overloadable))convert_uint3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine of the value * PI.
+ * Component wise conversion from double4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))cospi(float2);
+extern uint4 __attribute__((const, overloadable))convert_uint4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine of the value * PI.
+ * Component wise conversion from char2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))cospi(float3);
+extern uint2 __attribute__((const, overloadable))convert_uint2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the cosine of the value * PI.
+ * Component wise conversion from char3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))cospi(float4);
+extern uint3 __attribute__((const, overloadable))convert_uint3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the complementary error function.
+ * Component wise conversion from char4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))erfc(float);
+extern uint4 __attribute__((const, overloadable))convert_uint4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the complementary error function.
+ * Component wise conversion from uchar2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))erfc(float2);
+extern uint2 __attribute__((const, overloadable))convert_uint2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the complementary error function.
+ * Component wise conversion from uchar3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))erfc(float3);
+extern uint3 __attribute__((const, overloadable))convert_uint3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the complementary error function.
+ * Component wise conversion from uchar4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))erfc(float4);
+extern uint4 __attribute__((const, overloadable))convert_uint4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the error function.
+ * Component wise conversion from short2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))erf(float);
+extern uint2 __attribute__((const, overloadable))convert_uint2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the error function.
+ * Component wise conversion from short3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))erf(float2);
+extern uint3 __attribute__((const, overloadable))convert_uint3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the error function.
+ * Component wise conversion from short4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))erf(float3);
+extern uint4 __attribute__((const, overloadable))convert_uint4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the error function.
+ * Component wise conversion from ushort2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))erf(float4);
+extern uint2 __attribute__((const, overloadable))convert_uint2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return e ^ value.
+ * Component wise conversion from ushort3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))exp(float);
+extern uint3 __attribute__((const, overloadable))convert_uint3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return e ^ value.
+ * Component wise conversion from ushort4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))exp(float2);
+extern uint4 __attribute__((const, overloadable))convert_uint4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return e ^ value.
+ * Component wise conversion from int2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))exp(float3);
+extern uint2 __attribute__((const, overloadable))convert_uint2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return e ^ value.
+ * Component wise conversion from int3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))exp(float4);
+extern uint3 __attribute__((const, overloadable))convert_uint3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 2 ^ value.
+ * Component wise conversion from int4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))exp2(float);
+extern uint4 __attribute__((const, overloadable))convert_uint4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 2 ^ value.
+ * Component wise conversion from uint2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))exp2(float2);
+extern uint2 __attribute__((const, overloadable))convert_uint2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 2 ^ value.
+ * Component wise conversion from uint3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))exp2(float3);
+extern uint3 __attribute__((const, overloadable))convert_uint3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 2 ^ value.
+ * Component wise conversion from uint4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))exp2(float4);
+extern uint4 __attribute__((const, overloadable))convert_uint4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 10 ^ value.
+ * Component wise conversion from long2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))exp10(float);
+extern uint2 __attribute__((const, overloadable))convert_uint2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 10 ^ value.
+ * Component wise conversion from long3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))exp10(float2);
+extern uint3 __attribute__((const, overloadable))convert_uint3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 10 ^ value.
+ * Component wise conversion from long4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))exp10(float3);
+extern uint4 __attribute__((const, overloadable))convert_uint4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return 10 ^ value.
+ * Component wise conversion from ulong2 to uint2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))exp10(float4);
+extern uint2 __attribute__((const, overloadable))convert_uint2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (e ^ value) - 1.
+ * Component wise conversion from ulong3 to uint3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))expm1(float);
+extern uint3 __attribute__((const, overloadable))convert_uint3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (e ^ value) - 1.
+ * Component wise conversion from ulong4 to uint4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))expm1(float2);
+extern uint4 __attribute__((const, overloadable))convert_uint4(ulong4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (e ^ value) - 1.
+ * Component wise conversion from float2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))expm1(float3);
+extern long2 __attribute__((const, overloadable))convert_long2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (e ^ value) - 1.
+ * Component wise conversion from float3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))expm1(float4);
+extern long3 __attribute__((const, overloadable))convert_long3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Component wise conversion from float4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fabs(float);
+extern long4 __attribute__((const, overloadable))convert_long4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Component wise conversion from double2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fabs(float2);
+extern long2 __attribute__((const, overloadable))convert_long2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Component wise conversion from double3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fabs(float3);
+extern long3 __attribute__((const, overloadable))convert_long3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Component wise conversion from double4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fabs(float4);
+extern long4 __attribute__((const, overloadable))convert_long4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the positive difference between two values.
+ * Component wise conversion from char2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fdim(float, float);
+extern long2 __attribute__((const, overloadable))convert_long2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the positive difference between two values.
+ * Component wise conversion from char3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fdim(float2, float2);
+extern long3 __attribute__((const, overloadable))convert_long3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the positive difference between two values.
+ * Component wise conversion from char4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fdim(float3, float3);
+extern long4 __attribute__((const, overloadable))convert_long4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the positive difference between two values.
+ * Component wise conversion from uchar2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fdim(float4, float4);
+extern long2 __attribute__((const, overloadable))convert_long2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not greater than a value.
+ * Component wise conversion from uchar3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))floor(float);
+extern long3 __attribute__((const, overloadable))convert_long3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not greater than a value.
+ * Component wise conversion from uchar4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))floor(float2);
+extern long4 __attribute__((const, overloadable))convert_long4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not greater than a value.
+ * Component wise conversion from short2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))floor(float3);
+extern long2 __attribute__((const, overloadable))convert_long2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the smallest integer not greater than a value.
+ * Component wise conversion from short3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))floor(float4);
+extern long3 __attribute__((const, overloadable))convert_long3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (a * b) + c.
+ * Component wise conversion from short4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fma(float a, float b, float c);
+extern long4 __attribute__((const, overloadable))convert_long4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (a * b) + c.
+ * Component wise conversion from ushort2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fma(float2 a, float2 b, float2 c);
+extern long2 __attribute__((const, overloadable))convert_long2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (a * b) + c.
+ * Component wise conversion from ushort3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fma(float3 a, float3 b, float3 c);
+extern long3 __attribute__((const, overloadable))convert_long3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (a * b) + c.
+ * Component wise conversion from ushort4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fma(float4 a, float4 b, float4 c);
+extern long4 __attribute__((const, overloadable))convert_long4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from int2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fmax(float x, float y);
+extern long2 __attribute__((const, overloadable))convert_long2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from int3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fmax(float2 x, float2 y);
+extern long3 __attribute__((const, overloadable))convert_long3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from int4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fmax(float3 x, float3 y);
+extern long4 __attribute__((const, overloadable))convert_long4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from uint2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fmax(float4 x, float4 y);
+extern long2 __attribute__((const, overloadable))convert_long2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from uint3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fmax(float2 x, float y);
+extern long3 __attribute__((const, overloadable))convert_long3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from uint4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fmax(float3 x, float y);
+extern long4 __attribute__((const, overloadable))convert_long4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x < y ? y : x)
+ * Component wise conversion from long2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fmax(float4 x, float y);
+extern long2 __attribute__((const, overloadable))convert_long2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from long3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fmin(float x, float y);
+extern long3 __attribute__((const, overloadable))convert_long3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from long4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fmin(float2 x, float2 y);
+extern long4 __attribute__((const, overloadable))convert_long4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from ulong2 to long2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fmin(float3 x, float3 y);
+extern long2 __attribute__((const, overloadable))convert_long2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from ulong3 to long3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fmin(float4 x, float4 y);
+extern long3 __attribute__((const, overloadable))convert_long3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from ulong4 to long4
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fmin(float2 x, float y);
+extern long4 __attribute__((const, overloadable))convert_long4(ulong4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from float2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fmin(float3 x, float y);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x > y ? y : x)
+ * Component wise conversion from float3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fmin(float4 x, float y);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the remainder from x / y
+ * Component wise conversion from float4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fmod(float x, float y);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the remainder from x / y
+ * Component wise conversion from double2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fmod(float2 x, float2 y);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(double2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the remainder from x / y
+ * Component wise conversion from double3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fmod(float3 x, float3 y);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(double3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the remainder from x / y
+ * Component wise conversion from double4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fmod(float4 x, float4 y);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(double4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
- *
- * @param iptr iptr[0] will be set to the floor of the input value.
+ * Component wise conversion from char2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fract(float v, float *iptr);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(char2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
- *
- * @param iptr iptr[0] will be set to the floor of the input value.
+ * Component wise conversion from char3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fract(float2 v, float2 *iptr);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(char3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
- *
- * @param iptr iptr[0] will be set to the floor of the input value.
+ * Component wise conversion from char4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fract(float3 v, float3 *iptr);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(char4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
- *
- * @param iptr iptr[0] will be set to the floor of the input value.
+ * Component wise conversion from uchar2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fract(float4 v, float4 *iptr);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(uchar2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
+ * Component wise conversion from uchar3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-static float __attribute__((const, overloadable))fract(float v) {
- float unused;
- return fract(v, &unused);
-}
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(uchar3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
+ * Component wise conversion from uchar4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-static float2 __attribute__((const, overloadable))fract(float2 v) {
- float2 unused;
- return fract(v, &unused);
-}
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(uchar4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
+ * Component wise conversion from short2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-static float3 __attribute__((const, overloadable))fract(float3 v) {
- float3 unused;
- return fract(v, &unused);
-}
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(short2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return fractional part of v
+ * Component wise conversion from short3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-static float4 __attribute__((const, overloadable))fract(float4 v) {
- float4 unused;
- return fract(v, &unused);
-}
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(short3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the mantissa and place the exponent into iptr[0]
- *
- * @param v Supports float, float2, float3, float4.
+ * Component wise conversion from short4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))frexp(float v, int *iptr);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(short4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the mantissa and place the exponent into iptr[0]
- *
- * @param v Supports float, float2, float3, float4.
+ * Component wise conversion from ushort2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))frexp(float2 v, int2 *iptr);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(ushort2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the mantissa and place the exponent into iptr[0]
- *
- * @param v Supports float, float2, float3, float4.
+ * Component wise conversion from ushort3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))frexp(float3 v, int3 *iptr);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(ushort3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the mantissa and place the exponent into iptr[0]
- *
- * @param v Supports float, float2, float3, float4.
+ * Component wise conversion from ushort4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))frexp(float4 v, int4 *iptr);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(ushort4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return sqrt(x*x + y*y)
+ * Component wise conversion from int2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))hypot(float x, float y);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(int2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return sqrt(x*x + y*y)
+ * Component wise conversion from int3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))hypot(float2 x, float2 y);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(int3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return sqrt(x*x + y*y)
+ * Component wise conversion from int4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))hypot(float3 x, float3 y);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(int4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return sqrt(x*x + y*y)
+ * Component wise conversion from uint2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))hypot(float4 x, float4 y);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(uint2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integer exponent of a value
+ * Component wise conversion from uint3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern int __attribute__((const, overloadable))ilogb(float);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(uint3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integer exponent of a value
+ * Component wise conversion from uint4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))ilogb(float2);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(uint4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integer exponent of a value
+ * Component wise conversion from long2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))ilogb(float3);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(long2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integer exponent of a value
+ * Component wise conversion from long3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))ilogb(float4);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(long3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Component wise conversion from long4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))ldexp(float x, int y);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(long4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Component wise conversion from ulong2 to ulong2
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))ldexp(float2 x, int2 y);
+extern ulong2 __attribute__((const, overloadable))convert_ulong2(ulong2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Component wise conversion from ulong3 to ulong3
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))ldexp(float3 x, int3 y);
+extern ulong3 __attribute__((const, overloadable))convert_ulong3(ulong3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Component wise conversion from ulong4 to ulong4
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))ldexp(float4 x, int4 y);
+extern ulong4 __attribute__((const, overloadable))convert_ulong4(ulong4);
#endif
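
The convert_* declarations above (together with their siblings earlier in this header) pair every scalar element type with every other in vector widths 2, 3 and 4. A typical round trip through float for per-pixel work might look like this sketch (convert_float4 and convert_uchar4 are the matching overloads declared elsewhere in the header; the pixel values are illustrative):

    // Normalise a pixel to [0,1], halve its brightness, narrow back.
    uchar4 in = {200, 100, 50, 255};
    float4 f = convert_float4(in) / 255.0f;
    f *= 0.5f;                                 // alpha is halved too; kept simple on purpose
    uchar4 out = convert_uchar4(f * 255.0f);   // roughly {100, 50, 25, 127}
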
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Copy the sign bit from y to x.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))ldexp(float2 x, int y);
+extern float __attribute__((const, overloadable))copysign(float x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Copy the sign bit from y to x.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))ldexp(float3 x, int y);
+extern float2 __attribute__((const, overloadable))copysign(float2 x, float2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (x * 2^y)
- *
- * @param x Supports 1,2,3,4 components
- * @param y Supports single component or matching vector.
+ * Copy the sign bit from y to x.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))ldexp(float4 x, int y);
+extern float3 __attribute__((const, overloadable))copysign(float3 x, float3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Copy the sign bit from y to x.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))lgamma(float x);
+extern float4 __attribute__((const, overloadable))copysign(float4 x, float4 y);
#endif
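
copysign keeps the magnitude of x and takes only the sign bit of y, which makes it a branch-free way to give one value the sign of another. A small sketch (values illustrative):

    float  a = copysign(3.0f, -0.0f);   // -3.0f: even negative zero contributes its sign bit
    float2 x = {1.0f, 2.0f};
    float2 y = {-5.0f, 5.0f};
    float2 r = copysign(x, y);          // {-1.0f, 2.0f}
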
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))lgamma(float2 x);
+extern float __attribute__((const, overloadable))cos(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))lgamma(float3 x);
+extern float2 __attribute__((const, overloadable))cos(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))lgamma(float4 x);
+extern float3 __attribute__((const, overloadable))cos(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))lgamma(float x, int *y);
+extern float4 __attribute__((const, overloadable))cos(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the hyperbolic cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))lgamma(float2 x, int2 *y);
+extern float __attribute__((const, overloadable))cosh(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the hyperbolic cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))lgamma(float3 x, int3 *y);
+extern float2 __attribute__((const, overloadable))cosh(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the hyperbolic cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))lgamma(float4 x, int4 *y);
+extern float3 __attribute__((const, overloadable))cosh(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm.
+ * Return the hyperbolic cosine.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))log(float x);
+extern float4 __attribute__((const, overloadable))cosh(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm.
+ * Return the cosine of the value * PI.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))log(float2 x);
+extern float __attribute__((const, overloadable))cospi(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm.
+ * Return the cosine of the value * PI.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))log(float3 x);
+extern float2 __attribute__((const, overloadable))cospi(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm.
+ * Return the cosine of the value * PI.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))log(float4 x);
+extern float3 __attribute__((const, overloadable))cospi(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 2 logarithm.
+ * Return the cosine of the value * PI.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))log2(float x);
+extern float4 __attribute__((const, overloadable))cospi(float4);
#endif
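
cospi(v) returns the cosine of v * PI, so arguments are expressed in half-turns; for arguments such as 0.5f or 1.0f this avoids passing an already-rounded multiple of PI to cos. A small sketch (values illustrative):

    float c0 = cospi(1.0f);   // cos(PI)   == -1.0f
    float c1 = cospi(0.5f);   // cos(PI/2) ==  0.0f
    float h  = cosh(0.0f);    // 1.0f: the hyperbolic cosine at zero
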
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 2 logarithm.
+ * Compute the cross product of two vectors.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))log2(float2 x);
+extern float3 __attribute__((const, overloadable))cross(float3 lhs, float3 rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 2 logarithm.
+ * Compute the cross product of two vectors.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))log2(float3 x);
+extern float4 __attribute__((const, overloadable))cross(float4 lhs, float4 rhs);
#endif
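
cross is the usual right-handed 3-D cross product over the x, y and z components. The float4 overload also operates on the first three components; presumably the w lane of the result is 0.0f, following the OpenCL convention, though this header does not say. Sketch:

    float3 x = {1.0f, 0.0f, 0.0f};
    float3 y = {0.0f, 1.0f, 0.0f};
    float3 z = cross(x, y);   // {0.0f, 0.0f, 1.0f}: x cross y gives z in a right-handed basis
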
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 2 logarithm.
+ * Convert from radians to degrees.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))log2(float4 x);
+extern float __attribute__((const, overloadable))degrees(float value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 10 logarithm.
+ * Convert from radians to degrees.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))log10(float x);
+extern float2 __attribute__((const, overloadable))degrees(float2 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 10 logarithm.
+ * Convert from radians to degrees.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))log10(float2 x);
+extern float3 __attribute__((const, overloadable))degrees(float3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 10 logarithm.
+ * Convert from radians to degrees.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))log10(float3 x);
+extern float4 __attribute__((const, overloadable))degrees(float4 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the base 10 logarithm.
+ * Compute the distance between two points.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))log10(float4 x);
+extern float __attribute__((const, overloadable))distance(float lhs, float rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm of (v + 1.0f)
+ * Compute the distance between two points.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))log1p(float x);
+extern float __attribute__((const, overloadable))distance(float2 lhs, float2 rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm of (v + 1.0f)
+ * Compute the distance between two points.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))log1p(float2 x);
+extern float __attribute__((const, overloadable))distance(float3 lhs, float3 rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm of (v + 1.0f)
+ * Compute the distance between two points.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))log1p(float3 x);
+extern float __attribute__((const, overloadable))distance(float4 lhs, float4 rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the natural logarithm of (v + 1.0f)
+ * Compute the dot product of two vectors.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))log1p(float4 x);
+extern float __attribute__((const, overloadable))dot(float lhs, float rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the exponent of the value.
+ * Compute the dot product of two vectors.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))logb(float x);
+extern float __attribute__((const, overloadable))dot(float2 lhs, float2 rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the exponent of the value.
+ * Compute the dot product of two vectors.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))logb(float2 x);
+extern float __attribute__((const, overloadable))dot(float3 lhs, float3 rhs);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the exponent of the value.
+ * Compute the dot product of two vectors.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))logb(float3 x);
+extern float __attribute__((const, overloadable))dot(float4 lhs, float4 rhs);
#endif
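
distance(a, b) is equivalent to length(a - b), and dot underlies angle tests; a sketch using both, with invented names and normalize() assumed from elsewhere in this header.

static float cosAngle(float3 a, float3 b) {
    /* For unit vectors, the dot product is the cosine of the angle between them. */
    return dot(normalize(a), normalize(b));
}

static int withinRadius(float3 p, float3 q, float r) {
    return distance(p, q) <= r;  /* 1 inside the radius, 0 outside */
}
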
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the exponent of the value.
+ * Return the error function.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))logb(float4 x);
+extern float __attribute__((const, overloadable))erf(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute (a * b) + c
+ * Return the error function.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))mad(float a, float b, float c);
+extern float2 __attribute__((const, overloadable))erf(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute (a * b) + c
+ * Return the error function.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))mad(float2 a, float2 b, float2 c);
+extern float3 __attribute__((const, overloadable))erf(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute (a * b) + c
+ * Return the error function.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))mad(float3 a, float3 b, float3 c);
+extern float4 __attribute__((const, overloadable))erf(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute (a * b) + c
+ * Return the complementary error function.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))mad(float4 a, float4 b, float4 c);
+extern float __attribute__((const, overloadable))erfc(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integral and fractional components of a number.
- *
- * @param x Source value
- * @param iret iret[0] will be set to the integral portion of the number.
- * @return The floating point portion of the value.
+ * Return the complementary error function.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))modf(float x, float *iret);
+extern float2 __attribute__((const, overloadable))erfc(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integral and fractional components of a number.
- *
- * @param x Source value
- * @param iret iret[0] will be set to the integral portion of the number.
- * @return The floating point portion of the value.
+ * Return the complementary error function.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))modf(float2 x, float2 *iret);
+extern float3 __attribute__((const, overloadable))erfc(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integral and fractional components of a number.
- *
- * @param x Source value
- * @param iret iret[0] will be set to the integral portion of the number.
- * @return The floating point portion of the value.
+ * Return the complementary error function.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))modf(float3 x, float3 *iret);
+extern float4 __attribute__((const, overloadable))erfc(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the integral and fractional components of a number.
- *
- * @param x Source value
- * @param iret iret[0] will be set to the integral portion of the number.
- * @return The floating point portion of the value.
+ * Return e ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))modf(float4 x, float4 *iret);
+extern float __attribute__((const, overloadable))exp(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Generate a NaN.
+ * Return e ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))nan(uint);
+extern float2 __attribute__((const, overloadable))exp(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the next floating point number from x towards y.
+ * Return e ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))nextafter(float x, float y);
+extern float3 __attribute__((const, overloadable))exp(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the next floating point number from x towards y.
+ * Return e ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))nextafter(float2 x, float2 y);
+extern float4 __attribute__((const, overloadable))exp(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the next floating point number from x towards y.
+ * Return 10 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))nextafter(float3 x, float3 y);
+extern float __attribute__((const, overloadable))exp10(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the next floating point number from x towards y.
+ * Return 10 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))nextafter(float4 x, float4 y);
+extern float2 __attribute__((const, overloadable))exp10(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return 10 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))pow(float x, float y);
+extern float3 __attribute__((const, overloadable))exp10(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return 10 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))pow(float2 x, float2 y);
+extern float4 __attribute__((const, overloadable))exp10(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return 2 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))pow(float3 x, float3 y);
+extern float __attribute__((const, overloadable))exp2(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return 2 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))pow(float4 x, float4 y);
+extern float2 __attribute__((const, overloadable))exp2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return 2 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))pown(float x, int y);
+extern float3 __attribute__((const, overloadable))exp2(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return 2 ^ value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))pown(float2 x, int2 y);
+extern float4 __attribute__((const, overloadable))exp2(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return (e ^ value) - 1.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))pown(float3 x, int3 y);
+extern float __attribute__((const, overloadable))expm1(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
+ * Return (e ^ value) - 1.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))pown(float4 x, int4 y);
+extern float2 __attribute__((const, overloadable))expm1(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
- * y must be > 0
+ * Return (e ^ value) - 1.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))powr(float x, float y);
+extern float3 __attribute__((const, overloadable))expm1(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
- * y must be > 0
+ * Return (e ^ value) - 1.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))powr(float2 x, float2 y);
+extern float4 __attribute__((const, overloadable))expm1(float4);
#endif
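
A numerical-accuracy sketch for expm1, with an invented name:

static float compoundGrowth(float rate) {
    /* For tiny rate, exp(rate) - 1.0f cancels away almost every significant
     * bit; expm1 computes the same quantity without the cancellation. */
    return expm1(rate);
}
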
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
- * y must be > 0
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))powr(float3 x, float3 y);
+extern float __attribute__((const, overloadable))fabs(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return x ^ y.
- * y must be > 0
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))powr(float4 x, float4 y);
+extern float2 __attribute__((const, overloadable))fabs(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round x/y to the nearest integer, then compute the remainder.
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))remainder(float x, float y);
+extern float3 __attribute__((const, overloadable))fabs(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round x/y to the nearest integer, then compute the remainder.
+ * Return the absolute value of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))remainder(float2 x, float2 y);
+extern float4 __attribute__((const, overloadable))fabs(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_distance(float lhs, float rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_distance(float2 lhs, float2 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_distance(float3 lhs, float3 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_distance(float4 lhs, float4 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_length(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_length(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_length(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_length(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Approximately normalize a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))fast_normalize(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Approximately normalize a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float2 __attribute__((const, overloadable))fast_normalize(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Approximately normalize a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float3 __attribute__((const, overloadable))fast_normalize(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Approximately normalize a vector.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float4 __attribute__((const, overloadable))fast_normalize(float4 v);
#endif
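
The fast_* entry points trade a few bits of precision for speed. A minimal API 17+ kernel sketch; package and kernel names are placeholders.

#pragma version(1)
#pragma rs java_package_name(com.example.rs)  // hypothetical package

float3 __attribute__((kernel)) shadeNormal(float3 n) {
    /* An approximate normalize is usually adequate for per-pixel lighting,
     * less so for accumulated geometric computation. */
    return fast_normalize(n);
}
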
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round x/y to the nearest integer, then compute the remainder.
+ * Return the positive difference between two values.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))remainder(float3 x, float3 y);
+extern float __attribute__((const, overloadable))fdim(float a, float b);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round x/y to the nearest integer, then compute the remainder.
+ * Return the positive difference between two values.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))remainder(float4 x, float4 y);
+extern float2 __attribute__((const, overloadable))fdim(float2 a, float2 b);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * todo
+ * Return the positive difference between two values.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))remquo(float, float, int *);
+extern float3 __attribute__((const, overloadable))fdim(float3 a, float3 b);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * todo
+ * Return the positive difference between two values.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))remquo(float2, float2, int2 *);
+extern float4 __attribute__((const, overloadable))fdim(float4 a, float4 b);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * todo
+ * Return the smallest integer not greater than a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))remquo(float3, float3, int3 *);
+extern float __attribute__((const, overloadable))floor(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * todo
+ * Return the smallest integer not greater than a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))remquo(float4, float4, int4 *);
+extern float2 __attribute__((const, overloadable))floor(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value.
+ * Return the smallest integer not greater than a value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))rint(float);
+extern float3 __attribute__((const, overloadable))floor(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value.
+ * Return the smallest integer not greater than a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))rint(float2);
+extern float4 __attribute__((const, overloadable))floor(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value.
+ * Return (a * b) + c.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))rint(float3);
+extern float __attribute__((const, overloadable))fma(float a, float b, float c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value.
+ * Return (a * b) + c.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))rint(float4);
+extern float2 __attribute__((const, overloadable))fma(float2 a, float2 b, float2 c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the Nth root of a value.
+ * Return (a * b) + c.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))rootn(float v, int n);
+extern float3 __attribute__((const, overloadable))fma(float3 a, float3 b, float3 c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the Nth root of a value.
+ * Return (a * b) + c.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))rootn(float2 v, int2 n);
+extern float4 __attribute__((const, overloadable))fma(float4 a, float4 b, float4 c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the Nth root of a value.
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))rootn(float3 v, int3 n);
+extern float __attribute__((const, overloadable))fmax(float x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the Nth root of a value.
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))rootn(float4 v, int4 n);
+extern float2 __attribute__((const, overloadable))fmax(float2 x, float2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value. Half values are rounded away from zero.
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))round(float);
+extern float3 __attribute__((const, overloadable))fmax(float3 x, float3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value. Half values are rounded away from zero.
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))round(float2);
+extern float4 __attribute__((const, overloadable))fmax(float4 x, float4 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value. Half values are rounded away from zero.
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))round(float3);
+extern float2 __attribute__((const, overloadable))fmax(float2 x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to the nearest integral value. Half values are rounded away from zero.
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))round(float4);
+extern float3 __attribute__((const, overloadable))fmax(float3 x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (1 / sqrt(value)).
+ * Return (x < y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))rsqrt(float);
+extern float4 __attribute__((const, overloadable))fmax(float4 x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (1 / sqrt(value)).
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))rsqrt(float2);
+extern float __attribute__((const, overloadable))fmin(float x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (1 / sqrt(value)).
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))rsqrt(float3);
+extern float2 __attribute__((const, overloadable))fmin(float2 x, float2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return (1 / sqrt(value)).
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))rsqrt(float4);
+extern float3 __attribute__((const, overloadable))fmin(float3 x, float3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the square root of a value.
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))sqrt(float);
+extern float4 __attribute__((const, overloadable))fmin(float4 x, float4 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the square root of a value.
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))sqrt(float2);
+extern float2 __attribute__((const, overloadable))fmin(float2 x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the square root of a value.
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))sqrt(float3);
+extern float3 __attribute__((const, overloadable))fmin(float3 x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the square root of a value.
+ * Return (x > y ? y : x)
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))sqrt(float4);
+extern float4 __attribute__((const, overloadable))fmin(float4 x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine of a value specified in radians.
+ * Return the remainder from x / y
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))sin(float);
+extern float __attribute__((const, overloadable))fmod(float x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine of a value specified in radians.
+ * Return the remainder from x / y
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))sin(float2);
+extern float2 __attribute__((const, overloadable))fmod(float2 x, float2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine of a value specified in radians.
+ * Return the remainder from x / y
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))sin(float3);
+extern float3 __attribute__((const, overloadable))fmod(float3 x, float3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine of a value specified in radians.
+ * Return the remainder from x / y
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))sin(float4);
+extern float4 __attribute__((const, overloadable))fmod(float4 x, float4 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine and cosine of a value.
+ * Return the fractional part of v.
*
- * @return sine
- * @param v The incoming value in radians
- * @param *cosptr cosptr[0] will be set to the cosine value.
+ * @param floor floor[0] will be set to the floor of the input value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))sincos(float v, float *cosptr);
+extern float __attribute__((overloadable))fract(float v, float *floor);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine and cosine of a value.
+ * Return the fractional part of v.
*
- * @return sine
- * @param v The incoming value in radians
- * @param *cosptr cosptr[0] will be set to the cosine value.
+ * @param floor floor[0] will be set to the floor of the input value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))sincos(float2 v, float2 *cosptr);
+extern float2 __attribute__((overloadable))fract(float2 v, float2 *floor);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine and cosine of a value.
+ * Return the fractional part of v.
*
- * @return sine
- * @param v The incoming value in radians
- * @param *cosptr cosptr[0] will be set to the cosine value.
+ * @param floor floor[0] will be set to the floor of the input value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))sincos(float3 v, float3 *cosptr);
+extern float3 __attribute__((overloadable))fract(float3 v, float3 *floor);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sine and cosine of a value.
+ * Return the fractional part of v.
*
- * @return sine
- * @param v The incoming value in radians
- * @param *cosptr cosptr[0] will be set to the cosine value.
+ * @param floor floor[0] will be set to the floor of the input value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))sincos(float4 v, float4 *cosptr);
+extern float4 __attribute__((overloadable))fract(float4 v, float4 *floor);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic sine of a value specified in radians.
+ * Return the fractional part of v.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))sinh(float);
+static float __attribute__((const, overloadable))fract(float v) {
+ float unused;
+ return fract(v, &unused);
+}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic sine of a value specified in radians.
+ * Return the fractional part of v.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))sinh(float2);
+static float2 __attribute__((const, overloadable))fract(float2 v) {
+ float2 unused;
+ return fract(v, &unused);
+}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic sine of a value specified in radians.
+ * Return the fractional part of v.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))sinh(float3);
+static float3 __attribute__((const, overloadable))fract(float3 v) {
+ float3 unused;
+ return fract(v, &unused);
+}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic sine of a value specified in radians.
+ * Return the fractional part of v.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))sinh(float4);
+static float4 __attribute__((const, overloadable))fract(float4 v) {
+ float4 unused;
+ return fract(v, &unused);
+}
#endif
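
A sketch of both fract forms, with invented names:

static float2 wrapUV(float2 uv) {
    /* The one-argument form wraps coordinates into [0, 1). */
    return fract(uv);
}

static float splitValue(float v, float *ipart) {
    /* The two-argument form also hands back floor(v) through the pointer. */
    return fract(v, ipart);
}
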
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sin(v * PI).
+ * Return the mantissa and place the exponent into iptr[0]
+ *
+ * @param v Supports float, float2, float3, float4.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))sinpi(float);
+extern float __attribute__((overloadable))frexp(float v, int *iptr);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sin(v * PI).
+ * Return the mantissa and place the exponent into iptr[0]
+ *
+ * @param v Supports float, float2, float3, float4.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))sinpi(float2);
+extern float2 __attribute__((overloadable))frexp(float2 v, int2 *iptr);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sin(v * PI).
+ * Return the mantissa and place the exponent into iptr[0]
+ *
+ * @param v Supports float, float2, float3, float4.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))sinpi(float3);
+extern float3 __attribute__((overloadable))frexp(float3 v, int3 *iptr);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the sin(v * PI).
+ * Return the mantissa and place the exponent into iptr[0]
+ *
+ * @param v Supports float, float2, float3, float4.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))sinpi(float4);
+extern float4 __attribute__((overloadable))frexp(float4 v, int4 *iptr);
#endif
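
frexp pairs with ldexp (declared further down in this header) to split a float into mantissa and exponent and rebuild it; a round-trip sketch with an invented name.

static float frexpRoundTrip(float v) {
    int e;
    float m = frexp(v, &e);  /* v == m * 2^e, |m| in [0.5, 1) for nonzero v */
    return ldexp(m, e);      /* reconstructs the original value */
}
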
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
/*
- * Return the tangent of a value.
+ * Return the approximate reciprocal of a value.
*
- * Supported by API versions 9 and newer.
+ * Supported by API versions 17 and newer.
*/
-extern float __attribute__((const, overloadable))tan(float);
+extern float __attribute__((const, overloadable))half_recip(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
/*
- * Return the tangent of a value.
+ * Return the approximate reciprocal of a value.
*
- * Supported by API versions 9 and newer.
+ * Supported by API versions 17 and newer.
*/
-extern float2 __attribute__((const, overloadable))tan(float2);
+extern float2 __attribute__((const, overloadable))half_recip(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
/*
- * Return the tangent of a value.
+ * Return the approximate reciprocal of a value.
*
- * Supported by API versions 9 and newer.
+ * Supported by API versions 17 and newer.
*/
-extern float3 __attribute__((const, overloadable))tan(float3);
+extern float3 __attribute__((const, overloadable))half_recip(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
/*
- * Return the tangent of a value.
+ * Return the approximate reciprocal of a value.
*
- * Supported by API versions 9 and newer.
+ * Supported by API versions 17 and newer.
*/
-extern float4 __attribute__((const, overloadable))tan(float4);
+extern float4 __attribute__((const, overloadable))half_recip(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 9))
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
/*
- * Return the hyperbolic tangent of a value.
+ * Return the approximate value of (1.f / sqrt(value)).
*
- * Supported by API versions 9 and newer.
+ * Supported by API versions 17 and newer.
*/
-extern float __attribute__((const, overloadable))tanh(float);
+extern float __attribute__((const, overloadable))half_rsqrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate value of (1.f / sqrt(value)).
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float2 __attribute__((const, overloadable))half_rsqrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate value of (1.f / sqrt(value)).
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float3 __attribute__((const, overloadable))half_rsqrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate value of (1.f / sqrt(value)).
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float4 __attribute__((const, overloadable))half_rsqrt(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate square root of a value.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float __attribute__((const, overloadable))half_sqrt(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate square root of a value.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float2 __attribute__((const, overloadable))half_sqrt(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate square root of a value.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float3 __attribute__((const, overloadable))half_sqrt(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+/*
+ * Return the approximate square root of a value.
+ *
+ * Supported by API versions 17 and newer.
+ */
+extern float4 __attribute__((const, overloadable))half_sqrt(float4 v);
#endif
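
The half_* helpers compose; a sketch building an approximate normalize from half_rsqrt (API 17+, invented name), close in spirit to fast_normalize above.

static float3 approxNormalize(float3 v) {
    /* Scale by an approximate 1/sqrt of the squared length. */
    return v * half_rsqrt(dot(v, v));
}
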
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic tangent of a value.
+ * Return sqrt(x*x + y*y)
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))tanh(float2);
+extern float __attribute__((const, overloadable))hypot(float x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic tangent of a value.
+ * Return sqrt(x*x + y*y)
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))tanh(float3);
+extern float2 __attribute__((const, overloadable))hypot(float2 x, float2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the hyperbolic tangent of a value.
+ * Return sqrt(x*x + y*y)
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))tanh(float4);
+extern float3 __attribute__((const, overloadable))hypot(float3 x, float3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return tan(v * PI)
+ * Return sqrt(x*x + y*y)
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))tanpi(float);
+extern float4 __attribute__((const, overloadable))hypot(float4 x, float4 y);
#endif
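
hypot is more than shorthand for the formula in its comment; a sketch of the robustness it is meant to provide (invented name).

static float magnitude2d(float x, float y) {
    /* The naive sqrt(x*x + y*y) can overflow to +inf even when the true
     * result is representable; hypot exists to compute it more robustly. */
    return hypot(x, y);
}
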
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return tan(v * PI)
+ * Return the integer exponent of a value
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))tanpi(float2);
+extern int __attribute__((const, overloadable))ilogb(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return tan(v * PI)
+ * Return the integer exponent of a value
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))tanpi(float3);
+extern int2 __attribute__((const, overloadable))ilogb(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return tan(v * PI)
+ * Return the integer exponent of a value
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))tanpi(float4);
+extern int3 __attribute__((const, overloadable))ilogb(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the gamma function of a value.
+ * Return the integer exponent of a value
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))tgamma(float);
+extern int4 __attribute__((const, overloadable))ilogb(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the gamma function of a value.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))tgamma(float2);
+extern float __attribute__((const, overloadable))ldexp(float x, int y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the gamma function of a value.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))tgamma(float3);
+extern float2 __attribute__((const, overloadable))ldexp(float2 x, int2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the gamma function of a value.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))tgamma(float4);
+extern float3 __attribute__((const, overloadable))ldexp(float3 x, int3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to integral using truncation.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))trunc(float);
+extern float4 __attribute__((const, overloadable))ldexp(float4 x, int4 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to integral using truncation.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))trunc(float2);
+extern float2 __attribute__((const, overloadable))ldexp(float2 x, int y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to integral using truncation.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))trunc(float3);
+extern float3 __attribute__((const, overloadable))ldexp(float3 x, int y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Round to integral using truncation.
+ * Return (x * 2^y)
+ *
+ * @param x Supports 1,2,3,4 components
+ * @param y Supports single component or matching vector.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))trunc(float4);
+extern float4 __attribute__((const, overloadable))ldexp(float4 x, int y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Compute the length of a vector.
*
* Supported by API versions 9 and newer.
*/
-extern uchar __attribute__((const, overloadable))abs(char value);
+extern float __attribute__((const, overloadable))length(float v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Compute the length of a vector.
*
* Supported by API versions 9 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))abs(char2 value);
+extern float __attribute__((const, overloadable))length(float2 v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Compute the length of a vector.
*
* Supported by API versions 9 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))abs(char3 value);
+extern float __attribute__((const, overloadable))length(float3 v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Compute the length of a vector.
*
* Supported by API versions 9 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))abs(char4 value);
+extern float __attribute__((const, overloadable))length(float4 v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern ushort __attribute__((const, overloadable))abs(short value);
+extern float __attribute__((const, overloadable))lgamma(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))abs(short2 value);
+extern float2 __attribute__((const, overloadable))lgamma(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))abs(short3 value);
+extern float3 __attribute__((const, overloadable))lgamma(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))abs(short4 value);
+extern float4 __attribute__((const, overloadable))lgamma(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern uint __attribute__((const, overloadable))abs(int value);
+extern float __attribute__((overloadable))lgamma(float x, int *y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))abs(int2 value);
+extern float2 __attribute__((overloadable))lgamma(float2 x, int2 *y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))abs(int3 value);
+extern float3 __attribute__((overloadable))lgamma(float3 x, int3 *y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the absolute value of a value.
+ * Return the log gamma and sign
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))abs(int4 value);
+extern float4 __attribute__((overloadable))lgamma(float4 x, int4 *y);
#endif
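
The pointer overloads report the sign of gamma separately from log|gamma|; a sketch recovering gamma itself (invented name; ignores overflow for large x).

static float gammaOf(float x) {
    int sign;
    float lg = lgamma(x, &sign);  /* log(|gamma(x)|); sign lands in the pointer */
    return sign * exp(lg);        /* gamma(x) = sign * e^(log|gamma(x)|) */
}
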
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern char __attribute__((const, overloadable))clz(char value);
+extern float __attribute__((const, overloadable))log(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern char2 __attribute__((const, overloadable))clz(char2 value);
+extern float2 __attribute__((const, overloadable))log(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))clz(char3 value);
+extern float3 __attribute__((const, overloadable))log(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern char4 __attribute__((const, overloadable))clz(char4 value);
+extern float4 __attribute__((const, overloadable))log(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 10 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern uchar __attribute__((const, overloadable))clz(uchar value);
+extern float __attribute__((const, overloadable))log10(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 10 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))clz(uchar2 value);
+extern float2 __attribute__((const, overloadable))log10(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 10 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))clz(uchar3 value);
+extern float3 __attribute__((const, overloadable))log10(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 10 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))clz(uchar4 value);
+extern float4 __attribute__((const, overloadable))log10(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm of (v + 1.0f)
*
* Supported by API versions 9 and newer.
*/
-extern short __attribute__((const, overloadable))clz(short value);
+extern float __attribute__((const, overloadable))log1p(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm of (v + 1.0f)
*
* Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))clz(short2 value);
+extern float2 __attribute__((const, overloadable))log1p(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm of (v + 1.0f)
*
* Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))clz(short3 value);
+extern float3 __attribute__((const, overloadable))log1p(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the natural logarithm of (v + 1.0f)
*
* Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))clz(short4 value);
+extern float4 __attribute__((const, overloadable))log1p(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 2 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern ushort __attribute__((const, overloadable))clz(ushort value);
+extern float __attribute__((const, overloadable))log2(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 2 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))clz(ushort2 value);
+extern float2 __attribute__((const, overloadable))log2(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 2 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))clz(ushort3 value);
+extern float3 __attribute__((const, overloadable))log2(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Return the base 2 logarithm.
*
* Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))clz(ushort4 value);
+extern float4 __attribute__((const, overloadable))log2(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute the exponent of the value.
*
* Supported by API versions 9 and newer.
*/
-extern int __attribute__((const, overloadable))clz(int value);
+extern float __attribute__((const, overloadable))logb(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute the exponent of the value.
*
* Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))clz(int2 value);
+extern float2 __attribute__((const, overloadable))logb(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute the exponent of the value.
*
* Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))clz(int3 value);
+extern float3 __attribute__((const, overloadable))logb(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute the exponent of the value.
*
* Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))clz(int4 value);
+extern float4 __attribute__((const, overloadable))logb(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute (a * b) + c
*
* Supported by API versions 9 and newer.
*/
-extern uint __attribute__((const, overloadable))clz(uint value);
+extern float __attribute__((const, overloadable))mad(float a, float b, float c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute (a * b) + c
*
* Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))clz(uint2 value);
+extern float2 __attribute__((const, overloadable))mad(float2 a, float2 b, float2 c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute (a * b) + c
*
* Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))clz(uint3 value);
+extern float3 __attribute__((const, overloadable))mad(float3 a, float3 b, float3 c);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the number of leading 0-bits in a value.
+ * Compute (a * b) + c
*
* Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))clz(uint4 value);
+extern float4 __attribute__((const, overloadable))mad(float4 a, float4 b, float4 c);
#endif
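
mad versus fma, sketched with an invented name: both compute (a * b) + c, but they make different promises.

static float scaleAdd(float a, float x, float y) {
    /* mad lets the implementation choose fused or separate multiply-add,
     * whichever is faster; prefer fma when a single rounding step matters. */
    return mad(a, x, y);
}
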
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))min(float, float);
+extern float __attribute__((const, overloadable))max(float, float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))min(float2, float2);
+extern float2 __attribute__((const, overloadable))max(float2, float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))min(float3, float3);
+extern float3 __attribute__((const, overloadable))max(float3, float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))min(float4, float4);
+extern float4 __attribute__((const, overloadable))max(float4, float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static char __attribute__((const, overloadable))min(char v1, char v2) {
- return (v1 < v2 ? v1 : v2);
+static char __attribute__((const, overloadable))max(char v1, char v2) {
+ return (v1 > v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static uchar __attribute__((const, overloadable))min(uchar v1, uchar v2) {
- return (v1 < v2 ? v1 : v2);
+static uchar __attribute__((const, overloadable))max(uchar v1, uchar v2) {
+ return (v1 > v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static short __attribute__((const, overloadable))min(short v1, short v2) {
- return (v1 < v2 ? v1 : v2);
+static short __attribute__((const, overloadable))max(short v1, short v2) {
+ return (v1 > v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static ushort __attribute__((const, overloadable))min(ushort v1, ushort v2) {
- return (v1 < v2 ? v1 : v2);
+static ushort __attribute__((const, overloadable))max(ushort v1, ushort v2) {
+ return (v1 > v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static int __attribute__((const, overloadable))min(int v1, int v2) {
- return (v1 < v2 ? v1 : v2);
+static int __attribute__((const, overloadable))max(int v1, int v2) {
+ return (v1 > v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static uint __attribute__((const, overloadable))min(uint v1, uint v2) {
- return (v1 < v2 ? v1 : v2);
+static uint __attribute__((const, overloadable))max(uint v1, uint v2) {
+ return (v1 > v2 ? v1 : v2);
}
#endif
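
These inline integer overloads compose like the float ones; a clamp sketch with an invented name, assuming the matching integer min overloads defined elsewhere in this header (the header's own clamp covers the same ground).

static int clampInt(int v, int lo, int hi) {
    /* min/max chain into a clamp to [lo, hi]. */
    return min(max(v, lo), hi);
}
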
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static char2 __attribute__((const, overloadable))min(char2 v1, char2 v2) {
+static char2 __attribute__((const, overloadable))max(char2 v1, char2 v2) {
char2 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19
*/
-static uchar2 __attribute__((const, overloadable))min(uchar2 v1, uchar2 v2) {
+static uchar2 __attribute__((const, overloadable))max(uchar2 v1, uchar2 v2) {
uchar2 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short2 __attribute__((const, overloadable))min(short2 v1, short2 v2) {
+static short2 __attribute__((const, overloadable))max(short2 v1, short2 v2) {
short2 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort2 __attribute__((const, overloadable))min(ushort2 v1, ushort2 v2) {
+static ushort2 __attribute__((const, overloadable))max(ushort2 v1, ushort2 v2) {
ushort2 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int2 __attribute__((const, overloadable))min(int2 v1, int2 v2) {
+static int2 __attribute__((const, overloadable))max(int2 v1, int2 v2) {
int2 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint2 __attribute__((const, overloadable))min(uint2 v1, uint2 v2) {
+static uint2 __attribute__((const, overloadable))max(uint2 v1, uint2 v2) {
uint2 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static char3 __attribute__((const, overloadable))min(char3 v1, char3 v2) {
+static char3 __attribute__((const, overloadable))max(char3 v1, char3 v2) {
char3 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uchar3 __attribute__((const, overloadable))min(uchar3 v1, uchar3 v2) {
+static uchar3 __attribute__((const, overloadable))max(uchar3 v1, uchar3 v2) {
uchar3 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short3 __attribute__((const, overloadable))min(short3 v1, short3 v2) {
+static short3 __attribute__((const, overloadable))max(short3 v1, short3 v2) {
short3 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort3 __attribute__((const, overloadable))min(ushort3 v1, ushort3 v2) {
+static ushort3 __attribute__((const, overloadable))max(ushort3 v1, ushort3 v2) {
ushort3 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int3 __attribute__((const, overloadable))min(int3 v1, int3 v2) {
+static int3 __attribute__((const, overloadable))max(int3 v1, int3 v2) {
int3 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint3 __attribute__((const, overloadable))min(uint3 v1, uint3 v2) {
+static uint3 __attribute__((const, overloadable))max(uint3 v1, uint3 v2) {
uint3 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static char4 __attribute__((const, overloadable))min(char4 v1, char4 v2) {
+static char4 __attribute__((const, overloadable))max(char4 v1, char4 v2) {
char4 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
- tmp.w = (v1.w < v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w > v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uchar4 __attribute__((const, overloadable))min(uchar4 v1, uchar4 v2) {
+static uchar4 __attribute__((const, overloadable))max(uchar4 v1, uchar4 v2) {
uchar4 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
- tmp.w = (v1.w < v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w > v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short4 __attribute__((const, overloadable))min(short4 v1, short4 v2) {
+static short4 __attribute__((const, overloadable))max(short4 v1, short4 v2) {
short4 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
- tmp.w = (v1.w < v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w > v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort4 __attribute__((const, overloadable))min(ushort4 v1, ushort4 v2) {
+static ushort4 __attribute__((const, overloadable))max(ushort4 v1, ushort4 v2) {
ushort4 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
- tmp.w = (v1.w < v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w > v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int4 __attribute__((const, overloadable))min(int4 v1, int4 v2) {
+static int4 __attribute__((const, overloadable))max(int4 v1, int4 v2) {
int4 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
- tmp.w = (v1.w < v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w > v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint4 __attribute__((const, overloadable))min(uint4 v1, uint4 v2) {
+static uint4 __attribute__((const, overloadable))max(uint4 v1, uint4 v2) {
uint4 tmp;
- tmp.x = (v1.x < v2.x ? v1.x : v2.x);
- tmp.y = (v1.y < v2.y ? v1.y : v2.y);
- tmp.z = (v1.z < v2.z ? v1.z : v2.z);
- tmp.w = (v1.w < v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x > v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w > v2.w ? v1.w : v2.w);
return tmp;
}
#endif
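
For API levels 9 - 19 these integer max() overloads are inline statics that reduce to the per-component selections shown above. A minimal usage sketch in a script (the helper name is hypothetical, not part of the header):

    /* max() picks per component: with v = {3, 9} the result is {5, 9}. */
    static int2 at_least_five(int2 v) {
        int2 floor2 = {5, 5};
        return max(v, floor2);  /* resolves to the inline int2 overload above */
    }
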
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char __attribute__((const, overloadable))min(char v1, char v2);
+extern char __attribute__((const, overloadable))max(char v1, char v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char2 __attribute__((const, overloadable))min(char2 v1, char2 v2);
+extern char2 __attribute__((const, overloadable))max(char2 v1, char2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char3 __attribute__((const, overloadable))min(char3 v1, char3 v2);
+extern char3 __attribute__((const, overloadable))max(char3 v1, char3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char4 __attribute__((const, overloadable))min(char4 v1, char4 v2);
+extern char4 __attribute__((const, overloadable))max(char4 v1, char4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar __attribute__((const, overloadable))min(uchar v1, uchar v2);
+extern uchar __attribute__((const, overloadable))max(uchar v1, uchar v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))min(uchar2 v1, uchar2 v2);
+extern uchar2 __attribute__((const, overloadable))max(uchar2 v1, uchar2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))min(uchar3 v1, uchar3 v2);
+extern uchar3 __attribute__((const, overloadable))max(uchar3 v1, uchar3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))min(uchar4 v1, uchar4 v2);
+extern uchar4 __attribute__((const, overloadable))max(uchar4 v1, uchar4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short __attribute__((const, overloadable))min(short v1, short v2);
+extern short __attribute__((const, overloadable))max(short v1, short v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short2 __attribute__((const, overloadable))min(short2 v1, short2 v2);
+extern short2 __attribute__((const, overloadable))max(short2 v1, short2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short3 __attribute__((const, overloadable))min(short3 v1, short3 v2);
+extern short3 __attribute__((const, overloadable))max(short3 v1, short3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short4 __attribute__((const, overloadable))min(short4 v1, short4 v2);
+extern short4 __attribute__((const, overloadable))max(short4 v1, short4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort __attribute__((const, overloadable))min(ushort v1, ushort v2);
+extern ushort __attribute__((const, overloadable))max(ushort v1, ushort v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))min(ushort2 v1, ushort2 v2);
+extern ushort2 __attribute__((const, overloadable))max(ushort2 v1, ushort2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))min(ushort3 v1, ushort3 v2);
+extern ushort3 __attribute__((const, overloadable))max(ushort3 v1, ushort3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))min(ushort4 v1, ushort4 v2);
+extern ushort4 __attribute__((const, overloadable))max(ushort4 v1, ushort4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int __attribute__((const, overloadable))min(int v1, int v2);
+extern int __attribute__((const, overloadable))max(int v1, int v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int2 __attribute__((const, overloadable))min(int2 v1, int2 v2);
+extern int2 __attribute__((const, overloadable))max(int2 v1, int2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int3 __attribute__((const, overloadable))min(int3 v1, int3 v2);
+extern int3 __attribute__((const, overloadable))max(int3 v1, int3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int4 __attribute__((const, overloadable))min(int4 v1, int4 v2);
+extern int4 __attribute__((const, overloadable))max(int4 v1, int4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint __attribute__((const, overloadable))min(uint v1, uint v2);
+extern uint __attribute__((const, overloadable))max(uint v1, uint v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint2 __attribute__((const, overloadable))min(uint2 v1, uint2 v2);
+extern uint2 __attribute__((const, overloadable))max(uint2 v1, uint2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint3 __attribute__((const, overloadable))min(uint3 v1, uint3 v2);
+extern uint3 __attribute__((const, overloadable))max(uint3 v1, uint3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint4 __attribute__((const, overloadable))min(uint4 v1, uint4 v2);
+extern uint4 __attribute__((const, overloadable))max(uint4 v1, uint4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long __attribute__((const, overloadable))min(long v1, long v2);
+extern long __attribute__((const, overloadable))max(long v1, long v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long2 __attribute__((const, overloadable))min(long2 v1, long2 v2);
+extern long2 __attribute__((const, overloadable))max(long2 v1, long2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long3 __attribute__((const, overloadable))min(long3 v1, long3 v2);
+extern long3 __attribute__((const, overloadable))max(long3 v1, long3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long4 __attribute__((const, overloadable))min(long4 v1, long4 v2);
+extern long4 __attribute__((const, overloadable))max(long4 v1, long4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong __attribute__((const, overloadable))min(ulong v1, ulong v2);
+extern ulong __attribute__((const, overloadable))max(ulong v1, ulong v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))min(ulong2 v1, ulong2 v2);
+extern ulong2 __attribute__((const, overloadable))max(ulong2 v1, ulong2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))min(ulong3 v1, ulong3 v2);
+extern ulong3 __attribute__((const, overloadable))max(ulong3 v1, ulong3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the minimum value from two arguments
+ * Return the maximum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))min(ulong4 v1, ulong4 v2);
+extern ulong4 __attribute__((const, overloadable))max(ulong4 v1, ulong4 v2);
#endif
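
From API 20 onward the integer overloads are extern functions supplied by the runtime instead of inline statics, and 64-bit long/ulong variants appear for the first time. A sketch that depends on them (hypothetical helper; only compiles when RS_VERSION >= 20):

    #if (defined(RS_VERSION) && (RS_VERSION >= 20))
    /* The long overload of max() exists only at API 20 and newer. */
    static long max_with_zero(long v) {
        return max(v, 0L);
    }
    #endif
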
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))max(float, float);
+extern float __attribute__((const, overloadable))min(float, float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))max(float2, float2);
+extern float2 __attribute__((const, overloadable))min(float2, float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))max(float3, float3);
+extern float3 __attribute__((const, overloadable))min(float3, float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))max(float4, float4);
+extern float4 __attribute__((const, overloadable))min(float4, float4);
#endif
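
The floating-point min() and max() overloads go back to API 9, so the common saturate pattern needs no version guard. A minimal sketch (hypothetical helper name):

    /* Pin v to [0, 1]: saturate01(1.5f) == 1.f, saturate01(-0.2f) == 0.f. */
    static float saturate01(float v) {
        return min(max(v, 0.f), 1.f);
    }
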
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static char __attribute__((const, overloadable))max(char v1, char v2) {
- return (v1 > v2 ? v1 : v2);
+static char __attribute__((const, overloadable))min(char v1, char v2) {
+ return (v1 < v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uchar __attribute__((const, overloadable))max(uchar v1, uchar v2) {
- return (v1 > v2 ? v1 : v2);
+static uchar __attribute__((const, overloadable))min(uchar v1, uchar v2) {
+ return (v1 < v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short __attribute__((const, overloadable))max(short v1, short v2) {
- return (v1 > v2 ? v1 : v2);
+static short __attribute__((const, overloadable))min(short v1, short v2) {
+ return (v1 < v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort __attribute__((const, overloadable))max(ushort v1, ushort v2) {
- return (v1 > v2 ? v1 : v2);
+static ushort __attribute__((const, overloadable))min(ushort v1, ushort v2) {
+ return (v1 < v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int __attribute__((const, overloadable))max(int v1, int v2) {
- return (v1 > v2 ? v1 : v2);
+static int __attribute__((const, overloadable))min(int v1, int v2) {
+ return (v1 < v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint __attribute__((const, overloadable))max(uint v1, uint v2) {
- return (v1 > v2 ? v1 : v2);
+static uint __attribute__((const, overloadable))min(uint v1, uint v2) {
+ return (v1 < v2 ? v1 : v2);
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static char2 __attribute__((const, overloadable))max(char2 v1, char2 v2) {
+static char2 __attribute__((const, overloadable))min(char2 v1, char2 v2) {
char2 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uchar2 __attribute__((const, overloadable))max(uchar2 v1, uchar2 v2) {
+static uchar2 __attribute__((const, overloadable))min(uchar2 v1, uchar2 v2) {
uchar2 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short2 __attribute__((const, overloadable))max(short2 v1, short2 v2) {
+static short2 __attribute__((const, overloadable))min(short2 v1, short2 v2) {
short2 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort2 __attribute__((const, overloadable))max(ushort2 v1, ushort2 v2) {
+static ushort2 __attribute__((const, overloadable))min(ushort2 v1, ushort2 v2) {
ushort2 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int2 __attribute__((const, overloadable))max(int2 v1, int2 v2) {
+static int2 __attribute__((const, overloadable))min(int2 v1, int2 v2) {
int2 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint2 __attribute__((const, overloadable))max(uint2 v1, uint2 v2) {
+static uint2 __attribute__((const, overloadable))min(uint2 v1, uint2 v2) {
uint2 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static char3 __attribute__((const, overloadable))max(char3 v1, char3 v2) {
+static char3 __attribute__((const, overloadable))min(char3 v1, char3 v2) {
char3 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uchar3 __attribute__((const, overloadable))max(uchar3 v1, uchar3 v2) {
+static uchar3 __attribute__((const, overloadable))min(uchar3 v1, uchar3 v2) {
uchar3 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short3 __attribute__((const, overloadable))max(short3 v1, short3 v2) {
+static short3 __attribute__((const, overloadable))min(short3 v1, short3 v2) {
short3 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort3 __attribute__((const, overloadable))max(ushort3 v1, ushort3 v2) {
+static ushort3 __attribute__((const, overloadable))min(ushort3 v1, ushort3 v2) {
ushort3 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int3 __attribute__((const, overloadable))max(int3 v1, int3 v2) {
+static int3 __attribute__((const, overloadable))min(int3 v1, int3 v2) {
int3 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint3 __attribute__((const, overloadable))max(uint3 v1, uint3 v2) {
+static uint3 __attribute__((const, overloadable))min(uint3 v1, uint3 v2) {
uint3 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static char4 __attribute__((const, overloadable))max(char4 v1, char4 v2) {
+static char4 __attribute__((const, overloadable))min(char4 v1, char4 v2) {
char4 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
- tmp.w = (v1.w > v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w < v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uchar4 __attribute__((const, overloadable))max(uchar4 v1, uchar4 v2) {
+static uchar4 __attribute__((const, overloadable))min(uchar4 v1, uchar4 v2) {
uchar4 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
- tmp.w = (v1.w > v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w < v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static short4 __attribute__((const, overloadable))max(short4 v1, short4 v2) {
+static short4 __attribute__((const, overloadable))min(short4 v1, short4 v2) {
short4 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
- tmp.w = (v1.w > v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w < v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static ushort4 __attribute__((const, overloadable))max(ushort4 v1, ushort4 v2) {
+static ushort4 __attribute__((const, overloadable))min(ushort4 v1, ushort4 v2) {
ushort4 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
- tmp.w = (v1.w > v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w < v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static int4 __attribute__((const, overloadable))max(int4 v1, int4 v2) {
+static int4 __attribute__((const, overloadable))min(int4 v1, int4 v2) {
int4 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
- tmp.w = (v1.w > v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w < v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9) && (RS_VERSION <= 19))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
 * Supported by API versions 9 - 19.
*/
-static uint4 __attribute__((const, overloadable))max(uint4 v1, uint4 v2) {
+static uint4 __attribute__((const, overloadable))min(uint4 v1, uint4 v2) {
uint4 tmp;
- tmp.x = (v1.x > v2.x ? v1.x : v2.x);
- tmp.y = (v1.y > v2.y ? v1.y : v2.y);
- tmp.z = (v1.z > v2.z ? v1.z : v2.z);
- tmp.w = (v1.w > v2.w ? v1.w : v2.w);
+ tmp.x = (v1.x < v2.x ? v1.x : v2.x);
+ tmp.y = (v1.y < v2.y ? v1.y : v2.y);
+ tmp.z = (v1.z < v2.z ? v1.z : v2.z);
+ tmp.w = (v1.w < v2.w ? v1.w : v2.w);
return tmp;
}
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char __attribute__((const, overloadable))max(char v1, char v2);
+extern char __attribute__((const, overloadable))min(char v1, char v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char2 __attribute__((const, overloadable))max(char2 v1, char2 v2);
+extern char2 __attribute__((const, overloadable))min(char2 v1, char2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char3 __attribute__((const, overloadable))max(char3 v1, char3 v2);
+extern char3 __attribute__((const, overloadable))min(char3 v1, char3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern char4 __attribute__((const, overloadable))max(char4 v1, char4 v2);
+extern char4 __attribute__((const, overloadable))min(char4 v1, char4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar __attribute__((const, overloadable))max(uchar v1, uchar v2);
+extern uchar __attribute__((const, overloadable))min(uchar v1, uchar v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))max(uchar2 v1, uchar2 v2);
+extern uchar2 __attribute__((const, overloadable))min(uchar2 v1, uchar2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))max(uchar3 v1, uchar3 v2);
+extern uchar3 __attribute__((const, overloadable))min(uchar3 v1, uchar3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))max(uchar4 v1, uchar4 v2);
+extern uchar4 __attribute__((const, overloadable))min(uchar4 v1, uchar4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short __attribute__((const, overloadable))max(short v1, short v2);
+extern short __attribute__((const, overloadable))min(short v1, short v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short2 __attribute__((const, overloadable))max(short2 v1, short2 v2);
+extern short2 __attribute__((const, overloadable))min(short2 v1, short2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short3 __attribute__((const, overloadable))max(short3 v1, short3 v2);
+extern short3 __attribute__((const, overloadable))min(short3 v1, short3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern short4 __attribute__((const, overloadable))max(short4 v1, short4 v2);
+extern short4 __attribute__((const, overloadable))min(short4 v1, short4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort __attribute__((const, overloadable))max(ushort v1, ushort v2);
+extern ushort __attribute__((const, overloadable))min(ushort v1, ushort v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))max(ushort2 v1, ushort2 v2);
+extern ushort2 __attribute__((const, overloadable))min(ushort2 v1, ushort2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))max(ushort3 v1, ushort3 v2);
+extern ushort3 __attribute__((const, overloadable))min(ushort3 v1, ushort3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))max(ushort4 v1, ushort4 v2);
+extern ushort4 __attribute__((const, overloadable))min(ushort4 v1, ushort4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int __attribute__((const, overloadable))max(int v1, int v2);
+extern int __attribute__((const, overloadable))min(int v1, int v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int2 __attribute__((const, overloadable))max(int2 v1, int2 v2);
+extern int2 __attribute__((const, overloadable))min(int2 v1, int2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int3 __attribute__((const, overloadable))max(int3 v1, int3 v2);
+extern int3 __attribute__((const, overloadable))min(int3 v1, int3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern int4 __attribute__((const, overloadable))max(int4 v1, int4 v2);
+extern int4 __attribute__((const, overloadable))min(int4 v1, int4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint __attribute__((const, overloadable))max(uint v1, uint v2);
+extern uint __attribute__((const, overloadable))min(uint v1, uint v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint2 __attribute__((const, overloadable))max(uint2 v1, uint2 v2);
+extern uint2 __attribute__((const, overloadable))min(uint2 v1, uint2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint3 __attribute__((const, overloadable))max(uint3 v1, uint3 v2);
+extern uint3 __attribute__((const, overloadable))min(uint3 v1, uint3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern uint4 __attribute__((const, overloadable))max(uint4 v1, uint4 v2);
+extern uint4 __attribute__((const, overloadable))min(uint4 v1, uint4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long __attribute__((const, overloadable))max(long v1, long v2);
+extern long __attribute__((const, overloadable))min(long v1, long v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long2 __attribute__((const, overloadable))max(long2 v1, long2 v2);
+extern long2 __attribute__((const, overloadable))min(long2 v1, long2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long3 __attribute__((const, overloadable))max(long3 v1, long3 v2);
+extern long3 __attribute__((const, overloadable))min(long3 v1, long3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern long4 __attribute__((const, overloadable))max(long4 v1, long4 v2);
+extern long4 __attribute__((const, overloadable))min(long4 v1, long4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong __attribute__((const, overloadable))max(ulong v1, ulong v2);
+extern ulong __attribute__((const, overloadable))min(ulong v1, ulong v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))max(ulong2 v1, ulong2 v2);
+extern ulong2 __attribute__((const, overloadable))min(ulong2 v1, ulong2 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))max(ulong3 v1, ulong3 v2);
+extern ulong3 __attribute__((const, overloadable))min(ulong3 v1, ulong3 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Return the maximum value from two arguments
+ * Return the minimum value from two arguments
*
* Supported by API versions 20 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))max(ulong4 v1, ulong4 v2);
+extern ulong4 __attribute__((const, overloadable))min(ulong4 v1, ulong4 v2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))clamp(float value, float min_value, float max_value);
+extern float __attribute__((const, overloadable))mix(float start, float stop, float amount);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))clamp(float2 value, float2 min_value, float2 max_value);
+extern float2 __attribute__((const, overloadable))mix(float2 start, float2 stop, float2 amount);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))clamp(float3 value, float3 min_value, float3 max_value);
+extern float3 __attribute__((const, overloadable))mix(float3 start, float3 stop, float3 amount);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))clamp(float4 value, float4 min_value, float4 max_value);
+extern float4 __attribute__((const, overloadable))mix(float4 start, float4 stop, float4 amount);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))clamp(float2 value, float min_value, float max_value);
+extern float2 __attribute__((const, overloadable))mix(float2 start, float2 stop, float amount);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))clamp(float3 value, float min_value, float max_value);
+extern float3 __attribute__((const, overloadable))mix(float3 start, float3 stop, float amount);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return start + ((stop - start) * amount).
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))clamp(float4 value, float min_value, float max_value);
+extern float4 __attribute__((const, overloadable))mix(float4 start, float4 stop, float amount);
#endif
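
mix() is plain linear interpolation, e.g. mix(2.f, 10.f, 0.25f) == 2.f + (10.f - 2.f) * 0.25f == 4.f. A minimal sketch using the float4/scalar-amount overload (hypothetical helper name):

    /* Crossfade two colors: t == 0.f yields a, t == 1.f yields b. */
    static float4 crossfade(float4 a, float4 b, float t) {
        return mix(a, b, t);
    }
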
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
+ * Return the integral and fractional components of a number.
*
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * @param x Source value
+ * @param iret iret[0] will be set to the integral portion of the number.
+ * @return The fractional portion of the value.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern char __attribute__((const, overloadable))clamp(char value, char min_value, char max_value);
+extern float __attribute__((overloadable))modf(float x, float *iret);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
+ * Return the integral and fractional components of a number.
*
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * @param x Source value
+ * @param iret iret[0] will be set to the integral portion of the number.
+ * @return The fractional portion of the value.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern char2 __attribute__((const, overloadable))clamp(char2 value, char2 min_value, char2 max_value);
+extern float2 __attribute__((overloadable))modf(float2 x, float2 *iret);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
+ * Return the integral and fractional components of a number.
*
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * @param x Source value
+ * @param iret iret[0] will be set to the integral portion of the number.
+ * @return The fractional portion of the value.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))clamp(char3 value, char3 min_value, char3 max_value);
+extern float3 __attribute__((overloadable))modf(float3 x, float3 *iret);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
+ * Return the integral and fractional components of a number.
*
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * @param x Source value
+ * @param iret iret[0] will be set to the integral portion of the number.
+ * @return The fractional portion of the value.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern char4 __attribute__((const, overloadable))clamp(char4 value, char4 min_value, char4 max_value);
+extern float4 __attribute__((overloadable))modf(float4 x, float4 *iret);
#endif
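
modf() splits a value so that x == integral part + returned fraction, with both parts carrying the sign of x: modf(3.75f, &i) returns 0.75f and stores 3.f in i, while modf(-3.75f, &i) returns -0.75f and stores -3.f. A minimal sketch (hypothetical helper name):

    /* Return only the fractional part of x, discarding the integral part. */
    static float frac_part(float x) {
        float ipart;
        return modf(x, &ipart);  /* afterwards x == ipart + result */
    }
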
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Generate a NaN.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uchar __attribute__((const, overloadable))clamp(uchar value, uchar min_value, uchar max_value);
+extern float __attribute__((const, overloadable))nan(uint);
#endif
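
A NaN produced by nan() compares unequal to every value, itself included, which gives the usual self-comparison test. A minimal sketch (hypothetical helper name):

    /* True only for NaN values such as nan(0). */
    static bool is_nan_f(float x) {
        return x != x;
    }
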
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp.
+ * Valid for inputs -86.f to 86.f.
+ * Max 8192 ulps of error.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))clamp(uchar2 value, uchar2 min_value, uchar2 max_value);
+extern float __attribute__((const, overloadable))native_exp(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp
+ * valid for inputs -86.f to 86.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))clamp(uchar3 value, uchar3 min_value, uchar3 max_value);
+extern float2 __attribute__((const, overloadable))native_exp(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp
+ * valid for inputs -86.f to 86.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))clamp(uchar4 value, uchar4 min_value, uchar4 max_value);
+extern float3 __attribute__((const, overloadable))native_exp(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp
+ * valid for inputs -86.f to 86.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern short __attribute__((const, overloadable))clamp(short value, short min_value, short max_value);
+extern float4 __attribute__((const, overloadable))native_exp(float4 v);
#endif
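
A sketch contrasting native_exp with the full-precision exp declared elsewhere in this header (the same pattern applies to the native_exp10 and native_exp2 overloads below):

    float fast = native_exp(2.0f);   /* e^2 to within 8192 ulps */
    float slow = exp(2.0f);          /* full-precision e^2 */
    float diff = fabs(fast - slow);  /* small, but typically nonzero */
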
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp10
+ * valid for inputs -37.f to 37.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern short2 __attribute__((const, overloadable))clamp(short2 value, short2 min_value, short2 max_value);
+extern float __attribute__((const, overloadable))native_exp10(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp10
+ * valid for inputs -37.f to 37.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern short3 __attribute__((const, overloadable))clamp(short3 value, short3 min_value, short3 max_value);
+extern float2 __attribute__((const, overloadable))native_exp10(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp10
+ * valid for inputs -37.f to 37.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern short4 __attribute__((const, overloadable))clamp(short4 value, short4 min_value, short4 max_value);
+extern float3 __attribute__((const, overloadable))native_exp10(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp10
+ * valid for inputs -37.f to 37.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ushort __attribute__((const, overloadable))clamp(ushort value, ushort min_value, ushort max_value);
+extern float4 __attribute__((const, overloadable))native_exp10(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp2
+ * valid for inputs -125.f to 125.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))clamp(ushort2 value, ushort2 min_value, ushort2 max_value);
+extern float __attribute__((const, overloadable))native_exp2(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp2
+ * valid for inputs -125.f to 125.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))clamp(ushort3 value, ushort3 min_value, ushort3 max_value);
+extern float2 __attribute__((const, overloadable))native_exp2(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp2
+ * valid for inputs -125.f to 125.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))clamp(ushort4 value, ushort4 min_value, ushort4 max_value);
+extern float3 __attribute__((const, overloadable))native_exp2(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate exp2
+ * valid for inputs -125.f to 125.f
+ * Max 8192 ulps of error
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern int __attribute__((const, overloadable))clamp(int value, int min_value, int max_value);
+extern float4 __attribute__((const, overloadable))native_exp2(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern int2 __attribute__((const, overloadable))clamp(int2 value, int2 min_value, int2 max_value);
+extern float __attribute__((const, overloadable))native_log(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern int3 __attribute__((const, overloadable))clamp(int3 value, int3 min_value, int3 max_value);
+extern float2 __attribute__((const, overloadable))native_log(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern int4 __attribute__((const, overloadable))clamp(int4 value, int4 min_value, int4 max_value);
+extern float3 __attribute__((const, overloadable))native_log(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uint __attribute__((const, overloadable))clamp(uint value, uint min_value, uint max_value);
+extern float4 __attribute__((const, overloadable))native_log(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log10
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uint2 __attribute__((const, overloadable))clamp(uint2 value, uint2 min_value, uint2 max_value);
+extern float __attribute__((const, overloadable))native_log10(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log10
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uint3 __attribute__((const, overloadable))clamp(uint3 value, uint3 min_value, uint3 max_value);
+extern float2 __attribute__((const, overloadable))native_log10(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log10
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern uint4 __attribute__((const, overloadable))clamp(uint4 value, uint4 min_value, uint4 max_value);
+extern float3 __attribute__((const, overloadable))native_log10(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log10
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern long __attribute__((const, overloadable))clamp(long value, long min_value, long max_value);
+extern float4 __attribute__((const, overloadable))native_log10(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log2
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern long2 __attribute__((const, overloadable))clamp(long2 value, long2 min_value, long2 max_value);
+extern float __attribute__((const, overloadable))native_log2(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log2
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern long3 __attribute__((const, overloadable))clamp(long3 value, long3 min_value, long3 max_value);
+extern float2 __attribute__((const, overloadable))native_log2(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log2
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern long4 __attribute__((const, overloadable))clamp(long4 value, long4 min_value, long4 max_value);
+extern float3 __attribute__((const, overloadable))native_log2(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate log2
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ulong __attribute__((const, overloadable))clamp(ulong value, ulong min_value, ulong max_value);
+extern float4 __attribute__((const, overloadable))native_log2(float4 v);
#endif
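
A sketch of the native_log family; the header does not state their behavior for non-positive inputs, so this example sticks to positive values:

    float ln   = native_log(8.0f);      /* approx. 2.079f (ln 8) */
    float lg2  = native_log2(8.0f);     /* approx. 3.0f */
    float lg10 = native_log10(100.0f);  /* approx. 2.0f */
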
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate v ^ y
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))clamp(ulong2 value, ulong2 min_value, ulong2 max_value);
+extern float __attribute__((const, overloadable))native_powr(float v, float y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate v ^ y
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))clamp(ulong3 value, ulong3 min_value, ulong3 max_value);
+extern float2 __attribute__((const, overloadable))native_powr(float2 v, float2 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate v ^ y
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))clamp(ulong4 value, ulong4 min_value, ulong4 max_value);
+extern float3 __attribute__((const, overloadable))native_powr(float3 v, float3 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Fast approximate v ^ y
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 18 and newer.
*/
-extern char2 __attribute__((const, overloadable))clamp(char2 value, char min_value, char max_value);
+extern float4 __attribute__((const, overloadable))native_powr(float4 v, float4 y);
#endif
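
A sketch of native_powr; like powr below, it is presumably meant for a non-negative base:

    float p = native_powr(2.0f, 10.0f);  /* approx. 1024.0f */
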
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return the next floating point number from x towards y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern char3 __attribute__((const, overloadable))clamp(char3 value, char min_value, char max_value);
+extern float __attribute__((const, overloadable))nextafter(float x, float y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return the next floating point number from x towards y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern char4 __attribute__((const, overloadable))clamp(char4 value, char min_value, char max_value);
+extern float2 __attribute__((const, overloadable))nextafter(float2 x, float2 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return the next floating point number from x towards y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uchar2 __attribute__((const, overloadable))clamp(uchar2 value, uchar min_value, uchar max_value);
+extern float3 __attribute__((const, overloadable))nextafter(float3 x, float3 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return the next floating point number from x towards y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uchar3 __attribute__((const, overloadable))clamp(uchar3 value, uchar min_value, uchar max_value);
+extern float4 __attribute__((const, overloadable))nextafter(float4 x, float4 y);
#endif
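
A sketch of nextafter stepping one representable float toward y:

    float up   = nextafter(1.0f, 2.0f);  /* smallest float greater than 1.0f */
    float down = nextafter(1.0f, 0.0f);  /* largest float less than 1.0f */
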
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Normalize a vector.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uchar4 __attribute__((const, overloadable))clamp(uchar4 value, uchar min_value, uchar max_value);
+extern float __attribute__((const, overloadable))normalize(float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Normalize a vector.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern short2 __attribute__((const, overloadable))clamp(short2 value, short min_value, short max_value);
+extern float2 __attribute__((const, overloadable))normalize(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Normalize a vector.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern short3 __attribute__((const, overloadable))clamp(short3 value, short min_value, short max_value);
+extern float3 __attribute__((const, overloadable))normalize(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Normalize a vector.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern short4 __attribute__((const, overloadable))clamp(short4 value, short min_value, short max_value);
+extern float4 __attribute__((const, overloadable))normalize(float4 v);
#endif
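
A sketch of normalize producing a unit-length vector (for the scalar overload the result is just the sign of v):

    float2 v = {3.0f, 4.0f};
    float2 u = normalize(v);  /* {0.6f, 0.8f}; length(u) == 1.0f */
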
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern ushort2 __attribute__((const, overloadable))clamp(ushort2 value, ushort min_value, ushort max_value);
+extern float __attribute__((const, overloadable))pow(float x, float y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern ushort3 __attribute__((const, overloadable))clamp(ushort3 value, ushort min_value, ushort max_value);
+extern float2 __attribute__((const, overloadable))pow(float2 x, float2 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern ushort4 __attribute__((const, overloadable))clamp(ushort4 value, ushort min_value, ushort max_value);
+extern float3 __attribute__((const, overloadable))pow(float3 x, float3 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern int2 __attribute__((const, overloadable))clamp(int2 value, int min_value, int max_value);
+extern float4 __attribute__((const, overloadable))pow(float4 x, float4 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern int3 __attribute__((const, overloadable))clamp(int3 value, int min_value, int max_value);
+extern float __attribute__((const, overloadable))pown(float x, int y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern int4 __attribute__((const, overloadable))clamp(int4 value, int min_value, int max_value);
+extern float2 __attribute__((const, overloadable))pown(float2 x, int2 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uint2 __attribute__((const, overloadable))clamp(uint2 value, uint min_value, uint max_value);
+extern float3 __attribute__((const, overloadable))pown(float3 x, int3 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uint3 __attribute__((const, overloadable))clamp(uint3 value, uint min_value, uint max_value);
+extern float4 __attribute__((const, overloadable))pown(float4 x, int4 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
+ * x must be >= 0
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern uint4 __attribute__((const, overloadable))clamp(uint4 value, uint min_value, uint max_value);
+extern float __attribute__((const, overloadable))powr(float x, float y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
+ * x must be >= 0
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern long2 __attribute__((const, overloadable))clamp(long2 value, long min_value, long max_value);
+extern float2 __attribute__((const, overloadable))powr(float2 x, float2 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
+ * x must be >= 0
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern long3 __attribute__((const, overloadable))clamp(long3 value, long min_value, long max_value);
+extern float3 __attribute__((const, overloadable))powr(float3 x, float3 y);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Return x ^ y.
+ * x must be >= 0
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern long4 __attribute__((const, overloadable))clamp(long4 value, long min_value, long max_value);
+extern float4 __attribute__((const, overloadable))powr(float4 x, float4 y);
#endif
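
A sketch distinguishing the three power functions declared above: pow accepts any float exponent, pown takes an integer exponent, and powr additionally restricts the base:

    float a = pow(2.0f, 0.5f);   /* sqrt(2); any x and y */
    float b = pown(2.0f, 10);    /* 1024.0f; integer exponent */
    float c = powr(2.0f, 0.5f);  /* like pow, but x must be >= 0 */
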
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Convert from degrees to radians.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern ulong2 __attribute__((const, overloadable))clamp(ulong2 value, ulong min_value, ulong max_value);
+extern float __attribute__((const, overloadable))radians(float value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Convert from degrees to radians.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern ulong3 __attribute__((const, overloadable))clamp(ulong3 value, ulong min_value, ulong max_value);
+extern float2 __attribute__((const, overloadable))radians(float2 value);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 19))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Clamp a value to a specified high and low bound.
- *
- * @param amount value to be clamped. Supports 1,2,3,4 components
- * @param min_value Lower bound, must be scalar or matching vector.
- * @param max_value High bound, must match type of low
+ * Convert from degrees to radians.
*
- * Supported by API versions 19 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern ulong4 __attribute__((const, overloadable))clamp(ulong4 value, ulong min_value, ulong max_value);
+extern float3 __attribute__((const, overloadable))radians(float3 value);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from radians to degrees.
+ * Convert from degrees to radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))degrees(float value);
+extern float4 __attribute__((const, overloadable))radians(float4 value);
#endif
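
A sketch of radians, the inverse of the degrees conversion declared elsewhere in this header:

    float r = radians(180.0f);  /* approx. 3.14159f (pi) */
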
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from radians to degrees.
+ * Round x/y to the nearest integer, then compute the remainder.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))degrees(float2 value);
+extern float __attribute__((const, overloadable))remainder(float x, float y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from radians to degrees.
+ * Round x/y to the nearest integer, then compute the remainder.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))degrees(float3 value);
+extern float2 __attribute__((const, overloadable))remainder(float2 x, float2 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from radians to degrees.
+ * Round x/y to the nearest integer, then compute the remainder.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))degrees(float4 value);
+extern float3 __attribute__((const, overloadable))remainder(float3 x, float3 y);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Round x/y to the nearest integer, then compute the remainder.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))mix(float start, float stop, float amount);
+extern float4 __attribute__((const, overloadable))remainder(float4 x, float4 y);
#endif
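
A sketch of remainder next to fmod; because the quotient is rounded to nearest rather than truncated, the result can be negative even for positive inputs:

    float r = remainder(5.5f, 2.0f);  /* -0.5f: quotient rounds to 3 */
    float m = fmod(5.5f, 2.0f);       /*  1.5f: quotient truncates to 2 */
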
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Return the remainder of b / c, computed as remainder() does, and store
+ * the sign and low-order bits of the integral quotient in d[0].
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))mix(float2 start, float2 stop, float2 amount);
+extern float __attribute__((overloadable))remquo(float b, float c, int *d);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Return the remainder of b / c, computed as remainder() does, and store
+ * the sign and low-order bits of the integral quotient in d[0].
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))mix(float3 start, float3 stop, float3 amount);
+extern float2 __attribute__((overloadable))remquo(float2 b, float2 c, int2 *d);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Return the remainder of b / c, computed as remainder() does, and store
+ * the sign and low-order bits of the integral quotient in d[0].
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))mix(float4 start, float4 stop, float4 amount);
+extern float3 __attribute__((overloadable))remquo(float3 b, float3 c, int3 *d);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Return the remainder of b / c, computed as remainder() does, and store
+ * the sign and low-order bits of the integral quotient in d[0].
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))mix(float2 start, float2 stop, float amount);
+extern float4 __attribute__((overloadable))remquo(float4 b, float4 c, int4 *d);
#endif
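
A sketch of remquo under the usual C99 semantics assumed above:

    int q;
    float r = remquo(10.0f, 3.0f, &q);  /* r == 1.0f; q carries quotient 3 */
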
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Round to the nearest integral value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))mix(float3 start, float3 stop, float amount);
+extern float __attribute__((const, overloadable))rint(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * return start + ((stop - start) * amount)
+ * Round to the nearest integral value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))mix(float4 start, float4 stop, float amount);
+extern float2 __attribute__((const, overloadable))rint(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from degrees to radians.
+ * Round to the nearest integral value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))radians(float value);
+extern float3 __attribute__((const, overloadable))rint(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from degrees to radians.
+ * Round to the nearest integral value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))radians(float2 value);
+extern float4 __attribute__((const, overloadable))rint(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from degrees to radians.
+ * Compute the Nth root of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))radians(float3 value);
+extern float __attribute__((const, overloadable))rootn(float v, int n);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Convert from degrees to radians.
+ * Compute the Nth root of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))radians(float4 value);
+extern float2 __attribute__((const, overloadable))rootn(float2 v, int2 n);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Compute the Nth root of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))step(float edge, float v);
+extern float3 __attribute__((const, overloadable))rootn(float3 v, int3 n);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Compute the Nth root of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))step(float2 edge, float2 v);
+extern float4 __attribute__((const, overloadable))rootn(float4 v, int4 n);
#endif
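
A sketch of rootn computing an integer root:

    float c = rootn(27.0f, 3);  /* 3.0f: cube root */
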
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Round to the nearest integral value. Half values are rounded away from zero.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))step(float3 edge, float3 v);
+extern float __attribute__((const, overloadable))round(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Round to the nearest integral value. Half values are rounded away from zero.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))step(float4 edge, float4 v);
+extern float2 __attribute__((const, overloadable))round(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Round to the nearest integral value. Half values are rounded away from zero.
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))step(float2 edge, float v);
+extern float3 __attribute__((const, overloadable))round(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Round to the nearest integral value. Half values are rounded away from zero.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))step(float3 edge, float v);
+extern float4 __attribute__((const, overloadable))round(float4);
#endif
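
A sketch contrasting rint with round on a halfway value (rint is assumed to follow the default round-to-nearest-even mode):

    float a = rint(2.5f);   /* 2.0f under round-to-nearest-even */
    float b = round(2.5f);  /* 3.0f: halves round away from zero */
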
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Return (1 / sqrt(value)).
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))step(float4 edge, float v);
+extern float __attribute__((const, overloadable))rsqrt(float);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Return (1 / sqrt(value)).
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))step(float edge, float2 v);
+extern float2 __attribute__((const, overloadable))rsqrt(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Return (1 / sqrt(value)).
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))step(float edge, float3 v);
+extern float3 __attribute__((const, overloadable))rsqrt(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * if (v < edge)
- * return 0.f;
- * else
- * return 1.f;
+ * Return (1 / sqrt(value)).
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))step(float edge, float4 v);
+extern float4 __attribute__((const, overloadable))rsqrt(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
@@ -8003,656 +8163,498 @@ extern float4 __attribute__((const, overloadable))sign(float4 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the cross product of two vectors.
+ * Return the sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))cross(float3 lhs, float3 rhs);
+extern float __attribute__((const, overloadable))sin(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the cross product of two vectors.
+ * Return the sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))cross(float4 lhs, float4 rhs);
+extern float2 __attribute__((const, overloadable))sin(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the dot product of two vectors.
+ * Return the sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))dot(float lhs, float rhs);
+extern float3 __attribute__((const, overloadable))sin(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the dot product of two vectors.
+ * Return the sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))dot(float2 lhs, float2 rhs);
+extern float4 __attribute__((const, overloadable))sin(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the dot product of two vectors.
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))dot(float3 lhs, float3 rhs);
+extern float __attribute__((overloadable))sincos(float v, float *cosptr);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the dot product of two vectors.
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))dot(float4 lhs, float4 rhs);
+extern float2 __attribute__((overloadable))sincos(float2 v, float2 *cosptr);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the length of a vector.
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))length(float v);
+extern float3 __attribute__((overloadable))sincos(float3 v, float3 *cosptr);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the length of a vector.
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))length(float2 v);
+extern float4 __attribute__((overloadable))sincos(float4 v, float4 *cosptr);
#endif
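
A sketch of sincos computing both results in a single call:

    float c;
    float s = sincos(0.0f, &c);  /* s == 0.0f, c == 1.0f */
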
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the length of a vector.
+ * Return the hyperbolic sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))length(float3 v);
+extern float __attribute__((const, overloadable))sinh(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the length of a vector.
+ * Return the hyperbolic sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))length(float4 v);
+extern float2 __attribute__((const, overloadable))sinh(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the distance between two points.
+ * Return the hyperbolic sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))distance(float lhs, float rhs);
+extern float3 __attribute__((const, overloadable))sinh(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the distance between two points.
+ * Return the hyperbolic sine of a value specified in radians.
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))distance(float2 lhs, float2 rhs);
+extern float4 __attribute__((const, overloadable))sinh(float4);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the distance between two points.
+ * Return sin(v * PI).
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))distance(float3 lhs, float3 rhs);
+extern float __attribute__((const, overloadable))sinpi(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Compute the distance between two points.
+ * Return sin(v * PI).
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))distance(float4 lhs, float4 rhs);
+extern float2 __attribute__((const, overloadable))sinpi(float2);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Normalize a vector.
+ * Return sin(v * PI).
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))normalize(float v);
+extern float3 __attribute__((const, overloadable))sinpi(float3);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Normalize a vector.
+ * Return sin(v * PI).
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))normalize(float2 v);
+extern float4 __attribute__((const, overloadable))sinpi(float4);
#endif
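
A sketch of sinpi, which avoids first multiplying by an approximation of pi:

    float s = sinpi(0.5f);  /* 1.0f: sin(pi / 2) */
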
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Normalize a vector.
+ * Return the square root of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))normalize(float3 v);
+extern float __attribute__((const, overloadable))sqrt(float);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Normalize a vector.
+ * Return the square root of a value.
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))normalize(float4 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate reciprocal of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))half_recip(float v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate reciprocal of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float2 __attribute__((const, overloadable))half_recip(float2 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate reciprocal of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float3 __attribute__((const, overloadable))half_recip(float3 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate reciprocal of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float4 __attribute__((const, overloadable))half_recip(float4 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate square root of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))half_sqrt(float v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate square root of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float2 __attribute__((const, overloadable))half_sqrt(float2 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate square root of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float3 __attribute__((const, overloadable))half_sqrt(float3 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate square root of a value.
- *
- * Supported by API versions 17 and newer.
- */
-extern float4 __attribute__((const, overloadable))half_sqrt(float4 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate value of (1.f / sqrt(value)).
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))half_rsqrt(float v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate value of (1.f / sqrt(value)).
- *
- * Supported by API versions 17 and newer.
- */
-extern float2 __attribute__((const, overloadable))half_rsqrt(float2 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate value of (1.f / sqrt(value)).
- *
- * Supported by API versions 17 and newer.
- */
-extern float3 __attribute__((const, overloadable))half_rsqrt(float3 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Return the approximate value of (1.f / sqrt(value)).
- *
- * Supported by API versions 17 and newer.
- */
-extern float4 __attribute__((const, overloadable))half_rsqrt(float4 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate length of a vector.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_length(float v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate length of a vector.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_length(float2 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate length of a vector.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_length(float3 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate length of a vector.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_length(float4 v);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate distance between two points.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_distance(float lhs, float rhs);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate distance between two points.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_distance(float2 lhs, float2 rhs);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate distance between two points.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_distance(float3 lhs, float3 rhs);
-#endif
-
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
-/*
- * Compute the approximate distance between two points.
- *
- * Supported by API versions 17 and newer.
- */
-extern float __attribute__((const, overloadable))fast_distance(float4 lhs, float4 rhs);
+extern float2 __attribute__((const, overloadable))sqrt(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Approximately normalize a vector.
+ * Return the square root of a value.
*
- * Supported by API versions 17 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))fast_normalize(float v);
+extern float3 __attribute__((const, overloadable))sqrt(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Approximately normalize a vector.
+ * Return the square root of a value.
*
- * Supported by API versions 17 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))fast_normalize(float2 v);
+extern float4 __attribute__((const, overloadable))sqrt(float4);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Approximately normalize a vector.
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 17 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))fast_normalize(float3 v);
+extern float __attribute__((const, overloadable))step(float edge, float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 17))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Approximately normalize a vector.
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 17 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))fast_normalize(float4 v);
+extern float2 __attribute__((const, overloadable))step(float2 edge, float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp
- * valid for inputs -86.f to 86.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_exp(float v);
+extern float3 __attribute__((const, overloadable))step(float3 edge, float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp
- * valid for inputs -86.f to 86.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_exp(float2 v);
+extern float4 __attribute__((const, overloadable))step(float4 edge, float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp
- * valid for inputs -86.f to 86.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_exp(float3 v);
+extern float2 __attribute__((const, overloadable))step(float2 edge, float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp
- * valid for inputs -86.f to 86.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_exp(float4 v);
+extern float3 __attribute__((const, overloadable))step(float3 edge, float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp2
- * valid for inputs -125.f to 125.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_exp2(float v);
+extern float4 __attribute__((const, overloadable))step(float4 edge, float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Fast approximate exp2
- * valid for inputs -125.f to 125.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 20 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_exp2(float2 v);
+extern float2 __attribute__((const, overloadable))step(float edge, float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Fast approximate exp2
- * valid for inputs -125.f to 125.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 20 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_exp2(float3 v);
+extern float3 __attribute__((const, overloadable))step(float edge, float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 20))
/*
- * Fast approximate exp2
- * valid for inputs -125.f to 125.f
- * Max 8192 ulps of error
+ * if (v < edge)
+ * return 0.f;
+ * else
+ * return 1.f;
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 20 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_exp2(float4 v);
+extern float4 __attribute__((const, overloadable))step(float edge, float4 v);
#endif
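
The step() overloads above differ only in how the edge argument is broadcast: matching vector widths, and a vector edge with a scalar v, are available from API 9, while a scalar edge with a vector v requires API 20. A hedged sketch of the per-component result, with illustrative values:

    float4 color = {0.1f, 0.6f, 0.4f, 1.f};
    float4 edges = {0.5f, 0.5f, 0.5f, 0.5f};
    float4 a = step(edges, color);  // API 9+:  {0.f, 1.f, 0.f, 1.f}
    float4 b = step(0.5f, color);   // API 20+: same result, edge splatted
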
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp10
- * valid for inputs -37.f to 37.f
- * Max 8192 ulps of error
+ * Return the tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_exp10(float v);
+extern float __attribute__((const, overloadable))tan(float);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp10
- * valid for inputs -37.f to 37.f
- * Max 8192 ulps of error
+ * Return the tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_exp10(float2 v);
+extern float2 __attribute__((const, overloadable))tan(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp10
- * valid for inputs -37.f to 37.f
- * Max 8192 ulps of error
+ * Return the tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_exp10(float3 v);
+extern float3 __attribute__((const, overloadable))tan(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate exp10
- * valid for inputs -37.f to 37.f
- * Max 8192 ulps of error
+ * Return the tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_exp10(float4 v);
+extern float4 __attribute__((const, overloadable))tan(float4);
#endif
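
tan() takes its argument in radians and, like the other transcendentals here, applies per component in the vector overloads. A quick illustrative check:

    float t = tan(M_PI / 4.f);  // approximately 1.f (pi/4 rad = 45 degrees)
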
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log
+ * Return the hyperbolic tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_log(float v);
+extern float __attribute__((const, overloadable))tanh(float);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log
+ * Return the hyperbolic tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_log(float2 v);
+extern float2 __attribute__((const, overloadable))tanh(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log
+ * Return the hyperbolic tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_log(float3 v);
+extern float3 __attribute__((const, overloadable))tanh(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log
+ * Return the hyperbolic tangent of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_log(float4 v);
+extern float4 __attribute__((const, overloadable))tanh(float4);
#endif
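
tanh() is odd and saturates toward +/-1 for large-magnitude inputs; illustrative values:

    float a = tanh(0.f);    // 0.f exactly
    float b = tanh(10.f);   // approximately 1.f
    float c = tanh(-10.f);  // approximately -1.f
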
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log2
+ * Return tan(v * PI)
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_log2(float v);
+extern float __attribute__((const, overloadable))tanpi(float);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log2
+ * Return tan(v * PI)
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_log2(float2 v);
+extern float2 __attribute__((const, overloadable))tanpi(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log2
+ * Return tan(v * PI)
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_log2(float3 v);
+extern float3 __attribute__((const, overloadable))tanpi(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log2
+ * Return tan(v * PI)
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_log2(float4 v);
+extern float4 __attribute__((const, overloadable))tanpi(float4);
#endif
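
tanpi(v) returns tan(v * PI); the *pi form lets the implementation fold in the multiplication by PI rather than requiring the caller to form v * M_PI in single precision first (the usual motivation for such variants, not stated in this header). Sketch:

    float a = tanpi(0.25f);       // tan(pi/4), approximately 1.f
    float b = tan(0.25f * M_PI);  // same value, up to rounding of the product
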
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log10
+ * Compute the gamma function of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_log10(float v);
+extern float __attribute__((const, overloadable))tgamma(float);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log10
+ * Compute the gamma function of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_log10(float2 v);
+extern float2 __attribute__((const, overloadable))tgamma(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log10
+ * Compute the gamma function of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_log10(float3 v);
+extern float3 __attribute__((const, overloadable))tgamma(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate log10
+ * Compute the gamma function of a value.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_log10(float4 v);
+extern float4 __attribute__((const, overloadable))tgamma(float4);
#endif
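
tgamma() is the true gamma function; for positive integers it reduces to a shifted factorial, gamma(n) = (n-1)!. For illustration:

    float f = tgamma(5.f);  // gamma(5) = 4! = 24.f
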
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate v ^ y
+ * Round to integral using truncation.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))native_powr(float v, float y);
+extern float __attribute__((const, overloadable))trunc(float);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate v ^ y
+ * Round to integral using truncation.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))native_powr(float2 v, float2 y);
+extern float2 __attribute__((const, overloadable))trunc(float2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate v ^ y
+ * Round to integral using truncation.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))native_powr(float3 v, float3 y);
+extern float3 __attribute__((const, overloadable))trunc(float3);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 18))
+#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Fast approximate v ^ y
+ * Round to integral using truncation.
*
- * Supported by API versions 18 and newer.
+ * Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))native_powr(float4 v, float4 y);
+extern float4 __attribute__((const, overloadable))trunc(float4);
#endif
#endif // __rs_core_math_rsh__
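
trunc() rounds toward zero, which differs from floor() for negative inputs; illustrative values:

    float a = trunc(2.7f);   // 2.f
    float b = trunc(-2.7f);  // -2.f (floor(-2.7f) would give -3.f)
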
diff --git a/renderscript/lib/arm/libRSSupport.so b/renderscript/lib/arm/libRSSupport.so
index ee91b5e..aa1948a 100755
--- a/renderscript/lib/arm/libRSSupport.so
+++ b/renderscript/lib/arm/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/arm/libc.so b/renderscript/lib/arm/libc.so
index f9cf757..0d90cea 100755
--- a/renderscript/lib/arm/libc.so
+++ b/renderscript/lib/arm/libc.so
Binary files differ
diff --git a/renderscript/lib/arm/libclcore.bc b/renderscript/lib/arm/libclcore.bc
index 78b374f..06c0e26 100644
--- a/renderscript/lib/arm/libclcore.bc
+++ b/renderscript/lib/arm/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/arm/libm.so b/renderscript/lib/arm/libm.so
index 8dff5b7..f548039 100755
--- a/renderscript/lib/arm/libm.so
+++ b/renderscript/lib/arm/libm.so
Binary files differ
diff --git a/renderscript/lib/arm/librsjni.so b/renderscript/lib/arm/librsjni.so
index 550867e..69582f0 100755
--- a/renderscript/lib/arm/librsjni.so
+++ b/renderscript/lib/arm/librsjni.so
Binary files differ
diff --git a/renderscript/lib/arm/librsrt_arm.bc b/renderscript/lib/arm/librsrt_arm.bc
index 78b374f..06c0e26 100644
--- a/renderscript/lib/arm/librsrt_arm.bc
+++ b/renderscript/lib/arm/librsrt_arm.bc
Binary files differ
diff --git a/renderscript/lib/javalib.jar b/renderscript/lib/javalib.jar
index 9e2e3ee..cbcfcf1 100644
--- a/renderscript/lib/javalib.jar
+++ b/renderscript/lib/javalib.jar
Binary files differ
diff --git a/renderscript/lib/mips/libRSSupport.so b/renderscript/lib/mips/libRSSupport.so
index c428302..c653e13 100755
--- a/renderscript/lib/mips/libRSSupport.so
+++ b/renderscript/lib/mips/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/mips/libc.so b/renderscript/lib/mips/libc.so
index 4f5a5c7..e21832c 100755
--- a/renderscript/lib/mips/libc.so
+++ b/renderscript/lib/mips/libc.so
Binary files differ
diff --git a/renderscript/lib/mips/libclcore.bc b/renderscript/lib/mips/libclcore.bc
index 2de535d..73ba2d3 100644
--- a/renderscript/lib/mips/libclcore.bc
+++ b/renderscript/lib/mips/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/mips/libm.so b/renderscript/lib/mips/libm.so
index 6d7817b..e489611 100755
--- a/renderscript/lib/mips/libm.so
+++ b/renderscript/lib/mips/libm.so
Binary files differ
diff --git a/renderscript/lib/mips/librsjni.so b/renderscript/lib/mips/librsjni.so
index 34c3c74..ca0f42c 100755
--- a/renderscript/lib/mips/librsjni.so
+++ b/renderscript/lib/mips/librsjni.so
Binary files differ
diff --git a/renderscript/lib/mips/librsrt_mips.bc b/renderscript/lib/mips/librsrt_mips.bc
index 2de535d..73ba2d3 100644
--- a/renderscript/lib/mips/librsrt_mips.bc
+++ b/renderscript/lib/mips/librsrt_mips.bc
Binary files differ
diff --git a/renderscript/lib/x86/libRSSupport.so b/renderscript/lib/x86/libRSSupport.so
index c7f7208..793e4fb 100755
--- a/renderscript/lib/x86/libRSSupport.so
+++ b/renderscript/lib/x86/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/x86/libc.so b/renderscript/lib/x86/libc.so
index 0446b62..d7035c3 100755
--- a/renderscript/lib/x86/libc.so
+++ b/renderscript/lib/x86/libc.so
Binary files differ
diff --git a/renderscript/lib/x86/libclcore.bc b/renderscript/lib/x86/libclcore.bc
index 5ec7550..7651034 100644
--- a/renderscript/lib/x86/libclcore.bc
+++ b/renderscript/lib/x86/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/x86/libm.so b/renderscript/lib/x86/libm.so
index 01c1f2f..4cf02a2 100755
--- a/renderscript/lib/x86/libm.so
+++ b/renderscript/lib/x86/libm.so
Binary files differ
diff --git a/renderscript/lib/x86/librsjni.so b/renderscript/lib/x86/librsjni.so
index 17911e8..148b7d2 100755
--- a/renderscript/lib/x86/librsjni.so
+++ b/renderscript/lib/x86/librsjni.so
Binary files differ
diff --git a/renderscript/lib/x86/librsrt_x86.bc b/renderscript/lib/x86/librsrt_x86.bc
index 0315c4b..6827d02 100644
--- a/renderscript/lib/x86/librsrt_x86.bc
+++ b/renderscript/lib/x86/librsrt_x86.bc
Binary files differ