// Definition of the public simd interfaces -*- C++ -*-

// Copyright (C) 2020-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_H
#define _GLIBCXX_EXPERIMENTAL_SIMD_H

#if __cplusplus >= 201703L

#include "simd_detail.h"
#include "numeric_traits.h"
#include <bit>
#include <bitset>
#ifdef _GLIBCXX_DEBUG_UB
#include <cstdio> // for stderr
#endif
#include <cstring>
#include <cmath>
#include <functional>
#include <iosfwd>
#include <utility>

#if _GLIBCXX_SIMD_X86INTRIN
#include <x86intrin.h>
#elif _GLIBCXX_SIMD_HAVE_NEON
#include <arm_neon.h>
#endif

/** @ingroup ts_simd
 * @{
 */
/* There are several closely related types, with the following naming
 * convention:
 * _Tp: vectorizable (arithmetic) type (or any type)
 * _TV: __vector_type_t<_Tp, _Np>
 * _TW: _SimdWrapper<_Tp, _Np>
 * _TI: __intrinsic_type_t<_Tp, _Np>
 * _TVT: _VectorTraits<_TV> or _VectorTraits<_TW>
 * If one additional type is needed, use _U instead of _T.
 * Otherwise use _T\d, _TV\d, _TW\d, _TI\d, _TVT\d.
 *
 * More naming conventions:
 * _Ap or _Abi: An ABI tag from the simd_abi namespace
 * _Ip: often used for integer types with sizeof(_Ip) == sizeof(_Tp),
 *      _IV, _IW as for _TV, _TW
 * _Np: number of elements (not bytes)
 * _Bytes: number of bytes
 *
 * Variable names:
 * __k: mask object (vector- or bitmask)
 */
_GLIBCXX_SIMD_BEGIN_NAMESPACE

#if !_GLIBCXX_SIMD_X86INTRIN
using __m128  [[__gnu__::__vector_size__(16)]] = float;
using __m128d [[__gnu__::__vector_size__(16)]] = double;
using __m128i [[__gnu__::__vector_size__(16)]] = long long;
using __m256  [[__gnu__::__vector_size__(32)]] = float;
using __m256d [[__gnu__::__vector_size__(32)]] = double;
using __m256i [[__gnu__::__vector_size__(32)]] = long long;
using __m512  [[__gnu__::__vector_size__(64)]] = float;
using __m512d [[__gnu__::__vector_size__(64)]] = double;
using __m512i [[__gnu__::__vector_size__(64)]] = long long;
#endif

namespace simd_abi {
// simd_abi forward declarations {{{
// implementation details:
struct _Scalar;

template <int _Np>
  struct _Fixed;

// There are two major ABIs that appear on different architectures.
// Both have non-boolean values packed into an N-byte register
// -> #elements = N / sizeof(T)
// Masks differ:
// 1. Use value vector registers for masks (all 0 or all 1)
// 2. Use bitmasks (mask registers) with one bit per value in the corresponding
//    value vector
//
// Both can be partially used, masking off the rest when doing horizontal
// operations or operations that can trap (e.g. FP_INVALID or integer division
// by 0). This is encoded as the number of used bytes.
template <int _UsedBytes>
  struct _VecBuiltin;

template <int _UsedBytes>
  struct _VecBltnBtmsk;

template <typename _Tp, int _Np>
  using _VecN = _VecBuiltin<sizeof(_Tp) * _Np>;

template <int _UsedBytes = 16>
  using _Sse = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = 32>
  using _Avx = _VecBuiltin<_UsedBytes>;

template <int _UsedBytes = 64>
  using _Avx512 = _VecBltnBtmsk<_UsedBytes>;

template <int _UsedBytes = 16>
  using _Neon = _VecBuiltin<_UsedBytes>;

// implementation-defined:
using __sse = _Sse<>;
using __avx = _Avx<>;
using __avx512 = _Avx512<>;
using __neon = _Neon<>;
using __neon128 = _Neon<16>;
using __neon64 = _Neon<8>;

// standard:
template <typename _Tp, size_t _Np, typename...>
  struct deduce;

template <int _Np>
  using fixed_size = _Fixed<_Np>;

using scalar = _Scalar;

// }}}
} // namespace simd_abi
// forward declarations is_simd(_mask), simd(_mask), simd_size {{{
template <typename _Tp>
  struct is_simd;

template <typename _Tp>
  struct is_simd_mask;

template <typename _Tp, typename _Abi>
  class simd;

template <typename _Tp, typename _Abi>
  class simd_mask;

template <typename _Tp, typename _Abi>
  struct simd_size;

// }}}
// load/store flags {{{
struct element_aligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment = alignof(_Up);

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return __ptr; }
};

struct vector_aligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment
      = std::__bit_ceil(sizeof(_Up) * _Tp::size());

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    {
      return static_cast<_Up*>(
        __builtin_assume_aligned(__ptr, _S_alignment<_Tp, _Up>));
    }
};

template <size_t _Np> struct overaligned_tag
{
  template <typename _Tp, typename _Up = typename _Tp::value_type>
    static constexpr size_t _S_alignment = _Np;

  template <typename _Tp, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static constexpr _Up*
    _S_apply(_Up* __ptr)
    { return static_cast<_Up*>(__builtin_assume_aligned(__ptr, _Np)); }
};

inline constexpr element_aligned_tag element_aligned = {};

inline constexpr vector_aligned_tag vector_aligned = {};

template <size_t _Np>
  inline constexpr overaligned_tag<_Np> overaligned = {};

// }}}
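// Example (illustrative): for a 4-element simd of float, element_aligned only
// assumes alignof(float) == 4, vector_aligned assumes
// __bit_ceil(4 * sizeof(float)) == 16, and overaligned<32> assumes a
// caller-guaranteed 32-byte alignment.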
template <size_t _Xp>
  using _SizeConstant = integral_constant<size_t, _Xp>;
// constexpr feature detection{{{
constexpr inline bool __have_mmx = _GLIBCXX_SIMD_HAVE_MMX;
constexpr inline bool __have_sse = _GLIBCXX_SIMD_HAVE_SSE;
constexpr inline bool __have_sse2 = _GLIBCXX_SIMD_HAVE_SSE2;
constexpr inline bool __have_sse3 = _GLIBCXX_SIMD_HAVE_SSE3;
constexpr inline bool __have_ssse3 = _GLIBCXX_SIMD_HAVE_SSSE3;
constexpr inline bool __have_sse4_1 = _GLIBCXX_SIMD_HAVE_SSE4_1;
constexpr inline bool __have_sse4_2 = _GLIBCXX_SIMD_HAVE_SSE4_2;
constexpr inline bool __have_xop = _GLIBCXX_SIMD_HAVE_XOP;
constexpr inline bool __have_avx = _GLIBCXX_SIMD_HAVE_AVX;
constexpr inline bool __have_avx2 = _GLIBCXX_SIMD_HAVE_AVX2;
constexpr inline bool __have_bmi = _GLIBCXX_SIMD_HAVE_BMI1;
constexpr inline bool __have_bmi2 = _GLIBCXX_SIMD_HAVE_BMI2;
constexpr inline bool __have_lzcnt = _GLIBCXX_SIMD_HAVE_LZCNT;
constexpr inline bool __have_sse4a = _GLIBCXX_SIMD_HAVE_SSE4A;
constexpr inline bool __have_fma = _GLIBCXX_SIMD_HAVE_FMA;
constexpr inline bool __have_fma4 = _GLIBCXX_SIMD_HAVE_FMA4;
constexpr inline bool __have_f16c = _GLIBCXX_SIMD_HAVE_F16C;
constexpr inline bool __have_popcnt = _GLIBCXX_SIMD_HAVE_POPCNT;
constexpr inline bool __have_avx512f = _GLIBCXX_SIMD_HAVE_AVX512F;
constexpr inline bool __have_avx512dq = _GLIBCXX_SIMD_HAVE_AVX512DQ;
constexpr inline bool __have_avx512vl = _GLIBCXX_SIMD_HAVE_AVX512VL;
constexpr inline bool __have_avx512bw = _GLIBCXX_SIMD_HAVE_AVX512BW;
constexpr inline bool __have_avx512dq_vl = __have_avx512dq && __have_avx512vl;
constexpr inline bool __have_avx512bw_vl = __have_avx512bw && __have_avx512vl;
constexpr inline bool __have_avx512bitalg = _GLIBCXX_SIMD_HAVE_AVX512BITALG;
constexpr inline bool __have_avx512vbmi2 = _GLIBCXX_SIMD_HAVE_AVX512VBMI2;
constexpr inline bool __have_avx512vbmi = _GLIBCXX_SIMD_HAVE_AVX512VBMI;
constexpr inline bool __have_avx512ifma = _GLIBCXX_SIMD_HAVE_AVX512IFMA;
constexpr inline bool __have_avx512cd = _GLIBCXX_SIMD_HAVE_AVX512CD;
constexpr inline bool __have_avx512vnni = _GLIBCXX_SIMD_HAVE_AVX512VNNI;
constexpr inline bool __have_avx512vpopcntdq = _GLIBCXX_SIMD_HAVE_AVX512VPOPCNTDQ;
constexpr inline bool __have_avx512vp2intersect = _GLIBCXX_SIMD_HAVE_AVX512VP2INTERSECT;

constexpr inline bool __have_neon = _GLIBCXX_SIMD_HAVE_NEON;
constexpr inline bool __have_neon_a32 = _GLIBCXX_SIMD_HAVE_NEON_A32;
constexpr inline bool __have_neon_a64 = _GLIBCXX_SIMD_HAVE_NEON_A64;
constexpr inline bool __support_neon_float =
#if defined __GCC_IEC_559
  __GCC_IEC_559 == 0;
#elif defined __FAST_MATH__
  true;
#else
  false;
#endif

#ifdef _ARCH_PWR10
constexpr inline bool __have_power10vec = true;
#else
constexpr inline bool __have_power10vec = false;
#endif
#ifdef __POWER9_VECTOR__
constexpr inline bool __have_power9vec = true;
#else
constexpr inline bool __have_power9vec = false;
#endif
#if defined __POWER8_VECTOR__
constexpr inline bool __have_power8vec = true;
#else
constexpr inline bool __have_power8vec = __have_power9vec;
#endif
#if defined __VSX__
constexpr inline bool __have_power_vsx = true;
#else
constexpr inline bool __have_power_vsx = __have_power8vec;
#endif
#if defined __ALTIVEC__
constexpr inline bool __have_power_vmx = true;
#else
constexpr inline bool __have_power_vmx = __have_power_vsx;
#endif

// }}}

namespace __detail
{
#ifdef math_errhandling
  // Determines _S_handle_fpexcept from math_errhandling if it is defined and
  // expands to a constant expression. math_errhandling may expand to an extern
  // symbol, in which case a constexpr value must be guessed.
  template <int = math_errhandling>
    constexpr bool __handle_fpexcept_impl(int)
    { return math_errhandling & MATH_ERREXCEPT; }
#endif

  // Fallback if math_errhandling doesn't work: with fast-math assume
  // floating-point exceptions are ignored, otherwise implement correct
  // exception behavior.
  constexpr bool __handle_fpexcept_impl(float)
  {
#if defined __FAST_MATH__
    return false;
#else
    return true;
#endif
  }

  /// True if math functions must raise floating-point exceptions as specified by C17.
  static constexpr bool _S_handle_fpexcept = __handle_fpexcept_impl(0);

  constexpr std::uint_least64_t
  __floating_point_flags()
  {
    std::uint_least64_t __flags = 0;
    if constexpr (_S_handle_fpexcept)
      __flags |= 1;
#ifdef __FAST_MATH__
    __flags |= 1 << 1;
#elif __FINITE_MATH_ONLY__
    __flags |= 2 << 1;
#elif __GCC_IEC_559 < 2
    __flags |= 3 << 1;
#endif
    __flags |= (__FLT_EVAL_METHOD__ + 1) << 3;
    return __flags;
  }
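  // Example (illustrative): on a typical x86_64 -O2 build without -ffast-math
  // (math_errhandling includes MATH_ERREXCEPT, __GCC_IEC_559 == 2,
  // __FLT_EVAL_METHOD__ == 0) this evaluates to 1 | ((0 + 1) << 3) == 9.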

  constexpr std::uint_least64_t
  __machine_flags()
  {
    if constexpr (__have_mmx || __have_sse)
      return __have_mmx
               | (__have_sse << 1)
               | (__have_sse2 << 2)
               | (__have_sse3 << 3)
               | (__have_ssse3 << 4)
               | (__have_sse4_1 << 5)
               | (__have_sse4_2 << 6)
               | (__have_xop << 7)
               | (__have_avx << 8)
               | (__have_avx2 << 9)
               | (__have_bmi << 10)
               | (__have_bmi2 << 11)
               | (__have_lzcnt << 12)
               | (__have_sse4a << 13)
               | (__have_fma << 14)
               | (__have_fma4 << 15)
               | (__have_f16c << 16)
               | (__have_popcnt << 17)
               | (__have_avx512f << 18)
               | (__have_avx512dq << 19)
               | (__have_avx512vl << 20)
               | (__have_avx512bw << 21)
               | (__have_avx512bitalg << 22)
               | (__have_avx512vbmi2 << 23)
               | (__have_avx512vbmi << 24)
               | (__have_avx512ifma << 25)
               | (__have_avx512cd << 26)
               | (__have_avx512vnni << 27)
               | (__have_avx512vpopcntdq << 28)
               | (__have_avx512vp2intersect << 29);
    else if constexpr (__have_neon)
      return __have_neon
               | (__have_neon_a32 << 1)
               | (__have_neon_a64 << 2)
               | (__support_neon_float << 3);
    else if constexpr (__have_power_vmx)
      return __have_power_vmx
               | (__have_power_vsx << 1)
               | (__have_power8vec << 2)
               | (__have_power9vec << 3)
               | (__have_power10vec << 4);
    else
      return 0;
  }

  namespace
  {
    struct _OdrEnforcer {};
  }

  template <std::uint_least64_t...>
    struct _MachineFlagsTemplate {};

  /**@internal
   * Use this type as the default template argument for all function templates
   * that are not declared always_inline. It ensures that a function
   * specialization which the compiler decides not to inline has a unique
   * symbol (_OdrEnforcer) or a symbol matching the machine/architecture flags
   * (_MachineFlagsTemplate). This helps to avoid ODR violations in cases where
   * users link TUs compiled with different flags. This is especially important
   * for using simd in libraries.
   */
  using __odr_helper
    = conditional_t<__machine_flags() == 0, _OdrEnforcer,
                    _MachineFlagsTemplate<__machine_flags(), __floating_point_flags()>>;

  struct _Minimum
  {
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
      operator()(_Tp __a, _Tp __b) const
      {
        using std::min;
        return min(__a, __b);
      }
  };

  struct _Maximum
  {
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
      operator()(_Tp __a, _Tp __b) const
      {
        using std::max;
        return max(__a, __b);
      }
  };
} // namespace __detail

// unrolled/pack execution helpers
// __execute_n_times{{{
template <typename _Fp, size_t... _I>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_on_index_sequence(_Fp&& __f, index_sequence<_I...>)
  { ((void)__f(_SizeConstant<_I>()), ...); }

template <typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_on_index_sequence(_Fp&&, index_sequence<>)
  { }

template <size_t _Np, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __execute_n_times(_Fp&& __f)
  {
    __execute_on_index_sequence(static_cast<_Fp&&>(__f),
                                make_index_sequence<_Np>{});
  }

// }}}
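// Example (illustrative): __execute_n_times<3>([&](auto __i) { __r[__i] = 0; });
// unrolls at compile time to __r[0] = 0; __r[1] = 0; __r[2] = 0;, where each
// __i is a _SizeConstant usable in constant expressions.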
// __generate_from_n_evaluations{{{
template <typename _R, typename _Fp, size_t... _I>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __execute_on_index_sequence_with_return(_Fp&& __f, index_sequence<_I...>)
  { return _R{__f(_SizeConstant<_I>())...}; }

template <size_t _Np, typename _R, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __generate_from_n_evaluations(_Fp&& __f)
  {
    return __execute_on_index_sequence_with_return<_R>(
      static_cast<_Fp&&>(__f), make_index_sequence<_Np>{});
  }

// }}}
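// Example (illustrative): list-initialize a result type from an index-based
// generator:
// auto __sq = __generate_from_n_evaluations<4, array<int, 4>>(
//               [](auto __i) { return int(__i * __i); }); // {0, 1, 4, 9}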
// __call_with_n_evaluations{{{
template <size_t... _I, typename _F0, typename _FArgs>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_n_evaluations(index_sequence<_I...>, _F0&& __f0, _FArgs&& __fargs)
  { return __f0(__fargs(_SizeConstant<_I>())...); }

template <size_t _Np, typename _F0, typename _FArgs>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_n_evaluations(_F0&& __f0, _FArgs&& __fargs)
  {
    return __call_with_n_evaluations(make_index_sequence<_Np>{},
                                     static_cast<_F0&&>(__f0),
                                     static_cast<_FArgs&&>(__fargs));
  }

// }}}
// __call_with_subscripts{{{
template <size_t _First = 0, size_t... _It, typename _Tp, typename _Fp>
  [[__gnu__::__flatten__]] _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_subscripts(_Tp&& __x, index_sequence<_It...>, _Fp&& __fun)
  { return __fun(__x[_First + _It]...); }

template <size_t _Np, size_t _First = 0, typename _Tp, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __call_with_subscripts(_Tp&& __x, _Fp&& __fun)
  {
    return __call_with_subscripts<_First>(static_cast<_Tp&&>(__x),
                                          make_index_sequence<_Np>(),
                                          static_cast<_Fp&&>(__fun));
  }

// }}}
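// Example (illustrative): fold the first 4 elements of a subscriptable __v:
// auto __sum = __call_with_subscripts<4>(__v,
//                [](auto... __vals) { return (__vals + ...); });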

// vvv ---- type traits ---- vvv
// integer type aliases{{{
using _UChar = unsigned char;
using _SChar = signed char;
using _UShort = unsigned short;
using _UInt = unsigned int;
using _ULong = unsigned long;
using _ULLong = unsigned long long;
using _LLong = long long;

//}}}
// __first_of_pack{{{
template <typename _T0, typename...>
  struct __first_of_pack
  { using type = _T0; };

template <typename... _Ts>
  using __first_of_pack_t = typename __first_of_pack<_Ts...>::type;

//}}}
// __value_type_or_identity_t {{{
template <typename _Tp>
  typename _Tp::value_type
  __value_type_or_identity_impl(int);

template <typename _Tp>
  _Tp
  __value_type_or_identity_impl(float);

template <typename _Tp>
  using __value_type_or_identity_t
    = decltype(__value_type_or_identity_impl<_Tp>(int()));

// }}}
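// Example (illustrative): the int overload wins when _Tp::value_type exists,
// otherwise SFINAE falls back to the float overload. So
// __value_type_or_identity_t<simd<float>> is float, while
// __value_type_or_identity_t<int> is int.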
// __is_vectorizable {{{
template <typename _Tp>
  struct __is_vectorizable : public is_arithmetic<_Tp> {};

template <>
  struct __is_vectorizable<bool> : public false_type {};

template <typename _Tp>
  inline constexpr bool __is_vectorizable_v = __is_vectorizable<_Tp>::value;

// Deduces to a vectorizable type
template <typename _Tp, typename = enable_if_t<__is_vectorizable_v<_Tp>>>
  using _Vectorizable = _Tp;

// }}}
// _LoadStorePtr / __is_possible_loadstore_conversion {{{
template <typename _Ptr, typename _ValueType>
  struct __is_possible_loadstore_conversion
  : conjunction<__is_vectorizable<_Ptr>, __is_vectorizable<_ValueType>> {};

template <>
  struct __is_possible_loadstore_conversion<bool, bool> : true_type {};

// Deduces to a type allowed for load/store with the given value type.
template <typename _Ptr, typename _ValueType,
          typename = enable_if_t<
            __is_possible_loadstore_conversion<_Ptr, _ValueType>::value>>
  using _LoadStorePtr = _Ptr;

// }}}
// __is_bitmask{{{
template <typename _Tp, typename = void_t<>>
  struct __is_bitmask : false_type {};

template <typename _Tp>
  inline constexpr bool __is_bitmask_v = __is_bitmask<_Tp>::value;

// the __mmaskXX case:
template <typename _Tp>
  struct __is_bitmask<_Tp,
    void_t<decltype(declval<unsigned&>() = declval<_Tp>() & 1u)>>
  : true_type {};

// }}}
// __int_for_sizeof{{{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
template <size_t _Bytes>
  constexpr auto
  __int_for_sizeof()
  {
    if constexpr (_Bytes == sizeof(int))
      return int();
#ifdef __clang__
    else if constexpr (_Bytes == sizeof(char))
      return char();
#else
    else if constexpr (_Bytes == sizeof(_SChar))
      return _SChar();
#endif
    else if constexpr (_Bytes == sizeof(short))
      return short();
#ifndef __clang__
    else if constexpr (_Bytes == sizeof(long))
      return long();
#endif
    else if constexpr (_Bytes == sizeof(_LLong))
      return _LLong();
#ifdef __SIZEOF_INT128__
    else if constexpr (_Bytes == sizeof(__int128))
      return __int128();
#endif // __SIZEOF_INT128__
    else if constexpr (_Bytes % sizeof(int) == 0)
      {
        constexpr size_t _Np = _Bytes / sizeof(int);
        struct _Ip
        {
          int _M_data[_Np];

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator&(_Ip __rhs) const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
              [&](auto __i) { return __rhs._M_data[__i] & _M_data[__i]; });
          }

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator|(_Ip __rhs) const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
              [&](auto __i) { return __rhs._M_data[__i] | _M_data[__i]; });
          }

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator^(_Ip __rhs) const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
              [&](auto __i) { return __rhs._M_data[__i] ^ _M_data[__i]; });
          }

          _GLIBCXX_SIMD_INTRINSIC constexpr _Ip
          operator~() const
          {
            return __generate_from_n_evaluations<_Np, _Ip>(
              [&](auto __i) { return ~_M_data[__i]; });
          }
        };
        return _Ip{};
      }
    else
      static_assert(_Bytes != _Bytes, "this should be unreachable");
  }
#pragma GCC diagnostic pop

template <typename _Tp>
  using __int_for_sizeof_t = decltype(__int_for_sizeof<sizeof(_Tp)>());

template <size_t _Np>
  using __int_with_sizeof_t = decltype(__int_for_sizeof<_Np>());

// }}}
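// Example (illustrative, GCC on LP64): __int_for_sizeof_t<double> is long
// (the first type with matching size wins), __int_with_sizeof_t<1> is signed
// char, and 12 bytes yield the synthesized _Ip aggregate of three ints with
// member-wise bit operations.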
// __is_fixed_size_abi{{{
template <typename _Tp>
  struct __is_fixed_size_abi : false_type {};

template <int _Np>
  struct __is_fixed_size_abi<simd_abi::fixed_size<_Np>> : true_type {};

template <typename _Tp>
  inline constexpr bool __is_fixed_size_abi_v = __is_fixed_size_abi<_Tp>::value;

// }}}
// __is_scalar_abi {{{
template <typename _Abi>
  constexpr bool
  __is_scalar_abi()
  { return is_same_v<simd_abi::scalar, _Abi>; }

// }}}
// __abi_bytes_v {{{
template <template <int> class _Abi, int _Bytes>
  constexpr int
  __abi_bytes_impl(_Abi<_Bytes>*)
  { return _Bytes; }

template <typename _Tp>
  constexpr int
  __abi_bytes_impl(_Tp*)
  { return -1; }

template <typename _Abi>
  inline constexpr int __abi_bytes_v
    = __abi_bytes_impl(static_cast<_Abi*>(nullptr));

// }}}
// __is_builtin_bitmask_abi {{{
template <typename _Abi>
  constexpr bool
  __is_builtin_bitmask_abi()
  { return is_same_v<simd_abi::_VecBltnBtmsk<__abi_bytes_v<_Abi>>, _Abi>; }

// }}}
// __is_sse_abi {{{
template <typename _Abi>
  constexpr bool
  __is_sse_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 16 && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_avx_abi {{{
template <typename _Abi>
  constexpr bool
  __is_avx_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes > 16 && _Bytes <= 32
             && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __is_avx512_abi {{{
template <typename _Abi>
  constexpr bool
  __is_avx512_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 64 && is_same_v<simd_abi::_Avx512<_Bytes>, _Abi>;
  }

// }}}
// __is_neon_abi {{{
template <typename _Abi>
  constexpr bool
  __is_neon_abi()
  {
    constexpr auto _Bytes = __abi_bytes_v<_Abi>;
    return _Bytes <= 16 && is_same_v<simd_abi::_VecBuiltin<_Bytes>, _Abi>;
  }

// }}}
// __make_dependent_t {{{
template <typename, typename _Up>
  struct __make_dependent
  { using type = _Up; };

template <typename _Tp, typename _Up>
  using __make_dependent_t = typename __make_dependent<_Tp, _Up>::type;

// }}}
// ^^^ ---- type traits ---- ^^^

// __invoke_ub{{{
template <typename... _Args>
  [[noreturn]] _GLIBCXX_SIMD_ALWAYS_INLINE void
  __invoke_ub([[maybe_unused]] const char* __msg,
              [[maybe_unused]] const _Args&... __args)
  {
#ifdef _GLIBCXX_DEBUG_UB
    __builtin_fprintf(stderr, __msg, __args...);
    __builtin_trap();
#else
    __builtin_unreachable();
#endif
  }

// }}}
// __assert_unreachable{{{
template <typename _Tp>
  struct __assert_unreachable
  { static_assert(!is_same_v<_Tp, _Tp>, "this should be unreachable"); };

// }}}
// __size_or_zero_v {{{
template <typename _Tp, typename _Ap, size_t _Np = simd_size<_Tp, _Ap>::value>
  constexpr size_t
  __size_or_zero_dispatch(int)
  { return _Np; }

template <typename _Tp, typename _Ap>
  constexpr size_t
  __size_or_zero_dispatch(float)
  { return 0; }

template <typename _Tp, typename _Ap>
  inline constexpr size_t __size_or_zero_v
    = __size_or_zero_dispatch<_Tp, _Ap>(0);

// }}}
// __div_roundup {{{
inline constexpr size_t
__div_roundup(size_t __a, size_t __b)
{ return (__a + __b - 1) / __b; }

// }}}
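// Example: __div_roundup(9, 8) == 2, i.e. ceiling division, used to compute
// how many __b-sized blocks are needed to cover __a items.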
// _ExactBool{{{
class _ExactBool
{
  const bool _M_data;

public:
  _GLIBCXX_SIMD_INTRINSIC constexpr _ExactBool(bool __b) : _M_data(__b) {}

  _ExactBool(int) = delete;

  _GLIBCXX_SIMD_INTRINSIC constexpr operator bool() const { return _M_data; }
};

// }}}
// __may_alias{{{
/**@internal
 * Helper __may_alias<_Tp> that turns _Tp into the type to be used for an
 * aliasing pointer. This adds the __may_alias attribute to _Tp (with compilers
 * that support it).
 */
template <typename _Tp>
  using __may_alias [[__gnu__::__may_alias__]] = _Tp;

// }}}
// _UnsupportedBase {{{
// simd and simd_mask base for unsupported <_Tp, _Abi>
struct _UnsupportedBase
{
  _UnsupportedBase() = delete;
  _UnsupportedBase(const _UnsupportedBase&) = delete;
  _UnsupportedBase& operator=(const _UnsupportedBase&) = delete;
  ~_UnsupportedBase() = delete;
};

// }}}
// _InvalidTraits {{{
/**
 * @internal
 * Defines the implementation of a given <_Tp, _Abi>.
 *
 * Implementations must ensure that only valid <_Tp, _Abi> instantiations are
 * possible. Static assertions in the type definition do not suffice. It is
 * important that SFINAE works.
 */
struct _InvalidTraits
{
  using _IsValid = false_type;
  using _SimdBase = _UnsupportedBase;
  using _MaskBase = _UnsupportedBase;

  static constexpr size_t _S_full_size = 0;
  static constexpr bool _S_is_partial = false;

  static constexpr size_t _S_simd_align = 1;
  struct _SimdImpl;
  struct _SimdMember {};
  struct _SimdCastType;

  static constexpr size_t _S_mask_align = 1;
  struct _MaskImpl;
  struct _MaskMember {};
  struct _MaskCastType;
};

// }}}
// _SimdTraits {{{
template <typename _Tp, typename _Abi, typename = void_t<>>
  struct _SimdTraits : _InvalidTraits {};

// }}}
// __private_init, __bitset_init{{{
/**
 * @internal
 * Tag used for private init constructor of simd and simd_mask
 */
inline constexpr struct _PrivateInit {} __private_init = {};

inline constexpr struct _BitsetInit {} __bitset_init = {};

// }}}
// __is_narrowing_conversion<_From, _To>{{{
template <typename _From, typename _To, bool = is_arithmetic_v<_From>,
          bool = is_arithmetic_v<_To>>
  struct __is_narrowing_conversion;

// ignore "signed/unsigned mismatch" in the following trait.
// The implicit conversions will do the right thing here.
template <typename _From, typename _To>
  struct __is_narrowing_conversion<_From, _To, true, true>
  : public __bool_constant<(
      __digits_v<_From> > __digits_v<_To>
      || __finite_max_v<_From> > __finite_max_v<_To>
      || __finite_min_v<_From> < __finite_min_v<_To>
      || (is_signed_v<_From> && is_unsigned_v<_To>))> {};

template <typename _Tp>
  struct __is_narrowing_conversion<_Tp, bool, true, true>
  : public true_type {};

template <>
  struct __is_narrowing_conversion<bool, bool, true, true>
  : public false_type {};

template <typename _Tp>
  struct __is_narrowing_conversion<_Tp, _Tp, true, true>
  : public false_type {};

template <typename _From, typename _To>
  struct __is_narrowing_conversion<_From, _To, false, true>
  : public negation<is_convertible<_From, _To>> {};

// }}}
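// Example (illustrative): __is_narrowing_conversion<int, short>::value is
// true (fewer digits in the target), <short, int> is false, and <_Tp, bool>
// is always narrowing except for bool -> bool.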
// __converts_to_higher_integer_rank{{{
template <typename _From, typename _To, bool = (sizeof(_From) < sizeof(_To))>
  struct __converts_to_higher_integer_rank : public true_type {};

// this may fail for char -> short if sizeof(char) == sizeof(short)
template <typename _From, typename _To>
  struct __converts_to_higher_integer_rank<_From, _To, false>
  : public is_same<decltype(declval<_From>() + declval<_To>()), _To> {};

// }}}
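// Example (illustrative): with equal sizes the usual arithmetic conversions
// decide. On LP64, long + long long is long long, so <long, long long> is
// true, whereas <unsigned, int> is false because unsigned + int is unsigned.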
// __data(simd/simd_mask) {{{
template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd_mask<_Tp, _Ap>& __x);

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd_mask<_Tp, _Ap>& __x);

// }}}
// _SimdConverter {{{
template <typename _FromT, typename _FromA, typename _ToT, typename _ToA,
          typename = void>
  struct _SimdConverter;

template <typename _Tp, typename _Ap>
  struct _SimdConverter<_Tp, _Ap, _Tp, _Ap, void>
  {
    template <typename _Up>
      _GLIBCXX_SIMD_INTRINSIC const _Up&
      operator()(const _Up& __x)
      { return __x; }
  };

// }}}
// __to_value_type_or_member_type {{{
template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __to_value_type_or_member_type(const _V& __x) -> decltype(__data(__x))
  { return __data(__x); }

template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr const typename _V::value_type&
  __to_value_type_or_member_type(const typename _V::value_type& __x)
  { return __x; }

// }}}
// __bool_storage_member_type{{{
template <size_t _Size>
  struct __bool_storage_member_type;

template <size_t _Size>
  using __bool_storage_member_type_t =
    typename __bool_storage_member_type<_Size>::type;

// }}}
// _SimdTuple {{{
// why not tuple?
// 1. tuple gives no guarantee about the storage order, but I require
//    storage equivalent to array<_Tp, _Np>
// 2. direct access to the element type (first template argument)
// 3. enforces equal element type, only different _Abi types are allowed
template <typename _Tp, typename... _Abis>
  struct _SimdTuple;

//}}}
// __fixed_size_storage_t {{{
template <typename _Tp, int _Np>
  struct __fixed_size_storage;

template <typename _Tp, int _Np>
  using __fixed_size_storage_t = typename __fixed_size_storage<_Tp, _Np>::type;

// }}}
// _SimdWrapper fwd decl{{{
template <typename _Tp, size_t _Size, typename = void_t<>>
  struct _SimdWrapper;

template <typename _Tp>
  using _SimdWrapper8 = _SimdWrapper<_Tp, 8 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper16 = _SimdWrapper<_Tp, 16 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper32 = _SimdWrapper<_Tp, 32 / sizeof(_Tp)>;
template <typename _Tp>
  using _SimdWrapper64 = _SimdWrapper<_Tp, 64 / sizeof(_Tp)>;

// }}}
// __is_simd_wrapper {{{
template <typename _Tp>
  struct __is_simd_wrapper : false_type {};

template <typename _Tp, size_t _Np>
  struct __is_simd_wrapper<_SimdWrapper<_Tp, _Np>> : true_type {};

template <typename _Tp>
  inline constexpr bool __is_simd_wrapper_v = __is_simd_wrapper<_Tp>::value;

// }}}
// _BitOps {{{
struct _BitOps
{
  // _S_bit_iteration {{{
  template <typename _Tp, typename _Fp>
    static void
    _S_bit_iteration(_Tp __mask, _Fp&& __f)
    {
      static_assert(sizeof(_ULLong) >= sizeof(_Tp));
      conditional_t<sizeof(_Tp) <= sizeof(_UInt), _UInt, _ULLong> __k;
      if constexpr (is_convertible_v<_Tp, decltype(__k)>)
        __k = __mask;
      else
        __k = __mask.to_ullong();
      while (__k)
        {
          __f(std::__countr_zero(__k));
          __k &= (__k - 1);
        }
    }

  //}}}
};

//}}}
// __increment, __decrement {{{
template <typename _Tp = void>
  struct __increment
  { constexpr _Tp operator()(_Tp __a) const { return ++__a; } };

template <>
  struct __increment<void>
  {
    template <typename _Tp>
      constexpr _Tp
      operator()(_Tp __a) const
      { return ++__a; }
  };

template <typename _Tp = void>
  struct __decrement
  { constexpr _Tp operator()(_Tp __a) const { return --__a; } };

template <>
  struct __decrement<void>
  {
    template <typename _Tp>
      constexpr _Tp
      operator()(_Tp __a) const
      { return --__a; }
  };

// }}}
// _ValuePreserving(OrInt) {{{
template <typename _From, typename _To,
          typename = enable_if_t<negation<
            __is_narrowing_conversion<__remove_cvref_t<_From>, _To>>::value>>
  using _ValuePreserving = _From;

template <typename _From, typename _To,
          typename _DecayedFrom = __remove_cvref_t<_From>,
          typename = enable_if_t<conjunction<
            is_convertible<_From, _To>,
            disjunction<
              is_same<_DecayedFrom, _To>, is_same<_DecayedFrom, int>,
              conjunction<is_same<_DecayedFrom, _UInt>, is_unsigned<_To>>,
              negation<__is_narrowing_conversion<_DecayedFrom, _To>>>>::value>>
  using _ValuePreservingOrInt = _From;

// }}}
// __intrinsic_type {{{
template <typename _Tp, size_t _Bytes, typename = void_t<>>
  struct __intrinsic_type;

template <typename _Tp, size_t _Size>
  using __intrinsic_type_t =
    typename __intrinsic_type<_Tp, _Size * sizeof(_Tp)>::type;

template <typename _Tp>
  using __intrinsic_type2_t = typename __intrinsic_type<_Tp, 2>::type;
template <typename _Tp>
  using __intrinsic_type4_t = typename __intrinsic_type<_Tp, 4>::type;
template <typename _Tp>
  using __intrinsic_type8_t = typename __intrinsic_type<_Tp, 8>::type;
template <typename _Tp>
  using __intrinsic_type16_t = typename __intrinsic_type<_Tp, 16>::type;
template <typename _Tp>
  using __intrinsic_type32_t = typename __intrinsic_type<_Tp, 32>::type;
template <typename _Tp>
  using __intrinsic_type64_t = typename __intrinsic_type<_Tp, 64>::type;

// }}}
// _BitMask {{{
template <size_t _Np, bool _Sanitized = false>
  struct _BitMask;

template <size_t _Np, bool _Sanitized>
  struct __is_bitmask<_BitMask<_Np, _Sanitized>, void> : true_type {};

template <size_t _Np>
  using _SanitizedBitMask = _BitMask<_Np, true>;

template <size_t _Np, bool _Sanitized>
  struct _BitMask
  {
    static_assert(_Np > 0);

    static constexpr size_t _NBytes = __div_roundup(_Np, __CHAR_BIT__);

    using _Tp = conditional_t<_Np == 1, bool,
                              make_unsigned_t<__int_with_sizeof_t<std::min(
                                sizeof(_ULLong), std::__bit_ceil(_NBytes))>>>;

    static constexpr int _S_array_size = __div_roundup(_NBytes, sizeof(_Tp));

    _Tp _M_bits[_S_array_size];

    static constexpr int _S_unused_bits
      = _Np == 1 ? 0 : _S_array_size * sizeof(_Tp) * __CHAR_BIT__ - _Np;

    static constexpr _Tp _S_bitmask = +_Tp(~_Tp()) >> _S_unused_bits;

    constexpr _BitMask() noexcept = default;

    constexpr _BitMask(unsigned long long __x) noexcept
    : _M_bits{static_cast<_Tp>(__x)} {}

    _BitMask(bitset<_Np> __x) noexcept : _BitMask(__x.to_ullong()) {}

    constexpr _BitMask(const _BitMask&) noexcept = default;

    template <bool _RhsSanitized, typename = enable_if_t<_RhsSanitized == false
                                                           && _Sanitized == true>>
      constexpr _BitMask(const _BitMask<_Np, _RhsSanitized>& __rhs) noexcept
      : _BitMask(__rhs._M_sanitized()) {}

    constexpr operator _SimdWrapper<bool, _Np>() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr _Tp
    _M_to_bits() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr unsigned long long
    to_ullong() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    // precondition: is sanitized
    constexpr unsigned long
    to_ulong() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    constexpr bitset<_Np>
    _M_to_bitset() const noexcept
    {
      static_assert(_S_array_size == 1);
      return _M_bits[0];
    }

    constexpr decltype(auto)
    _M_sanitized() const noexcept
    {
      if constexpr (_Sanitized)
        return *this;
      else if constexpr (_Np == 1)
        return _SanitizedBitMask<_Np>(_M_bits[0]);
      else
        {
          _SanitizedBitMask<_Np> __r = {};
          for (int __i = 0; __i < _S_array_size; ++__i)
            __r._M_bits[__i] = _M_bits[__i];
          if constexpr (_S_unused_bits > 0)
            __r._M_bits[_S_array_size - 1] &= _S_bitmask;
          return __r;
        }
    }

    template <size_t _Mp, bool _LSanitized>
      constexpr _BitMask<_Np + _Mp, _Sanitized>
      _M_prepend(_BitMask<_Mp, _LSanitized> __lsb) const noexcept
      {
        constexpr size_t _RN = _Np + _Mp;
        using _Rp = _BitMask<_RN, _Sanitized>;
        if constexpr (_Rp::_S_array_size == 1)
          {
            _Rp __r{{_M_bits[0]}};
            __r._M_bits[0] <<= _Mp;
            __r._M_bits[0] |= __lsb._M_sanitized()._M_bits[0];
            return __r;
          }
        else
          __assert_unreachable<_Rp>();
      }

    // Return a new _BitMask with size _NewSize while dropping _DropLsb least
    // significant bits. If the operation implicitly produces a sanitized
    // bitmask, the result type will have _Sanitized set.
    template <size_t _DropLsb, size_t _NewSize = _Np - _DropLsb>
      constexpr auto
      _M_extract() const noexcept
      {
        static_assert(_Np > _DropLsb);
        static_assert(_DropLsb + _NewSize <= sizeof(_ULLong) * __CHAR_BIT__,
                      "not implemented for bitmasks larger than one ullong");
        if constexpr (_NewSize == 1)
          // must sanitize because the return _Tp is bool
          return _SanitizedBitMask<1>(_M_bits[0] & (_Tp(1) << _DropLsb));
        else
          return _BitMask<_NewSize,
                          ((_NewSize + _DropLsb == sizeof(_Tp) * __CHAR_BIT__
                            && _NewSize + _DropLsb <= _Np)
                           || ((_Sanitized || _Np == sizeof(_Tp) * __CHAR_BIT__)
                               && _NewSize + _DropLsb >= _Np))>(_M_bits[0]
                                                                >> _DropLsb);
      }

    // True if all bits are set. Implicitly sanitizes if _Sanitized == false.
    constexpr bool
    all() const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().all();
      else
        {
          constexpr _Tp __allbits = ~_Tp();
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            if (_M_bits[__i] != __allbits)
              return false;
          return _M_bits[_S_array_size - 1] == _S_bitmask;
        }
    }

    // True if at least one bit is set. Implicitly sanitizes if _Sanitized ==
    // false.
    constexpr bool
    any() const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().any();
      else
        {
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            if (_M_bits[__i] != 0)
              return true;
          return _M_bits[_S_array_size - 1] != 0;
        }
    }

    // True if no bit is set. Implicitly sanitizes if _Sanitized == false.
    constexpr bool
    none() const noexcept
    {
      if constexpr (_Np == 1)
        return !_M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().none();
      else
        {
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            if (_M_bits[__i] != 0)
              return false;
          return _M_bits[_S_array_size - 1] == 0;
        }
    }

    // Returns the number of set bits. Implicitly sanitizes if _Sanitized ==
    // false.
    constexpr int
    count() const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (!_Sanitized)
        return _M_sanitized().count();
      else
        {
          int __result = __builtin_popcountll(_M_bits[0]);
          for (int __i = 1; __i < _S_array_size; ++__i)
            __result += __builtin_popcountll(_M_bits[__i]);
          return __result;
        }
    }

    // Returns the bit at offset __i as bool.
    constexpr bool
    operator[](size_t __i) const noexcept
    {
      if constexpr (_Np == 1)
        return _M_bits[0];
      else if constexpr (_S_array_size == 1)
        return (_M_bits[0] >> __i) & 1;
      else
        {
          const size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
          const size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
          return (_M_bits[__j] >> __shift) & 1;
        }
    }

    template <size_t __i>
      constexpr bool
      operator[](_SizeConstant<__i>) const noexcept
      {
        static_assert(__i < _Np);
        constexpr size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
        constexpr size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
        return static_cast<bool>(_M_bits[__j] & (_Tp(1) << __shift));
      }

    // Set the bit at offset __i to __x.
    constexpr void
    set(size_t __i, bool __x) noexcept
    {
      if constexpr (_Np == 1)
        _M_bits[0] = __x;
      else if constexpr (_S_array_size == 1)
        {
          _M_bits[0] &= ~_Tp(_Tp(1) << __i);
          _M_bits[0] |= _Tp(_Tp(__x) << __i);
        }
      else
        {
          const size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
          const size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
          _M_bits[__j] &= ~_Tp(_Tp(1) << __shift);
          _M_bits[__j] |= _Tp(_Tp(__x) << __shift);
        }
    }

    template <size_t __i>
      constexpr void
      set(_SizeConstant<__i>, bool __x) noexcept
      {
        static_assert(__i < _Np);
        if constexpr (_Np == 1)
          _M_bits[0] = __x;
        else
          {
            constexpr size_t __j = __i / (sizeof(_Tp) * __CHAR_BIT__);
            constexpr size_t __shift = __i % (sizeof(_Tp) * __CHAR_BIT__);
            constexpr _Tp __mask = ~_Tp(_Tp(1) << __shift);
            _M_bits[__j] &= __mask;
            _M_bits[__j] |= _Tp(_Tp(__x) << __shift);
          }
      }

    // Inverts all bits. Sanitized input leads to sanitized output.
    constexpr _BitMask
    operator~() const noexcept
    {
      if constexpr (_Np == 1)
        return !_M_bits[0];
      else
        {
          _BitMask __result{};
          for (int __i = 0; __i < _S_array_size - 1; ++__i)
            __result._M_bits[__i] = ~_M_bits[__i];
          if constexpr (_Sanitized)
            __result._M_bits[_S_array_size - 1]
              = _M_bits[_S_array_size - 1] ^ _S_bitmask;
          else
            __result._M_bits[_S_array_size - 1] = ~_M_bits[_S_array_size - 1];
          return __result;
        }
    }

    constexpr _BitMask&
    operator^=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
        [&](auto __i) { _M_bits[__i] ^= __b._M_bits[__i]; });
      return *this;
    }

    constexpr _BitMask&
    operator|=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
        [&](auto __i) { _M_bits[__i] |= __b._M_bits[__i]; });
      return *this;
    }

    constexpr _BitMask&
    operator&=(const _BitMask& __b) & noexcept
    {
      __execute_n_times<_S_array_size>(
        [&](auto __i) { _M_bits[__i] &= __b._M_bits[__i]; });
      return *this;
    }

    friend constexpr _BitMask
    operator^(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r ^= __b;
      return __r;
    }

    friend constexpr _BitMask
    operator|(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r |= __b;
      return __r;
    }

    friend constexpr _BitMask
    operator&(const _BitMask& __a, const _BitMask& __b) noexcept
    {
      _BitMask __r = __a;
      __r &= __b;
      return __r;
    }

    _GLIBCXX_SIMD_INTRINSIC
    constexpr bool
    _M_is_constprop() const
    {
      if constexpr (_S_array_size == 1)
        return __builtin_constant_p(_M_bits[0]);
      else
        {
          for (int __i = 0; __i < _S_array_size; ++__i)
            if (!__builtin_constant_p(_M_bits[__i]))
              return false;
          return true;
        }
    }
  };

// }}}
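// Example (illustrative): _BitMask<8>(0b00001010u) has count() == 2,
// operator[](1) == true, and _M_extract<1>() yields a 7-bit mask holding
// 0b0000101 (the least significant bit dropped).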

// vvv ---- builtin vector types [[gnu::vector_size(N)]] and operations ---- vvv
// __min_vector_size {{{
template <typename _Tp = void>
  static inline constexpr int __min_vector_size = 2 * sizeof(_Tp);

#if _GLIBCXX_SIMD_HAVE_NEON
template <>
  inline constexpr int __min_vector_size<void> = 8;
#else
template <>
  inline constexpr int __min_vector_size<void> = 16;
#endif

// }}}
// __vector_type {{{
template <typename _Tp, size_t _Np, typename = void>
  struct __vector_type_n {};

// substitution failure for 0-element case
template <typename _Tp>
  struct __vector_type_n<_Tp, 0, void> {};

// special case 1-element to be _Tp itself
template <typename _Tp>
  struct __vector_type_n<_Tp, 1, enable_if_t<__is_vectorizable_v<_Tp>>>
  { using type = _Tp; };

// else, use GNU-style builtin vector types
template <typename _Tp, size_t _Np>
  struct __vector_type_n<_Tp, _Np,
                         enable_if_t<__is_vectorizable_v<_Tp> && _Np >= 2>>
  {
    static constexpr size_t _S_Np2 = std::__bit_ceil(_Np * sizeof(_Tp));

    static constexpr size_t _S_Bytes =
#ifdef __i386__
      // Using [[gnu::vector_size(8)]] would wreak havoc on the FPU because
      // those objects are passed via MMX registers and nothing ever calls EMMS.
      _S_Np2 == 8 ? 16 :
#endif
      _S_Np2 < __min_vector_size<_Tp> ? __min_vector_size<_Tp>
                                      : _S_Np2;

    using type [[__gnu__::__vector_size__(_S_Bytes)]] = _Tp;
  };

template <typename _Tp, size_t _Bytes, size_t = _Bytes % sizeof(_Tp)>
  struct __vector_type;

template <typename _Tp, size_t _Bytes>
  struct __vector_type<_Tp, _Bytes, 0>
  : __vector_type_n<_Tp, _Bytes / sizeof(_Tp)> {};

template <typename _Tp, size_t _Size>
  using __vector_type_t = typename __vector_type_n<_Tp, _Size>::type;

template <typename _Tp>
  using __vector_type2_t = typename __vector_type<_Tp, 2>::type;
template <typename _Tp>
  using __vector_type4_t = typename __vector_type<_Tp, 4>::type;
template <typename _Tp>
  using __vector_type8_t = typename __vector_type<_Tp, 8>::type;
template <typename _Tp>
  using __vector_type16_t = typename __vector_type<_Tp, 16>::type;
template <typename _Tp>
  using __vector_type32_t = typename __vector_type<_Tp, 32>::type;
template <typename _Tp>
  using __vector_type64_t = typename __vector_type<_Tp, 64>::type;

// }}}
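// Example (illustrative): __vector_type_t<float, 3> rounds 12 bytes up to a
// 16-byte builtin vector (4 float slots), while __vector_type_t<float, 1> is
// plain float via the 1-element specialization.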
// __is_vector_type {{{
template <typename _Tp, typename = void_t<>>
  struct __is_vector_type : false_type {};

template <typename _Tp>
  struct __is_vector_type<
    _Tp, void_t<typename __vector_type<
           remove_reference_t<decltype(declval<_Tp>()[0])>, sizeof(_Tp)>::type>>
  : is_same<_Tp, typename __vector_type<
                   remove_reference_t<decltype(declval<_Tp>()[0])>,
                   sizeof(_Tp)>::type> {};

template <typename _Tp>
  inline constexpr bool __is_vector_type_v = __is_vector_type<_Tp>::value;

// }}}
// __is_intrinsic_type {{{
#if _GLIBCXX_SIMD_HAVE_SSE_ABI
template <typename _Tp>
  using __is_intrinsic_type = __is_vector_type<_Tp>;
#else // not SSE (x86)
template <typename _Tp, typename = void_t<>>
  struct __is_intrinsic_type : false_type {};

template <typename _Tp>
  struct __is_intrinsic_type<
    _Tp, void_t<typename __intrinsic_type<
           remove_reference_t<decltype(declval<_Tp>()[0])>, sizeof(_Tp)>::type>>
  : is_same<_Tp, typename __intrinsic_type<
                   remove_reference_t<decltype(declval<_Tp>()[0])>,
                   sizeof(_Tp)>::type> {};
#endif

template <typename _Tp>
  inline constexpr bool __is_intrinsic_type_v = __is_intrinsic_type<_Tp>::value;

// }}}
// _VectorTraits{{{
template <typename _Tp, typename = void_t<>>
  struct _VectorTraitsImpl;

template <typename _Tp>
  struct _VectorTraitsImpl<_Tp, enable_if_t<__is_vector_type_v<_Tp>
                                              || __is_intrinsic_type_v<_Tp>>>
  {
    using type = _Tp;
    using value_type = remove_reference_t<decltype(declval<_Tp>()[0])>;
    static constexpr int _S_full_size = sizeof(_Tp) / sizeof(value_type);
    using _Wrapper = _SimdWrapper<value_type, _S_full_size>;
    template <typename _Up, int _W = _S_full_size>
      static constexpr bool _S_is
        = is_same_v<value_type, _Up> && _W == _S_full_size;
  };

template <typename _Tp, size_t _Np>
  struct _VectorTraitsImpl<_SimdWrapper<_Tp, _Np>,
                           void_t<__vector_type_t<_Tp, _Np>>>
  {
    using type = __vector_type_t<_Tp, _Np>;
    using value_type = _Tp;
    static constexpr int _S_full_size = sizeof(type) / sizeof(value_type);
    using _Wrapper = _SimdWrapper<_Tp, _Np>;
    static constexpr bool _S_is_partial = (_Np != _S_full_size);
    static constexpr int _S_partial_width = _Np;
    template <typename _Up, int _W = _S_full_size>
      static constexpr bool _S_is
        = is_same_v<value_type, _Up> && _W == _S_full_size;
  };

template <typename _Tp, typename = typename _VectorTraitsImpl<_Tp>::type>
  using _VectorTraits = _VectorTraitsImpl<_Tp>;

// }}}
// __as_vector{{{
template <typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __as_vector(_V __x)
  {
    if constexpr (__is_vector_type_v<_V>)
      return __x;
    else if constexpr (is_simd<_V>::value || is_simd_mask<_V>::value)
      return __data(__x)._M_data;
    else if constexpr (__is_vectorizable_v<_V>)
      return __vector_type_t<_V, 2>{__x};
    else
      return __x._M_data;
  }

// }}}
// __as_wrapper{{{
template <size_t _Np = 0, typename _V>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __as_wrapper(_V __x)
  {
    if constexpr (__is_vector_type_v<_V>)
      return _SimdWrapper<typename _VectorTraits<_V>::value_type,
                          (_Np > 0 ? _Np : _VectorTraits<_V>::_S_full_size)>(__x);
    else if constexpr (is_simd<_V>::value || is_simd_mask<_V>::value)
      {
        static_assert(_V::size() == _Np);
        return __data(__x);
      }
    else
      {
        static_assert(_V::_S_size == _Np);
        return __x;
      }
  }

// }}}
// __intrin_bitcast{{{
template <typename _To, typename _From>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __intrin_bitcast(_From __v)
  {
    static_assert((__is_vector_type_v<_From> || __is_intrinsic_type_v<_From>)
                    && (__is_vector_type_v<_To> || __is_intrinsic_type_v<_To>));
    if constexpr (sizeof(_To) == sizeof(_From))
      return reinterpret_cast<_To>(__v);
    else if constexpr (sizeof(_From) > sizeof(_To))
      if constexpr (sizeof(_To) >= 16)
        return reinterpret_cast<const __may_alias<_To>&>(__v);
      else
        {
          _To __r;
          __builtin_memcpy(&__r, &__v, sizeof(_To));
          return __r;
        }
#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
    else if constexpr (__have_avx && sizeof(_From) == 16 && sizeof(_To) == 32)
      return reinterpret_cast<_To>(__builtin_ia32_ps256_ps(
        reinterpret_cast<__vector_type_t<float, 4>>(__v)));
    else if constexpr (__have_avx512f && sizeof(_From) == 16
                         && sizeof(_To) == 64)
      return reinterpret_cast<_To>(__builtin_ia32_ps512_ps(
        reinterpret_cast<__vector_type_t<float, 4>>(__v)));
    else if constexpr (__have_avx512f && sizeof(_From) == 32
                         && sizeof(_To) == 64)
      return reinterpret_cast<_To>(__builtin_ia32_ps512_256ps(
        reinterpret_cast<__vector_type_t<float, 8>>(__v)));
#endif // _GLIBCXX_SIMD_X86INTRIN
    else if constexpr (sizeof(__v) <= 8)
      return reinterpret_cast<_To>(
        __vector_type_t<__int_for_sizeof_t<_From>, sizeof(_To) / sizeof(_From)>{
          reinterpret_cast<__int_for_sizeof_t<_From>>(__v)});
    else
      {
        static_assert(sizeof(_To) > sizeof(_From));
        _To __r = {};
        __builtin_memcpy(&__r, &__v, sizeof(_From));
        return __r;
      }
  }

// }}}
// __vector_bitcast{{{
template <typename _To, size_t _NN = 0, typename _From,
          typename _FromVT = _VectorTraits<_From>,
          size_t _Np = _NN == 0 ? sizeof(_From) / sizeof(_To) : _NN>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_To, _Np>
  __vector_bitcast(_From __x)
  {
    using _R = __vector_type_t<_To, _Np>;
    return __intrin_bitcast<_R>(__x);
  }

template <typename _To, size_t _NN = 0, typename _Tp, size_t _Nx,
          size_t _Np
            = _NN == 0 ? sizeof(_SimdWrapper<_Tp, _Nx>) / sizeof(_To) : _NN>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_To, _Np>
  __vector_bitcast(const _SimdWrapper<_Tp, _Nx>& __x)
  {
    static_assert(_Np > 1);
    return __intrin_bitcast<__vector_type_t<_To, _Np>>(__x._M_data);
  }

// }}}
// __convert_x86 declarations {{{
#ifdef _GLIBCXX_SIMD_WORKAROUND_PR85048
template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp);

template <typename _To, typename _Tp, typename _TVT = _VectorTraits<_Tp>>
  _To __convert_x86(_Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp, _Tp,
                    _Tp, _Tp, _Tp, _Tp);
#endif // _GLIBCXX_SIMD_WORKAROUND_PR85048

//}}}
// __bit_cast {{{
template <typename _To, typename _From>
  _GLIBCXX_SIMD_INTRINSIC constexpr _To
  __bit_cast(const _From __x)
  {
#if __has_builtin(__builtin_bit_cast)
    return __builtin_bit_cast(_To, __x);
#else
    static_assert(sizeof(_To) == sizeof(_From));
    constexpr bool __to_is_vectorizable
      = is_arithmetic_v<_To> || is_enum_v<_To>;
    constexpr bool __from_is_vectorizable
      = is_arithmetic_v<_From> || is_enum_v<_From>;
    if constexpr (__is_vector_type_v<_To> && __is_vector_type_v<_From>)
      return reinterpret_cast<_To>(__x);
    else if constexpr (__is_vector_type_v<_To> && __from_is_vectorizable)
      {
        using _FV [[gnu::vector_size(sizeof(_From))]] = _From;
        return reinterpret_cast<_To>(_FV{__x});
      }
    else if constexpr (__to_is_vectorizable && __from_is_vectorizable)
      {
        using _TV [[gnu::vector_size(sizeof(_To))]] = _To;
        using _FV [[gnu::vector_size(sizeof(_From))]] = _From;
        return reinterpret_cast<_TV>(_FV{__x})[0];
      }
    else if constexpr (__to_is_vectorizable && __is_vector_type_v<_From>)
      {
        using _TV [[gnu::vector_size(sizeof(_To))]] = _To;
        return reinterpret_cast<_TV>(__x)[0];
      }
    else
      {
        _To __r;
        __builtin_memcpy(reinterpret_cast<char*>(&__r),
                         reinterpret_cast<const char*>(&__x), sizeof(_To));
        return __r;
      }
#endif
  }

// }}}
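// Example (illustrative): __bit_cast<_UInt>(1.0f) == 0x3f800000u, i.e. the
// IEEE-754 bit pattern of 1.0f reinterpreted as an unsigned int of the same
// size.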
// __to_intrin {{{
template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
          typename _R
            = __intrinsic_type_t<typename _TVT::value_type, _TVT::_S_full_size>>
  _GLIBCXX_SIMD_INTRINSIC constexpr _R
  __to_intrin(_Tp __x)
  {
    static_assert(sizeof(__x) <= sizeof(_R),
                  "__to_intrin may never drop values off the end");
    if constexpr (sizeof(__x) == sizeof(_R))
      return reinterpret_cast<_R>(__as_vector(__x));
    else
      {
        using _Up = __int_for_sizeof_t<_Tp>;
        return reinterpret_cast<_R>(
          __vector_type_t<_Up, sizeof(_R) / sizeof(_Up)>{__bit_cast<_Up>(__x)});
      }
  }

// }}}
// __make_vector{{{
template <typename _Tp, typename... _Args>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, sizeof...(_Args)>
  __make_vector(const _Args&... __args)
  {
    return __vector_type_t<_Tp, sizeof...(_Args)>{static_cast<_Tp>(__args)...};
  }

// }}}
// __vector_broadcast{{{
template <size_t _Np, typename _Tp>
  _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
  __vector_broadcast(_Tp __x)
  {
    return __call_with_n_evaluations<_Np>(
      [](auto... __xx) { return __vector_type_t<_Tp, _Np>{__xx...}; },
      [&__x](int) { return __x; });
  }

// }}}
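// Example (illustrative): __vector_broadcast<4>(1.f) evaluates the generator
// four times and yields __vector_type_t<float, 4>{1, 1, 1, 1};
// __make_vector<int>(1, 2, 3, 4) builds {1, 2, 3, 4} directly.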
1805// __generate_vector{{{
1806 template <typename _Tp, size_t _Np, typename _Gp, size_t... _I>
1807 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1808 __generate_vector_impl(_Gp&& __gen, index_sequence<_I...>)
1809 {
1810 return __vector_type_t<_Tp, _Np>{
1811 static_cast<_Tp>(__gen(_SizeConstant<_I>()))...};
1812 }
1813
1814template <typename _V, typename _VVT = _VectorTraits<_V>, typename _Gp>
1815 _GLIBCXX_SIMD_INTRINSIC constexpr _V
1816 __generate_vector(_Gp&& __gen)
1817 {
1818 if constexpr (__is_vector_type_v<_V>)
1819 return __generate_vector_impl<typename _VVT::value_type,
1820 _VVT::_S_full_size>(
1821 static_cast<_Gp&&>(__gen), make_index_sequence<_VVT::_S_full_size>());
1822 else
1823 return __generate_vector_impl<typename _VVT::value_type,
1824 _VVT::_S_partial_width>(
1825 static_cast<_Gp&&>(__gen),
1826 make_index_sequence<_VVT::_S_partial_width>());
1827 }
1828
1829template <typename _Tp, size_t _Np, typename _Gp>
1830 _GLIBCXX_SIMD_INTRINSIC constexpr __vector_type_t<_Tp, _Np>
1831 __generate_vector(_Gp&& __gen)
1832 {
1833 return __generate_vector_impl<_Tp, _Np>(static_cast<_Gp&&>(__gen),
1834 make_index_sequence<_Np>());
1835 }
1836
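// Illustrative: build the index vector {0, 1, 2, 3}; __gen is invoked with
// _SizeConstant<_I>, which converts to the integral index:
//   auto __iota = __generate_vector<int, 4>(
//     [](auto __i) { return int(__i); });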
1837// }}}
1838// __xor{{{
1839template <typename _TW>
1840 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1841 __xor(_TW __a, _TW __b) noexcept
1842 {
1843 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1844 {
1845 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1846 _VectorTraitsImpl<_TW>>::value_type;
1847 if constexpr (is_floating_point_v<_Tp>)
1848 {
1849 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1850 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1851 ^ __vector_bitcast<_Ip>(__b));
1852 }
1853 else if constexpr (__is_vector_type_v<_TW>)
1854 return __a ^ __b;
1855 else
1856 return __a._M_data ^ __b._M_data;
1857 }
1858 else
1859 return __a ^ __b;
1860 }
1861
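// Rationale: the GNU vector extension defines ^, |, & only for integer
// element types, so floating-point inputs are punned to equal-sized unsigned
// integers first. Illustrative sign flip of four floats (a sketch):
//   const auto __signmask = __vector_broadcast<4>(-0.f);
//   __v = __xor(__v, __signmask); // negates every lane of __v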
1862// }}}
1863// __or{{{
1864template <typename _TW>
1865 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1866 __or(_TW __a, _TW __b) noexcept
1867 {
1868 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1869 {
1870 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1871 _VectorTraitsImpl<_TW>>::value_type;
1872 if constexpr (is_floating_point_v<_Tp>)
1873 {
1874 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1875 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1876 | __vector_bitcast<_Ip>(__b));
1877 }
1878 else if constexpr (__is_vector_type_v<_TW>)
1879 return __a | __b;
1880 else
1881 return __a._M_data | __b._M_data;
1882 }
1883 else
1884 return __a | __b;
1885 }
1886
1887// }}}
1888// __and{{{
1889template <typename _TW>
1890 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1891 __and(_TW __a, _TW __b) noexcept
1892 {
1893 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1894 {
1895 using _Tp = typename conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1896 _VectorTraitsImpl<_TW>>::value_type;
1897 if constexpr (is_floating_point_v<_Tp>)
1898 {
1899 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
1900 return __vector_bitcast<_Tp>(__vector_bitcast<_Ip>(__a)
1901 & __vector_bitcast<_Ip>(__b));
1902 }
1903 else if constexpr (__is_vector_type_v<_TW>)
1904 return __a & __b;
1905 else
1906 return __a._M_data & __b._M_data;
1907 }
1908 else
1909 return __a & __b;
1910 }
1911
1912// }}}
1913// __andnot{{{
1914#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
1915static constexpr struct
1916{
1917 _GLIBCXX_SIMD_INTRINSIC __v4sf
1918 operator()(__v4sf __a, __v4sf __b) const noexcept
1919 { return __builtin_ia32_andnps(__a, __b); }
1920
1921 _GLIBCXX_SIMD_INTRINSIC __v2df
1922 operator()(__v2df __a, __v2df __b) const noexcept
1923 { return __builtin_ia32_andnpd(__a, __b); }
1924
1925 _GLIBCXX_SIMD_INTRINSIC __v2di
1926 operator()(__v2di __a, __v2di __b) const noexcept
1927 { return __builtin_ia32_pandn128(__a, __b); }
1928
1929 _GLIBCXX_SIMD_INTRINSIC __v8sf
1930 operator()(__v8sf __a, __v8sf __b) const noexcept
1931 { return __builtin_ia32_andnps256(__a, __b); }
1932
1933 _GLIBCXX_SIMD_INTRINSIC __v4df
1934 operator()(__v4df __a, __v4df __b) const noexcept
1935 { return __builtin_ia32_andnpd256(__a, __b); }
1936
1937 _GLIBCXX_SIMD_INTRINSIC __v4di
1938 operator()(__v4di __a, __v4di __b) const noexcept
1939 {
1940 if constexpr (__have_avx2)
1941 return __builtin_ia32_andnotsi256(__a, __b);
1942 else
1943 return reinterpret_cast<__v4di>(
1944 __builtin_ia32_andnpd256(reinterpret_cast<__v4df>(__a),
1945 reinterpret_cast<__v4df>(__b)));
1946 }
1947
1948 _GLIBCXX_SIMD_INTRINSIC __v16sf
1949 operator()(__v16sf __a, __v16sf __b) const noexcept
1950 {
1951 if constexpr (__have_avx512dq)
1952 return _mm512_andnot_ps(__a, __b);
1953 else
1954 return reinterpret_cast<__v16sf>(
1955 _mm512_andnot_si512(reinterpret_cast<__v8di>(__a),
1956 reinterpret_cast<__v8di>(__b)));
1957 }
1958
1959 _GLIBCXX_SIMD_INTRINSIC __v8df
1960 operator()(__v8df __a, __v8df __b) const noexcept
1961 {
1962 if constexpr (__have_avx512dq)
1963 return _mm512_andnot_pd(__a, __b);
1964 else
1965 return reinterpret_cast<__v8df>(
1966 _mm512_andnot_si512(reinterpret_cast<__v8di>(__a),
1967 reinterpret_cast<__v8di>(__b)));
1968 }
1969
1970 _GLIBCXX_SIMD_INTRINSIC __v8di
1971 operator()(__v8di __a, __v8di __b) const noexcept
1972 { return _mm512_andnot_si512(__a, __b); }
1973} _S_x86_andnot;
1974#endif // _GLIBCXX_SIMD_X86INTRIN && !__clang__
1975
1976template <typename _TW>
1977 _GLIBCXX_SIMD_INTRINSIC constexpr _TW
1978 __andnot(_TW __a, _TW __b) noexcept
1979 {
1980 if constexpr (__is_vector_type_v<_TW> || __is_simd_wrapper_v<_TW>)
1981 {
1982 using _TVT = conditional_t<__is_simd_wrapper_v<_TW>, _TW,
1983 _VectorTraitsImpl<_TW>>;
1984 using _Tp = typename _TVT::value_type;
1985#if _GLIBCXX_SIMD_X86INTRIN && !defined __clang__
1986 if constexpr (sizeof(_TW) >= 16)
1987 {
1988 const auto __ai = __to_intrin(__a);
1989 const auto __bi = __to_intrin(__b);
1990 if (!__builtin_is_constant_evaluated()
1991 && !(__builtin_constant_p(__ai) && __builtin_constant_p(__bi)))
1992 {
1993 const auto __r = _S_x86_andnot(__ai, __bi);
1994 if constexpr (is_convertible_v<decltype(__r), _TW>)
1995 return __r;
1996 else
1997 return reinterpret_cast<typename _TVT::type>(__r);
1998 }
1999 }
#endif // _GLIBCXX_SIMD_X86INTRIN && !__clang__
2001 using _Ip = make_unsigned_t<__int_for_sizeof_t<_Tp>>;
2002 return __vector_bitcast<_Tp>(~__vector_bitcast<_Ip>(__a)
2003 & __vector_bitcast<_Ip>(__b));
2004 }
2005 else
2006 return ~__a & __b;
2007 }
2008
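// Note the operand order: __andnot(__a, __b) computes ~__a & __b, matching
// the x86 [v]andn{ps,pd} and pandn semantics. Illustrative:
//   __r = __andnot(__mask, __x); // clears in __x every bit set in __mask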
2009// }}}
2010// __not{{{
2011template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2012 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
2013 __not(_Tp __a) noexcept
2014 {
2015 if constexpr (is_floating_point_v<typename _TVT::value_type>)
2016 return reinterpret_cast<typename _TVT::type>(
2017 ~__vector_bitcast<unsigned>(__a));
2018 else
2019 return ~__a;
2020 }
2021
2022// }}}
2023// __concat{{{
2024template <typename _Tp, typename _TVT = _VectorTraits<_Tp>,
2025 typename _R = __vector_type_t<typename _TVT::value_type,
2026 _TVT::_S_full_size * 2>>
2027 constexpr _R
  __concat(_Tp __a_, _Tp __b_)
2029 {
2030#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_1
2031 using _W
2032 = conditional_t<is_floating_point_v<typename _TVT::value_type>, double,
2033 conditional_t<(sizeof(_Tp) >= 2 * sizeof(long long)),
2034 long long, typename _TVT::value_type>>;
    constexpr int __input_width = sizeof(_Tp) / sizeof(_W);
    const auto __a = __vector_bitcast<_W>(__a_);
    const auto __b = __vector_bitcast<_W>(__b_);
2038 using _Up = __vector_type_t<_W, sizeof(_R) / sizeof(_W)>;
2039#else
    constexpr int __input_width = _TVT::_S_full_size;
    const _Tp& __a = __a_;
    const _Tp& __b = __b_;
2043 using _Up = _R;
2044#endif
    if constexpr (__input_width == 2)
2046 return reinterpret_cast<_R>(_Up{__a[0], __a[1], __b[0], __b[1]});
    else if constexpr (__input_width == 4)
2048 return reinterpret_cast<_R>(
2049 _Up{__a[0], __a[1], __a[2], __a[3], __b[0], __b[1], __b[2], __b[3]});
    else if constexpr (__input_width == 8)
2051 return reinterpret_cast<_R>(
2052 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6], __a[7],
2053 __b[0], __b[1], __b[2], __b[3], __b[4], __b[5], __b[6], __b[7]});
    else if constexpr (__input_width == 16)
2055 return reinterpret_cast<_R>(
2056 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6],
2057 __a[7], __a[8], __a[9], __a[10], __a[11], __a[12], __a[13],
2058 __a[14], __a[15], __b[0], __b[1], __b[2], __b[3], __b[4],
2059 __b[5], __b[6], __b[7], __b[8], __b[9], __b[10], __b[11],
2060 __b[12], __b[13], __b[14], __b[15]});
    else if constexpr (__input_width == 32)
2062 return reinterpret_cast<_R>(
2063 _Up{__a[0], __a[1], __a[2], __a[3], __a[4], __a[5], __a[6],
2064 __a[7], __a[8], __a[9], __a[10], __a[11], __a[12], __a[13],
2065 __a[14], __a[15], __a[16], __a[17], __a[18], __a[19], __a[20],
2066 __a[21], __a[22], __a[23], __a[24], __a[25], __a[26], __a[27],
2067 __a[28], __a[29], __a[30], __a[31], __b[0], __b[1], __b[2],
2068 __b[3], __b[4], __b[5], __b[6], __b[7], __b[8], __b[9],
2069 __b[10], __b[11], __b[12], __b[13], __b[14], __b[15], __b[16],
2070 __b[17], __b[18], __b[19], __b[20], __b[21], __b[22], __b[23],
2071 __b[24], __b[25], __b[26], __b[27], __b[28], __b[29], __b[30],
2072 __b[31]});
2073 }
2074
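// Illustrative: concatenating two 4 x float vectors yields one 8 x float
// vector holding __a[0..3] followed by __b[0..3]:
//   __vector_type_t<float, 8> __ab = __concat(__a, __b);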
2075// }}}
2076// __zero_extend {{{
2077template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2078 struct _ZeroExtendProxy
2079 {
2080 using value_type = typename _TVT::value_type;
2081 static constexpr size_t _Np = _TVT::_S_full_size;
2082 const _Tp __x;
2083
2084 template <typename _To, typename _ToVT = _VectorTraits<_To>,
2085 typename
2086 = enable_if_t<is_same_v<typename _ToVT::value_type, value_type>>>
2087 _GLIBCXX_SIMD_INTRINSIC operator _To() const
2088 {
2089 constexpr size_t _ToN = _ToVT::_S_full_size;
2090 if constexpr (_ToN == _Np)
2091 return __x;
2092 else if constexpr (_ToN == 2 * _Np)
2093 {
2094#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_3
2095 if constexpr (__have_avx && _TVT::template _S_is<float, 4>)
2096 return __vector_bitcast<value_type>(
2097 _mm256_insertf128_ps(__m256(), __x, 0));
2098 else if constexpr (__have_avx && _TVT::template _S_is<double, 2>)
2099 return __vector_bitcast<value_type>(
2100 _mm256_insertf128_pd(__m256d(), __x, 0));
2101 else if constexpr (__have_avx2 && _Np * sizeof(value_type) == 16)
2102 return __vector_bitcast<value_type>(
2103 _mm256_insertf128_si256(__m256i(), __to_intrin(__x), 0));
2104 else if constexpr (__have_avx512f && _TVT::template _S_is<float, 8>)
2105 {
2106 if constexpr (__have_avx512dq)
2107 return __vector_bitcast<value_type>(
2108 _mm512_insertf32x8(__m512(), __x, 0));
2109 else
2110 return reinterpret_cast<__m512>(
2111 _mm512_insertf64x4(__m512d(),
2112 reinterpret_cast<__m256d>(__x), 0));
2113 }
2114 else if constexpr (__have_avx512f
2115 && _TVT::template _S_is<double, 4>)
2116 return __vector_bitcast<value_type>(
2117 _mm512_insertf64x4(__m512d(), __x, 0));
2118 else if constexpr (__have_avx512f && _Np * sizeof(value_type) == 32)
2119 return __vector_bitcast<value_type>(
2120 _mm512_inserti64x4(__m512i(), __to_intrin(__x), 0));
2121#endif
2122 return __concat(__x, _Tp());
2123 }
2124 else if constexpr (_ToN == 4 * _Np)
2125 {
2126#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_3
2127 if constexpr (__have_avx512dq && _TVT::template _S_is<double, 2>)
2128 {
2129 return __vector_bitcast<value_type>(
2130 _mm512_insertf64x2(__m512d(), __x, 0));
2131 }
2132 else if constexpr (__have_avx512f
2133 && is_floating_point_v<value_type>)
2134 {
2135 return __vector_bitcast<value_type>(
2136 _mm512_insertf32x4(__m512(), reinterpret_cast<__m128>(__x),
2137 0));
2138 }
2139 else if constexpr (__have_avx512f && _Np * sizeof(value_type) == 16)
2140 {
2141 return __vector_bitcast<value_type>(
2142 _mm512_inserti32x4(__m512i(), __to_intrin(__x), 0));
2143 }
2144#endif
2145 return __concat(__concat(__x, _Tp()),
2146 __vector_type_t<value_type, _Np * 2>());
2147 }
2148 else if constexpr (_ToN == 8 * _Np)
2149 return __concat(operator __vector_type_t<value_type, _Np * 4>(),
2150 __vector_type_t<value_type, _Np * 4>());
2151 else if constexpr (_ToN == 16 * _Np)
2152 return __concat(operator __vector_type_t<value_type, _Np * 8>(),
2153 __vector_type_t<value_type, _Np * 8>());
2154 else
2155 __assert_unreachable<_Tp>();
2156 }
2157 };
2158
2159template <typename _Tp, typename _TVT = _VectorTraits<_Tp>>
2160 _GLIBCXX_SIMD_INTRINSIC _ZeroExtendProxy<_Tp, _TVT>
2161 __zero_extend(_Tp __x)
2162 { return {__x}; }
2163
2164// }}}
2165// __extract<_Np, By>{{{
2166template <int _Offset,
2167 int _SplitBy,
2168 typename _Tp,
2169 typename _TVT = _VectorTraits<_Tp>,
2170 typename _R = __vector_type_t<typename _TVT::value_type,
2171 _TVT::_S_full_size / _SplitBy>>
2172 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2173 __extract(_Tp __in)
2174 {
2175 using value_type = typename _TVT::value_type;
2176#if _GLIBCXX_SIMD_X86INTRIN // {{{
2177 if constexpr (sizeof(_Tp) == 64 && _SplitBy == 4 && _Offset > 0)
2178 {
2179 if constexpr (__have_avx512dq && is_same_v<double, value_type>)
2180 return _mm512_extractf64x2_pd(__to_intrin(__in), _Offset);
2181 else if constexpr (is_floating_point_v<value_type>)
2182 return __vector_bitcast<value_type>(
2183 _mm512_extractf32x4_ps(__intrin_bitcast<__m512>(__in), _Offset));
2184 else
2185 return reinterpret_cast<_R>(
2186 _mm512_extracti32x4_epi32(__intrin_bitcast<__m512i>(__in),
2187 _Offset));
2188 }
2189 else
2190#endif // _GLIBCXX_SIMD_X86INTRIN }}}
2191 {
2192#ifdef _GLIBCXX_SIMD_WORKAROUND_XXX_1
2193 using _W = conditional_t<
2194 is_floating_point_v<value_type>, double,
2195 conditional_t<(sizeof(_R) >= 16), long long, value_type>>;
2196 static_assert(sizeof(_R) % sizeof(_W) == 0);
2197 constexpr int __return_width = sizeof(_R) / sizeof(_W);
2198 using _Up = __vector_type_t<_W, __return_width>;
2199 const auto __x = __vector_bitcast<_W>(__in);
2200#else
2201 constexpr int __return_width = _TVT::_S_full_size / _SplitBy;
2202 using _Up = _R;
2203 const __vector_type_t<value_type, _TVT::_S_full_size>& __x
2204 = __in; // only needed for _Tp = _SimdWrapper<value_type, _Np>
2205#endif
2206 constexpr int _O = _Offset * __return_width;
2207 return __call_with_subscripts<__return_width, _O>(
2208 __x, [](auto... __entries) {
2209 return reinterpret_cast<_R>(_Up{__entries...});
2210 });
2211 }
2212 }
2213
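// Illustrative: __extract<1, 2>(__v) on an 8-element vector returns the
// upper half, i.e. {__v[4], __v[5], __v[6], __v[7]}; _Offset counts in
// units of the result width.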
2214// }}}
2215// __lo/__hi64[z]{{{
2216template <typename _Tp,
2217 typename _R
2218 = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2219 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2220 __lo64(_Tp __x)
2221 {
2222 _R __r{};
2223 __builtin_memcpy(&__r, &__x, 8);
2224 return __r;
2225 }
2226
2227template <typename _Tp,
2228 typename _R
2229 = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2230 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2231 __hi64(_Tp __x)
2232 {
2233 static_assert(sizeof(_Tp) == 16, "use __hi64z if you meant it");
2234 _R __r{};
2235 __builtin_memcpy(&__r, reinterpret_cast<const char*>(&__x) + 8, 8);
2236 return __r;
2237 }
2238
2239template <typename _Tp,
2240 typename _R
2241 = __vector_type8_t<typename _VectorTraits<_Tp>::value_type>>
2242 _GLIBCXX_SIMD_INTRINSIC constexpr _R
2243 __hi64z([[maybe_unused]] _Tp __x)
2244 {
2245 _R __r{};
2246 if constexpr (sizeof(_Tp) == 16)
2247 __builtin_memcpy(&__r, reinterpret_cast<const char*>(&__x) + 8, 8);
2248 return __r;
2249 }
2250
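// Illustrative: for a 16-byte vector __v (e.g. 4 x float), __lo64(__v) and
// __hi64(__v) return the low and high 8-byte halves as 2-element vectors;
// __hi64z also accepts smaller inputs, for which it returns all zeros.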
2251// }}}
2252// __lo/__hi128{{{
2253template <typename _Tp>
2254 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2255 __lo128(_Tp __x)
2256 { return __extract<0, sizeof(_Tp) / 16>(__x); }
2257
2258template <typename _Tp>
2259 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2260 __hi128(_Tp __x)
2261 {
2262 static_assert(sizeof(__x) == 32);
2263 return __extract<1, 2>(__x);
2264 }
2265
2266// }}}
2267// __lo/__hi256{{{
2268template <typename _Tp>
2269 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2270 __lo256(_Tp __x)
2271 {
2272 static_assert(sizeof(__x) == 64);
2273 return __extract<0, 2>(__x);
2274 }
2275
2276template <typename _Tp>
2277 _GLIBCXX_SIMD_INTRINSIC constexpr auto
2278 __hi256(_Tp __x)
2279 {
2280 static_assert(sizeof(__x) == 64);
2281 return __extract<1, 2>(__x);
2282 }
2283
2284// }}}
2285// __auto_bitcast{{{
2286template <typename _Tp>
2287 struct _AutoCast
2288 {
2289 static_assert(__is_vector_type_v<_Tp>);
2290
2291 const _Tp __x;
2292
2293 template <typename _Up, typename _UVT = _VectorTraits<_Up>>
2294 _GLIBCXX_SIMD_INTRINSIC constexpr operator _Up() const
2295 { return __intrin_bitcast<typename _UVT::type>(__x); }
2296 };
2297
2298template <typename _Tp>
2299 _GLIBCXX_SIMD_INTRINSIC constexpr _AutoCast<_Tp>
2300 __auto_bitcast(const _Tp& __x)
2301 { return {__x}; }
2302
2303template <typename _Tp, size_t _Np>
2304 _GLIBCXX_SIMD_INTRINSIC constexpr
2305 _AutoCast<typename _SimdWrapper<_Tp, _Np>::_BuiltinType>
2306 __auto_bitcast(const _SimdWrapper<_Tp, _Np>& __x)
2307 { return {__x._M_data}; }
2308
2309// }}}
2310// ^^^ ---- builtin vector types [[gnu::vector_size(N)]] and operations ---- ^^^
2311
2312#if _GLIBCXX_SIMD_HAVE_SSE_ABI
2313// __bool_storage_member_type{{{
2314#if _GLIBCXX_SIMD_HAVE_AVX512F && _GLIBCXX_SIMD_X86INTRIN
2315template <size_t _Size>
2316 struct __bool_storage_member_type
2317 {
2318 static_assert((_Size & (_Size - 1)) != 0,
2319 "This trait may only be used for non-power-of-2 sizes. "
2320 "Power-of-2 sizes must be specialized.");
2321 using type =
2322 typename __bool_storage_member_type<std::__bit_ceil(_Size)>::type;
2323 };
2324
2325template <>
2326 struct __bool_storage_member_type<1> { using type = bool; };
2327
2328template <>
2329 struct __bool_storage_member_type<2> { using type = __mmask8; };
2330
2331template <>
2332 struct __bool_storage_member_type<4> { using type = __mmask8; };
2333
2334template <>
2335 struct __bool_storage_member_type<8> { using type = __mmask8; };
2336
2337template <>
2338 struct __bool_storage_member_type<16> { using type = __mmask16; };
2339
2340template <>
2341 struct __bool_storage_member_type<32> { using type = __mmask32; };
2342
2343template <>
2344 struct __bool_storage_member_type<64> { using type = __mmask64; };
2345#endif // _GLIBCXX_SIMD_HAVE_AVX512F
2346
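// E.g. __bool_storage_member_type<12>::type is __mmask16: 12 is not a power
// of two, so the primary template forwards via __bit_ceil(12) == 16.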
2347// }}}
2348// __intrinsic_type (x86){{{
2349// the following excludes bool via __is_vectorizable
2350#if _GLIBCXX_SIMD_HAVE_SSE
2351template <typename _Tp, size_t _Bytes>
2352 struct __intrinsic_type<_Tp, _Bytes,
2353 enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 64>>
2354 {
2355 static_assert(!is_same_v<_Tp, long double>,
2356 "no __intrinsic_type support for long double on x86");
2357
2358 static constexpr size_t _S_VBytes = _Bytes <= 16 ? 16
2359 : _Bytes <= 32 ? 32
2360 : 64;
2361
2362 using type [[__gnu__::__vector_size__(_S_VBytes)]]
2363 = conditional_t<is_integral_v<_Tp>, long long int, _Tp>;
2364 };
2365#endif // _GLIBCXX_SIMD_HAVE_SSE
2366
2367// }}}
2368#endif // _GLIBCXX_SIMD_HAVE_SSE_ABI
2369// __intrinsic_type (ARM){{{
2370#if _GLIBCXX_SIMD_HAVE_NEON
2371template <>
2372 struct __intrinsic_type<float, 8, void>
2373 { using type = float32x2_t; };
2374
2375template <>
2376 struct __intrinsic_type<float, 16, void>
2377 { using type = float32x4_t; };
2378
2379#if _GLIBCXX_SIMD_HAVE_NEON_A64
2380template <>
2381 struct __intrinsic_type<double, 8, void>
2382 { using type = float64x1_t; };
2383
2384template <>
2385 struct __intrinsic_type<double, 16, void>
2386 { using type = float64x2_t; };
2387#endif
2388
2389#define _GLIBCXX_SIMD_ARM_INTRIN(_Bits, _Np) \
2390template <> \
2391 struct __intrinsic_type<__int_with_sizeof_t<_Bits / 8>, \
2392 _Np * _Bits / 8, void> \
2393 { using type = int##_Bits##x##_Np##_t; }; \
2394template <> \
2395 struct __intrinsic_type<make_unsigned_t<__int_with_sizeof_t<_Bits / 8>>, \
2396 _Np * _Bits / 8, void> \
2397 { using type = uint##_Bits##x##_Np##_t; }
2398_GLIBCXX_SIMD_ARM_INTRIN(8, 8);
2399_GLIBCXX_SIMD_ARM_INTRIN(8, 16);
2400_GLIBCXX_SIMD_ARM_INTRIN(16, 4);
2401_GLIBCXX_SIMD_ARM_INTRIN(16, 8);
2402_GLIBCXX_SIMD_ARM_INTRIN(32, 2);
2403_GLIBCXX_SIMD_ARM_INTRIN(32, 4);
2404_GLIBCXX_SIMD_ARM_INTRIN(64, 1);
2405_GLIBCXX_SIMD_ARM_INTRIN(64, 2);
2406#undef _GLIBCXX_SIMD_ARM_INTRIN
2407
2408template <typename _Tp, size_t _Bytes>
2409 struct __intrinsic_type<_Tp, _Bytes,
2410 enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
2411 {
2412 static constexpr int _SVecBytes = _Bytes <= 8 ? 8 : 16;
2413 using _Ip = __int_for_sizeof_t<_Tp>;
2414 using _Up = conditional_t<
2415 is_floating_point_v<_Tp>, _Tp,
2416 conditional_t<is_unsigned_v<_Tp>, make_unsigned_t<_Ip>, _Ip>>;
2417 static_assert(!is_same_v<_Tp, _Up> || _SVecBytes != _Bytes,
2418 "should use explicit specialization above");
2419 using type = typename __intrinsic_type<_Up, _SVecBytes>::type;
2420 };
2421#endif // _GLIBCXX_SIMD_HAVE_NEON
2422
2423// }}}
2424// __intrinsic_type (PPC){{{
2425#ifdef __ALTIVEC__
2426template <typename _Tp>
2427 struct __intrinsic_type_impl;
2428
2429#define _GLIBCXX_SIMD_PPC_INTRIN(_Tp) \
2430 template <> \
2431 struct __intrinsic_type_impl<_Tp> { using type = __vector _Tp; }
2432_GLIBCXX_SIMD_PPC_INTRIN(float);
2433#ifdef __VSX__
2434_GLIBCXX_SIMD_PPC_INTRIN(double);
2435#endif
2436_GLIBCXX_SIMD_PPC_INTRIN(signed char);
2437_GLIBCXX_SIMD_PPC_INTRIN(unsigned char);
2438_GLIBCXX_SIMD_PPC_INTRIN(signed short);
2439_GLIBCXX_SIMD_PPC_INTRIN(unsigned short);
2440_GLIBCXX_SIMD_PPC_INTRIN(signed int);
2441_GLIBCXX_SIMD_PPC_INTRIN(unsigned int);
2442#if defined __VSX__ || __SIZEOF_LONG__ == 4
2443_GLIBCXX_SIMD_PPC_INTRIN(signed long);
2444_GLIBCXX_SIMD_PPC_INTRIN(unsigned long);
2445#endif
2446#ifdef __VSX__
2447_GLIBCXX_SIMD_PPC_INTRIN(signed long long);
2448_GLIBCXX_SIMD_PPC_INTRIN(unsigned long long);
2449#endif
2450#undef _GLIBCXX_SIMD_PPC_INTRIN
2451
2452template <typename _Tp, size_t _Bytes>
2453 struct __intrinsic_type<_Tp, _Bytes,
2454 enable_if_t<__is_vectorizable_v<_Tp> && _Bytes <= 16>>
2455 {
2456 static constexpr bool _S_is_ldouble = is_same_v<_Tp, long double>;
2457 // allow _Tp == long double with -mlong-double-64
2458 static_assert(!(_S_is_ldouble && sizeof(long double) > sizeof(double)),
2459 "no __intrinsic_type support for 128-bit floating point on PowerPC");
2460#ifndef __VSX__
2461 static_assert(!(is_same_v<_Tp, double>
2462 || (_S_is_ldouble && sizeof(long double) == sizeof(double))),
2463 "no __intrinsic_type support for 64-bit floating point on PowerPC w/o VSX");
2464#endif
2465 using type =
2466 typename __intrinsic_type_impl<
2467 conditional_t<is_floating_point_v<_Tp>,
2468 conditional_t<_S_is_ldouble, double, _Tp>,
2469 __int_for_sizeof_t<_Tp>>>::type;
2470 };
2471#endif // __ALTIVEC__
2472
2473// }}}
2474// _SimdWrapper<bool>{{{1
2475template <size_t _Width>
2476 struct _SimdWrapper<bool, _Width,
2477 void_t<typename __bool_storage_member_type<_Width>::type>>
2478 {
2479 using _BuiltinType = typename __bool_storage_member_type<_Width>::type;
2480 using value_type = bool;
2481
2482 static constexpr size_t _S_full_size = sizeof(_BuiltinType) * __CHAR_BIT__;
2483
2484 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<bool, _S_full_size>
2485 __as_full_vector() const { return _M_data; }
2486
2487 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper() = default;
2488 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_BuiltinType __k)
      : _M_data(__k) {}
2490
2491 _GLIBCXX_SIMD_INTRINSIC operator const _BuiltinType&() const
2492 { return _M_data; }
2493
2494 _GLIBCXX_SIMD_INTRINSIC operator _BuiltinType&()
2495 { return _M_data; }
2496
2497 _GLIBCXX_SIMD_INTRINSIC _BuiltinType __intrin() const
2498 { return _M_data; }
2499
2500 _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator[](size_t __i) const
2501 { return _M_data & (_BuiltinType(1) << __i); }
2502
2503 template <size_t __i>
2504 _GLIBCXX_SIMD_INTRINSIC constexpr value_type
2505 operator[](_SizeConstant<__i>) const
2506 { return _M_data & (_BuiltinType(1) << __i); }
2507
2508 _GLIBCXX_SIMD_INTRINSIC constexpr void _M_set(size_t __i, value_type __x)
2509 {
2510 if (__x)
2511 _M_data |= (_BuiltinType(1) << __i);
2512 else
2513 _M_data &= ~(_BuiltinType(1) << __i);
2514 }
2515
2516 _GLIBCXX_SIMD_INTRINSIC
2517 constexpr bool _M_is_constprop() const
2518 { return __builtin_constant_p(_M_data); }
2519
2520 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_none_of() const
2521 {
2522 if (__builtin_constant_p(_M_data))
2523 {
2524 constexpr int __nbits = sizeof(_BuiltinType) * __CHAR_BIT__;
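	      // e.g. _Width == 3 with __mmask8 yields 0b00000111, selecting
	      // only the bits that correspond to actual mask entries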
2525 constexpr _BuiltinType __active_mask
2526 = ~_BuiltinType() >> (__nbits - _Width);
2527 return (_M_data & __active_mask) == 0;
2528 }
2529 return false;
2530 }
2531
2532 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_all_of() const
2533 {
2534 if (__builtin_constant_p(_M_data))
2535 {
2536 constexpr int __nbits = sizeof(_BuiltinType) * __CHAR_BIT__;
2537 constexpr _BuiltinType __active_mask
2538 = ~_BuiltinType() >> (__nbits - _Width);
2539 return (_M_data & __active_mask) == __active_mask;
2540 }
2541 return false;
2542 }
2543
2544 _BuiltinType _M_data;
2545 };
2546
2547// _SimdWrapperBase{{{1
2548template <bool _MustZeroInitPadding, typename _BuiltinType>
2549 struct _SimdWrapperBase;
2550
2551template <typename _BuiltinType>
2552 struct _SimdWrapperBase<false, _BuiltinType> // no padding or no SNaNs
2553 {
2554 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase() = default;
2555 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase(_BuiltinType __init)
2556 : _M_data(__init)
2557 {}
2558
2559 _BuiltinType _M_data;
2560 };
2561
2562template <typename _BuiltinType>
2563 struct _SimdWrapperBase<true, _BuiltinType> // with padding that needs to
2564 // never become SNaN
2565 {
2566 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase() : _M_data() {}
2567 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapperBase(_BuiltinType __init)
2568 : _M_data(__init)
2569 {}
2570
2571 _BuiltinType _M_data;
2572 };
2573
2574// }}}
2575// _SimdWrapper{{{
2576template <typename _Tp, size_t _Width>
2577 struct _SimdWrapper<
2578 _Tp, _Width,
2579 void_t<__vector_type_t<_Tp, _Width>, __intrinsic_type_t<_Tp, _Width>>>
    : _SimdWrapperBase<__has_iec559_behavior<__signaling_NaN, _Tp>::value
                         && sizeof(_Tp) * _Width
                              != sizeof(__vector_type_t<_Tp, _Width>),
                       __vector_type_t<_Tp, _Width>>
  {
    using _Base
      = _SimdWrapperBase<__has_iec559_behavior<__signaling_NaN, _Tp>::value
                           && sizeof(_Tp) * _Width
                                != sizeof(__vector_type_t<_Tp, _Width>),
                         __vector_type_t<_Tp, _Width>>;
2590
2591 static_assert(__is_vectorizable_v<_Tp>);
    static_assert(_Width >= 2); // 1 makes no sense; use _Tp directly instead
2593
2594 using _BuiltinType = __vector_type_t<_Tp, _Width>;
2595 using value_type = _Tp;
2596
2597 static inline constexpr size_t _S_full_size
2598 = sizeof(_BuiltinType) / sizeof(value_type);
2599 static inline constexpr int _S_size = _Width;
2600 static inline constexpr bool _S_is_partial = _S_full_size != _S_size;
2601
2602 using _Base::_M_data;
2603
2604 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper<_Tp, _S_full_size>
2605 __as_full_vector() const
2606 { return _M_data; }
2607
2608 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(initializer_list<_Tp> __init)
2609 : _Base(__generate_from_n_evaluations<_Width, _BuiltinType>(
2610 [&](auto __i) { return __init.begin()[__i.value]; })) {}
2611
2612 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper() = default;
2613 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(const _SimdWrapper&)
2614 = default;
2615 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_SimdWrapper&&) = default;
2616
2617 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
2618 operator=(const _SimdWrapper&) = default;
2619 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper&
2620 operator=(_SimdWrapper&&) = default;
2621
2622 template <typename _V, typename = enable_if_t<disjunction_v<
2623 is_same<_V, __vector_type_t<_Tp, _Width>>,
2624 is_same<_V, __intrinsic_type_t<_Tp, _Width>>>>>
2625 _GLIBCXX_SIMD_INTRINSIC constexpr _SimdWrapper(_V __x)
2626 // __vector_bitcast can convert e.g. __m128 to __vector(2) float
2627 : _Base(__vector_bitcast<_Tp, _Width>(__x)) {}
2628
2629 template <typename... _As,
2630 typename = enable_if_t<((is_same_v<simd_abi::scalar, _As> && ...)
2631 && sizeof...(_As) <= _Width)>>
2632 _GLIBCXX_SIMD_INTRINSIC constexpr
2633 operator _SimdTuple<_Tp, _As...>() const
2634 {
      const auto& __dd = _M_data; // workaround for GCC7 ICE
      return __generate_from_n_evaluations<sizeof...(_As),
                                           _SimdTuple<_Tp, _As...>>(
        [&](auto __i) constexpr { return __dd[int(__i)]; });
2639 }
2640
2641 _GLIBCXX_SIMD_INTRINSIC constexpr operator const _BuiltinType&() const
2642 { return _M_data; }
2643
2644 _GLIBCXX_SIMD_INTRINSIC constexpr operator _BuiltinType&()
2645 { return _M_data; }
2646
2647 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp operator[](size_t __i) const
2648 { return _M_data[__i]; }
2649
2650 template <size_t __i>
2651 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp operator[](_SizeConstant<__i>) const
2652 { return _M_data[__i]; }
2653
2654 _GLIBCXX_SIMD_INTRINSIC constexpr void _M_set(size_t __i, _Tp __x)
2655 { _M_data[__i] = __x; }
2656
2657 _GLIBCXX_SIMD_INTRINSIC
2658 constexpr bool _M_is_constprop() const
2659 { return __builtin_constant_p(_M_data); }
2660
2661 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_none_of() const
2662 {
2663 if (__builtin_constant_p(_M_data))
2664 {
2665 bool __r = true;
2666 if constexpr (is_floating_point_v<_Tp>)
2667 {
2668 using _Ip = __int_for_sizeof_t<_Tp>;
2669 const auto __intdata = __vector_bitcast<_Ip>(_M_data);
2670 __execute_n_times<_Width>(
2671 [&](auto __i) { __r &= __intdata[__i.value] == _Ip(); });
2672 }
2673 else
2674 __execute_n_times<_Width>(
2675 [&](auto __i) { __r &= _M_data[__i.value] == _Tp(); });
2676 return __r;
2677 }
2678 return false;
2679 }
2680
2681 _GLIBCXX_SIMD_INTRINSIC constexpr bool _M_is_constprop_all_of() const
2682 {
2683 if (__builtin_constant_p(_M_data))
2684 {
2685 bool __r = true;
2686 if constexpr (is_floating_point_v<_Tp>)
2687 {
2688 using _Ip = __int_for_sizeof_t<_Tp>;
2689 const auto __intdata = __vector_bitcast<_Ip>(_M_data);
2690 __execute_n_times<_Width>(
2691 [&](auto __i) { __r &= __intdata[__i.value] == ~_Ip(); });
2692 }
2693 else
2694 __execute_n_times<_Width>(
2695 [&](auto __i) { __r &= _M_data[__i.value] == ~_Tp(); });
2696 return __r;
2697 }
2698 return false;
2699 }
2700 };
2701
2702// }}}
2703
2704// __vectorized_sizeof {{{
2705template <typename _Tp>
2706 constexpr size_t
2707 __vectorized_sizeof()
2708 {
2709 if constexpr (!__is_vectorizable_v<_Tp>)
2710 return 0;
2711
2712 if constexpr (sizeof(_Tp) <= 8)
2713 {
2714 // X86:
2715 if constexpr (__have_avx512bw)
2716 return 64;
2717 if constexpr (__have_avx512f && sizeof(_Tp) >= 4)
2718 return 64;
2719 if constexpr (__have_avx2)
2720 return 32;
2721 if constexpr (__have_avx && is_floating_point_v<_Tp>)
2722 return 32;
2723 if constexpr (__have_sse2)
2724 return 16;
2725 if constexpr (__have_sse && is_same_v<_Tp, float>)
2726 return 16;
      /* The following is too much trouble because of mixed MMX and x87 code.
       * While nothing here explicitly calls MMX instructions or uses MMX
       * registers, they are still emitted but no EMMS cleanup is done.
      if constexpr (__have_mmx && sizeof(_Tp) <= 4 && is_integral_v<_Tp>)
	return 8;
       */
2733
2734 // PowerPC:
2735 if constexpr (__have_power8vec
2736 || (__have_power_vmx && (sizeof(_Tp) < 8))
2737 || (__have_power_vsx && is_floating_point_v<_Tp>) )
2738 return 16;
2739
2740 // ARM:
2741 if constexpr (__have_neon_a64
2742 || (__have_neon_a32 && !is_same_v<_Tp, double>) )
2743 return 16;
      if constexpr (__have_neon
		    && sizeof(_Tp) < 8
		    // Only allow fp if the user allows non-IEC559 fp (e.g.
		    // via -ffast-math). ARMv7 NEON fp does not conform to
		    // IEC559.
		    && (__support_neon_float || !is_floating_point_v<_Tp>))
2750 return 16;
2751 }
2752
2753 return sizeof(_Tp);
2754 }
2755
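// E.g. on x86-64 with only SSE2, __vectorized_sizeof<float>() is 16 (four
// floats per native_simd<float>); with AVX2 it is 32, and with AVX512F 64.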
2756// }}}
2757namespace simd_abi {
2758// most of simd_abi is defined in simd_detail.h
2759template <typename _Tp>
2760 inline constexpr int max_fixed_size
2761 = (__have_avx512bw && sizeof(_Tp) == 1) ? 64 : 32;
2762
2763// compatible {{{
2764#if defined __x86_64__ || defined __aarch64__
2765template <typename _Tp>
2766 using compatible = conditional_t<(sizeof(_Tp) <= 8), _VecBuiltin<16>, scalar>;
2767#elif defined __ARM_NEON
2768// FIXME: not sure, probably needs to be scalar (or dependent on the hard-float
2769// ABI?)
2770template <typename _Tp>
2771 using compatible
2772 = conditional_t<(sizeof(_Tp) < 8
2773 && (__support_neon_float || !is_floating_point_v<_Tp>)),
2774 _VecBuiltin<16>, scalar>;
2775#else
2776template <typename>
2777 using compatible = scalar;
2778#endif
2779
2780// }}}
2781// native {{{
2782template <typename _Tp>
2783 constexpr auto
2784 __determine_native_abi()
2785 {
2786 constexpr size_t __bytes = __vectorized_sizeof<_Tp>();
2787 if constexpr (__bytes == sizeof(_Tp))
2788 return static_cast<scalar*>(nullptr);
2789 else if constexpr (__have_avx512vl || (__have_avx512f && __bytes == 64))
2790 return static_cast<_VecBltnBtmsk<__bytes>*>(nullptr);
2791 else
2792 return static_cast<_VecBuiltin<__bytes>*>(nullptr);
2793 }
2794
2795template <typename _Tp, typename = enable_if_t<__is_vectorizable_v<_Tp>>>
2796 using native = remove_pointer_t<decltype(__determine_native_abi<_Tp>())>;
2797
2798// }}}
2799// __default_abi {{{
2800#if defined _GLIBCXX_SIMD_DEFAULT_ABI
2801template <typename _Tp>
2802 using __default_abi = _GLIBCXX_SIMD_DEFAULT_ABI<_Tp>;
2803#else
2804template <typename _Tp>
2805 using __default_abi = compatible<_Tp>;
2806#endif
2807
2808// }}}
2809} // namespace simd_abi
2810
2811// traits {{{1
2812// is_abi_tag {{{2
2813template <typename _Tp, typename = void_t<>>
2814 struct is_abi_tag : false_type {};
2815
2816template <typename _Tp>
2817 struct is_abi_tag<_Tp, void_t<typename _Tp::_IsValidAbiTag>>
2818 : public _Tp::_IsValidAbiTag {};
2819
2820template <typename _Tp>
2821 inline constexpr bool is_abi_tag_v = is_abi_tag<_Tp>::value;
2822
2823// is_simd(_mask) {{{2
2824template <typename _Tp>
2825 struct is_simd : public false_type {};
2826
2827template <typename _Tp>
2828 inline constexpr bool is_simd_v = is_simd<_Tp>::value;
2829
2830template <typename _Tp>
2831 struct is_simd_mask : public false_type {};
2832
2833template <typename _Tp>
  inline constexpr bool is_simd_mask_v = is_simd_mask<_Tp>::value;
2835
2836// simd_size {{{2
2837template <typename _Tp, typename _Abi, typename = void>
2838 struct __simd_size_impl {};
2839
2840template <typename _Tp, typename _Abi>
2841 struct __simd_size_impl<
2842 _Tp, _Abi,
2843 enable_if_t<conjunction_v<__is_vectorizable<_Tp>, is_abi_tag<_Abi>>>>
2844 : _SizeConstant<_Abi::template _S_size<_Tp>> {};
2845
2846template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2847 struct simd_size : __simd_size_impl<_Tp, _Abi> {};
2848
2849template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2850 inline constexpr size_t simd_size_v = simd_size<_Tp, _Abi>::value;
2851
2852// simd_abi::deduce {{{2
2853template <typename _Tp, size_t _Np, typename = void>
2854 struct __deduce_impl;
2855
2856namespace simd_abi {
/**
 * @tparam _Tp The requested `value_type` for the elements.
 * @tparam _Np The requested number of elements.
 * @tparam _Abis This parameter is ignored, since this implementation cannot
 * make any use of it. Either a good native ABI is matched and used as the
 * `type` alias, or the `fixed_size<_Np>` ABI is used, which internally is
 * built from the best matching native ABIs.
 */
2865template <typename _Tp, size_t _Np, typename...>
2866 struct deduce : __deduce_impl<_Tp, _Np> {};
2867
2868template <typename _Tp, size_t _Np, typename... _Abis>
2869 using deduce_t = typename deduce<_Tp, _Np, _Abis...>::type;
2870} // namespace simd_abi
2871
2872// }}}2
2873// rebind_simd {{{2
2874template <typename _Tp, typename _V, typename = void>
2875 struct rebind_simd;
2876
2877template <typename _Tp, typename _Up, typename _Abi>
2878 struct rebind_simd<
2879 _Tp, simd<_Up, _Abi>,
2880 void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
2881 {
2882 using type
2883 = simd<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>;
2884 };
2885
2886template <typename _Tp, typename _Up, typename _Abi>
2887 struct rebind_simd<
2888 _Tp, simd_mask<_Up, _Abi>,
2889 void_t<simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>>
2890 {
2891 using type
2892 = simd_mask<_Tp, simd_abi::deduce_t<_Tp, simd_size_v<_Up, _Abi>, _Abi>>;
2893 };
2894
2895template <typename _Tp, typename _V>
2896 using rebind_simd_t = typename rebind_simd<_Tp, _V>::type;
2897
2898// resize_simd {{{2
2899template <int _Np, typename _V, typename = void>
2900 struct resize_simd;
2901
2902template <int _Np, typename _Tp, typename _Abi>
2903 struct resize_simd<_Np, simd<_Tp, _Abi>,
2904 void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
2905 { using type = simd<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
2906
2907template <int _Np, typename _Tp, typename _Abi>
2908 struct resize_simd<_Np, simd_mask<_Tp, _Abi>,
2909 void_t<simd_abi::deduce_t<_Tp, _Np, _Abi>>>
2910 { using type = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np, _Abi>>; };
2911
2912template <int _Np, typename _V>
2913 using resize_simd_t = typename resize_simd<_Np, _V>::type;
2914
2915// }}}2
2916// memory_alignment {{{2
2917template <typename _Tp, typename _Up = typename _Tp::value_type>
2918 struct memory_alignment
2919 : public _SizeConstant<vector_aligned_tag::_S_alignment<_Tp, _Up>> {};
2920
2921template <typename _Tp, typename _Up = typename _Tp::value_type>
2922 inline constexpr size_t memory_alignment_v = memory_alignment<_Tp, _Up>::value;
2923
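// Illustrative: obtain storage suitable for vector_aligned loads and stores
// (a sketch):
//   alignas(memory_alignment_v<native_simd<float>>)
//     float __buf[native_simd<float>::size()];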
2924// class template simd [simd] {{{1
2925template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2926 class simd;
2927
2928template <typename _Tp, typename _Abi>
2929 struct is_simd<simd<_Tp, _Abi>> : public true_type {};
2930
2931template <typename _Tp>
2932 using native_simd = simd<_Tp, simd_abi::native<_Tp>>;
2933
2934template <typename _Tp, int _Np>
2935 using fixed_size_simd = simd<_Tp, simd_abi::fixed_size<_Np>>;
2936
2937template <typename _Tp, size_t _Np>
2938 using __deduced_simd = simd<_Tp, simd_abi::deduce_t<_Tp, _Np>>;
2939
2940// class template simd_mask [simd_mask] {{{1
2941template <typename _Tp, typename _Abi = simd_abi::__default_abi<_Tp>>
2942 class simd_mask;
2943
2944template <typename _Tp, typename _Abi>
2945 struct is_simd_mask<simd_mask<_Tp, _Abi>> : public true_type {};
2946
2947template <typename _Tp>
2948 using native_simd_mask = simd_mask<_Tp, simd_abi::native<_Tp>>;
2949
2950template <typename _Tp, int _Np>
2951 using fixed_size_simd_mask = simd_mask<_Tp, simd_abi::fixed_size<_Np>>;
2952
2953template <typename _Tp, size_t _Np>
2954 using __deduced_simd_mask = simd_mask<_Tp, simd_abi::deduce_t<_Tp, _Np>>;
2955
2956// casts [simd.casts] {{{1
2957// static_simd_cast {{{2
2958template <typename _Tp, typename _Up, typename _Ap, bool = is_simd_v<_Tp>,
2959 typename = void>
2960 struct __static_simd_cast_return_type;
2961
2962template <typename _Tp, typename _A0, typename _Up, typename _Ap>
2963 struct __static_simd_cast_return_type<simd_mask<_Tp, _A0>, _Up, _Ap, false,
2964 void>
2965 : __static_simd_cast_return_type<simd<_Tp, _A0>, _Up, _Ap> {};
2966
2967template <typename _Tp, typename _Up, typename _Ap>
2968 struct __static_simd_cast_return_type<
2969 _Tp, _Up, _Ap, true, enable_if_t<_Tp::size() == simd_size_v<_Up, _Ap>>>
2970 { using type = _Tp; };
2971
2972template <typename _Tp, typename _Ap>
2973 struct __static_simd_cast_return_type<_Tp, _Tp, _Ap, false,
2974#ifdef _GLIBCXX_SIMD_FIX_P2TS_ISSUE66
2975 enable_if_t<__is_vectorizable_v<_Tp>>
2976#else
2977 void
2978#endif
2979 >
2980 { using type = simd<_Tp, _Ap>; };
2981
2982template <typename _Tp, typename = void>
  struct __safe_make_signed { using type = _Tp; };
2984
2985template <typename _Tp>
2986 struct __safe_make_signed<_Tp, enable_if_t<is_integral_v<_Tp>>>
2987 {
2988 // the extra make_unsigned_t is because of PR85951
2989 using type = make_signed_t<make_unsigned_t<_Tp>>;
2990 };
2991
2992template <typename _Tp>
2993 using safe_make_signed_t = typename __safe_make_signed<_Tp>::type;
2994
2995template <typename _Tp, typename _Up, typename _Ap>
2996 struct __static_simd_cast_return_type<_Tp, _Up, _Ap, false,
2997#ifdef _GLIBCXX_SIMD_FIX_P2TS_ISSUE66
2998 enable_if_t<__is_vectorizable_v<_Tp>>
2999#else
3000 void
3001#endif
3002 >
3003 {
3004 using type = conditional_t<
3005 (is_integral_v<_Up> && is_integral_v<_Tp> &&
3006#ifndef _GLIBCXX_SIMD_FIX_P2TS_ISSUE65
3007 is_signed_v<_Up> != is_signed_v<_Tp> &&
3008#endif
3009 is_same_v<safe_make_signed_t<_Up>, safe_make_signed_t<_Tp>>),
3010 simd<_Tp, _Ap>, fixed_size_simd<_Tp, simd_size_v<_Up, _Ap>>>;
3011 };
3012
3013template <typename _Tp, typename _Up, typename _Ap,
3014 typename _R
3015 = typename __static_simd_cast_return_type<_Tp, _Up, _Ap>::type>
3016 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _R
3017 static_simd_cast(const simd<_Up, _Ap>& __x)
3018 {
3019 if constexpr (is_same<_R, simd<_Up, _Ap>>::value)
3020 return __x;
3021 else
3022 {
3023 _SimdConverter<_Up, _Ap, typename _R::value_type, typename _R::abi_type>
3024 __c;
3025 return _R(__private_init, __c(__data(__x)));
3026 }
3027 }
3028
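// Illustrative (a sketch): given native_simd<int> __i,
//   auto __u = static_simd_cast<unsigned>(__i); // simd<unsigned, _Ap>: only
//                                               // the signedness changes
//   auto __f = static_simd_cast<float>(__i);    // value conversion; yields
//                                               // fixed_size_simd<float, N>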
3029namespace __proposed {
3030template <typename _Tp, typename _Up, typename _Ap,
3031 typename _R
3032 = typename __static_simd_cast_return_type<_Tp, _Up, _Ap>::type>
3033 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR typename _R::mask_type
3034 static_simd_cast(const simd_mask<_Up, _Ap>& __x)
3035 {
3036 using _RM = typename _R::mask_type;
3037 return {__private_init, _RM::abi_type::_MaskImpl::template _S_convert<
3038 typename _RM::simd_type::value_type>(__x)};
3039 }
3040
3041template <typename _To, typename _Up, typename _Abi>
3042 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3043 _To
3044 simd_bit_cast(const simd<_Up, _Abi>& __x)
3045 {
3046 using _Tp = typename _To::value_type;
    using _ToMember
      = typename _SimdTraits<_Tp, typename _To::abi_type>::_SimdMember;
3048 using _From = simd<_Up, _Abi>;
3049 using _FromMember = typename _SimdTraits<_Up, _Abi>::_SimdMember;
3050 // with concepts, the following should be constraints
3051 static_assert(sizeof(_To) == sizeof(_From));
3052 static_assert(is_trivially_copyable_v<_Tp> && is_trivially_copyable_v<_Up>);
    static_assert(is_trivially_copyable_v<_ToMember>
		    && is_trivially_copyable_v<_FromMember>);
3054#if __has_builtin(__builtin_bit_cast)
3055 return {__private_init, __builtin_bit_cast(_ToMember, __data(__x))};
3056#else
3057 return {__private_init, __bit_cast<_ToMember>(__data(__x))};
3058#endif
3059 }
3060
3061template <typename _To, typename _Up, typename _Abi>
3062 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3063 _To
3064 simd_bit_cast(const simd_mask<_Up, _Abi>& __x)
3065 {
3066 using _From = simd_mask<_Up, _Abi>;
3067 static_assert(sizeof(_To) == sizeof(_From));
3068 static_assert(is_trivially_copyable_v<_From>);
    // _To can be simd<T, A>, specifically simd<T, fixed_size<N>>, in which
    // case _To is not trivially copyable.
3071 if constexpr (is_simd_v<_To>)
3072 {
3073 using _Tp = typename _To::value_type;
3074 using _ToMember = typename _SimdTraits<_Tp, typename _To::abi_type>::_SimdMember;
3075 static_assert(is_trivially_copyable_v<_ToMember>);
3076#if __has_builtin(__builtin_bit_cast)
3077 return {__private_init, __builtin_bit_cast(_ToMember, __x)};
3078#else
3079 return {__private_init, __bit_cast<_ToMember>(__x)};
3080#endif
3081 }
3082 else
3083 {
3084 static_assert(is_trivially_copyable_v<_To>);
3085#if __has_builtin(__builtin_bit_cast)
3086 return __builtin_bit_cast(_To, __x);
3087#else
3088 return __bit_cast<_To>(__x);
3089#endif
3090 }
3091 }
3092} // namespace __proposed
3093
3094// simd_cast {{{2
3095template <typename _Tp, typename _Up, typename _Ap,
3096 typename _To = __value_type_or_identity_t<_Tp>>
3097 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR auto
3098 simd_cast(const simd<_ValuePreserving<_Up, _To>, _Ap>& __x)
3099 -> decltype(static_simd_cast<_Tp>(__x))
3100 { return static_simd_cast<_Tp>(__x); }
3101
3102namespace __proposed {
3103template <typename _Tp, typename _Up, typename _Ap,
3104 typename _To = __value_type_or_identity_t<_Tp>>
3105 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR auto
3106 simd_cast(const simd_mask<_ValuePreserving<_Up, _To>, _Ap>& __x)
3107 -> decltype(static_simd_cast<_Tp>(__x))
3108 { return static_simd_cast<_Tp>(__x); }
3109} // namespace __proposed
3110
3111// }}}2
3112// resizing_simd_cast {{{
3113namespace __proposed {
3114/* Proposed spec:
3115
3116template <class T, class U, class Abi>
3117T resizing_simd_cast(const simd<U, Abi>& x)
3118
3119p1 Constraints:
3120 - is_simd_v<T> is true and
3121 - T::value_type is the same type as U
3122
3123p2 Returns:
 A simd object with the i^th element initialized to x[i] for all i in the
 range [0, min(T::size(), simd_size_v<U, Abi>)). If T::size() is larger
 than simd_size_v<U, Abi>, the remaining elements are value-initialized.
3127
3128template <class T, class U, class Abi>
3129T resizing_simd_cast(const simd_mask<U, Abi>& x)
3130
3131p1 Constraints: is_simd_mask_v<T> is true
3132
3133p2 Returns:
 A simd_mask object with the i^th element initialized to x[i] for all i in
 the range [0, min(T::size(), simd_size_v<U, Abi>)). If T::size() is larger
 than simd_size_v<U, Abi>, the remaining elements are initialized to false.
3137
3138 */
3139
3140template <typename _Tp, typename _Up, typename _Ap>
3141 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR enable_if_t<
3142 conjunction_v<is_simd<_Tp>, is_same<typename _Tp::value_type, _Up>>, _Tp>
3143 resizing_simd_cast(const simd<_Up, _Ap>& __x)
3144 {
3145 if constexpr (is_same_v<typename _Tp::abi_type, _Ap>)
3146 return __x;
3147 else if constexpr (simd_size_v<_Up, _Ap> == 1)
3148 {
3149 _Tp __r{};
3150 __r[0] = __x[0];
3151 return __r;
3152 }
3153 else if constexpr (_Tp::size() == 1)
3154 return __x[0];
3155 else if constexpr (sizeof(_Tp) == sizeof(__x)
3156 && !__is_fixed_size_abi_v<_Ap>)
3157 return {__private_init,
3158 __vector_bitcast<typename _Tp::value_type, _Tp::size()>(
3159 _Ap::_S_masked(__data(__x))._M_data)};
3160 else
3161 {
3162 _Tp __r{};
3163 __builtin_memcpy(&__data(__r), &__data(__x),
3164 sizeof(_Up)
3165 * std::min(_Tp::size(), simd_size_v<_Up, _Ap>));
3166 return __r;
3167 }
3168 }
3169
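// Illustrative (a sketch): widening a 4-element simd<float> zero-fills the
// new elements, per the proposed wording above:
//   auto __w = resizing_simd_cast<fixed_size_simd<float, 8>>(__v4);
//   // __w[i] == __v4[i] for i < 4, __w[i] == 0.f otherwise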
3170template <typename _Tp, typename _Up, typename _Ap>
3171 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3172 enable_if_t<is_simd_mask_v<_Tp>, _Tp>
3173 resizing_simd_cast(const simd_mask<_Up, _Ap>& __x)
3174 {
3175 return {__private_init, _Tp::abi_type::_MaskImpl::template _S_convert<
3176 typename _Tp::simd_type::value_type>(__x)};
3177 }
3178} // namespace __proposed
3179
3180// }}}
3181// to_fixed_size {{{2
3182template <typename _Tp, int _Np>
3183 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd<_Tp, _Np>
3184 to_fixed_size(const fixed_size_simd<_Tp, _Np>& __x)
3185 { return __x; }
3186
3187template <typename _Tp, int _Np>
3188 _GLIBCXX_SIMD_INTRINSIC fixed_size_simd_mask<_Tp, _Np>
3189 to_fixed_size(const fixed_size_simd_mask<_Tp, _Np>& __x)
3190 { return __x; }
3191
3192template <typename _Tp, typename _Ap>
3193 _GLIBCXX_SIMD_INTRINSIC auto
3194 to_fixed_size(const simd<_Tp, _Ap>& __x)
3195 {
3196 return simd<_Tp, simd_abi::fixed_size<simd_size_v<_Tp, _Ap>>>([&__x](
3197 auto __i) constexpr { return __x[__i]; });
3198 }
3199
3200template <typename _Tp, typename _Ap>
3201 _GLIBCXX_SIMD_INTRINSIC auto
3202 to_fixed_size(const simd_mask<_Tp, _Ap>& __x)
3203 {
3204 constexpr int _Np = simd_mask<_Tp, _Ap>::size();
3205 fixed_size_simd_mask<_Tp, _Np> __r;
3206 __execute_n_times<_Np>([&](auto __i) constexpr { __r[__i] = __x[__i]; });
3207 return __r;
3208 }
3209
3210// to_native {{{2
3211template <typename _Tp, int _Np>
3212 _GLIBCXX_SIMD_INTRINSIC
3213 enable_if_t<(_Np == native_simd<_Tp>::size()), native_simd<_Tp>>
3214 to_native(const fixed_size_simd<_Tp, _Np>& __x)
3215 {
3216 alignas(memory_alignment_v<native_simd<_Tp>>) _Tp __mem[_Np];
3217 __x.copy_to(__mem, vector_aligned);
3218 return {__mem, vector_aligned};
3219 }
3220
3221template <typename _Tp, size_t _Np>
3222 _GLIBCXX_SIMD_INTRINSIC
3223 enable_if_t<(_Np == native_simd_mask<_Tp>::size()), native_simd_mask<_Tp>>
3224 to_native(const fixed_size_simd_mask<_Tp, _Np>& __x)
3225 {
3226 return native_simd_mask<_Tp>([&](auto __i) constexpr { return __x[__i]; });
3227 }
3228
3229// to_compatible {{{2
3230template <typename _Tp, size_t _Np>
3231 _GLIBCXX_SIMD_INTRINSIC enable_if_t<(_Np == simd<_Tp>::size()), simd<_Tp>>
3232 to_compatible(const simd<_Tp, simd_abi::fixed_size<_Np>>& __x)
3233 {
3234 alignas(memory_alignment_v<simd<_Tp>>) _Tp __mem[_Np];
3235 __x.copy_to(__mem, vector_aligned);
3236 return {__mem, vector_aligned};
3237 }
3238
3239template <typename _Tp, size_t _Np>
3240 _GLIBCXX_SIMD_INTRINSIC
3241 enable_if_t<(_Np == simd_mask<_Tp>::size()), simd_mask<_Tp>>
3242 to_compatible(const simd_mask<_Tp, simd_abi::fixed_size<_Np>>& __x)
3243 { return simd_mask<_Tp>([&](auto __i) constexpr { return __x[__i]; }); }
3244
3245// masked assignment [simd_mask.where] {{{1
3246
3247// where_expression {{{1
3248// const_where_expression<M, T> {{{2
3249template <typename _M, typename _Tp>
3250 class const_where_expression
3251 {
3252 using _V = _Tp;
3253 static_assert(is_same_v<_V, __remove_cvref_t<_Tp>>);
3254
3255 struct _Wrapper { using value_type = _V; };
3256
3257 protected:
3258 using _Impl = typename _V::_Impl;
3259
3260 using value_type =
3261 typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
3262
3263 _GLIBCXX_SIMD_INTRINSIC friend const _M&
3264 __get_mask(const const_where_expression& __x)
3265 { return __x._M_k; }
3266
3267 _GLIBCXX_SIMD_INTRINSIC friend const _Tp&
3268 __get_lvalue(const const_where_expression& __x)
3269 { return __x._M_value; }
3270
3271 const _M& _M_k;
3272 _Tp& _M_value;
3273
3274 public:
3275 const_where_expression(const const_where_expression&) = delete;
3276 const_where_expression& operator=(const const_where_expression&) = delete;
3277
    _GLIBCXX_SIMD_INTRINSIC const_where_expression(const _M& __kk,
						   const _Tp& __dd)
      : _M_k(__kk), _M_value(const_cast<_Tp&>(__dd)) {}
3280
3281 _GLIBCXX_SIMD_INTRINSIC _V
3282 operator-() const&&
3283 {
3284 return {__private_init,
3285 _Impl::template _S_masked_unary<negate>(__data(_M_k),
3286 __data(_M_value))};
3287 }
3288
3289 template <typename _Up, typename _Flags>
3290 [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _V
3291 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3292 {
3293 return {__private_init,
3294 _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
3295 _Flags::template _S_apply<_V>(__mem))};
3296 }
3297
3298 template <typename _Up, typename _Flags>
3299 _GLIBCXX_SIMD_INTRINSIC void
3300 copy_to(_LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3301 {
3302 _Impl::_S_masked_store(__data(_M_value),
3303 _Flags::template _S_apply<_V>(__mem),
3304 __data(_M_k));
3305 }
3306 };
3307
3308// const_where_expression<bool, T> {{{2
3309template <typename _Tp>
3310 class const_where_expression<bool, _Tp>
3311 {
3312 using _M = bool;
3313 using _V = _Tp;
3314
3315 static_assert(is_same_v<_V, __remove_cvref_t<_Tp>>);
3316
3317 struct _Wrapper { using value_type = _V; };
3318
3319 protected:
3320 using value_type =
3321 typename conditional_t<is_arithmetic_v<_V>, _Wrapper, _V>::value_type;
3322
3323 _GLIBCXX_SIMD_INTRINSIC friend const _M&
3324 __get_mask(const const_where_expression& __x)
3325 { return __x._M_k; }
3326
3327 _GLIBCXX_SIMD_INTRINSIC friend const _Tp&
3328 __get_lvalue(const const_where_expression& __x)
3329 { return __x._M_value; }
3330
3331 const bool _M_k;
3332 _Tp& _M_value;
3333
3334 public:
3335 const_where_expression(const const_where_expression&) = delete;
3336 const_where_expression& operator=(const const_where_expression&) = delete;
3337
    _GLIBCXX_SIMD_INTRINSIC const_where_expression(const bool __kk,
						   const _Tp& __dd)
      : _M_k(__kk), _M_value(const_cast<_Tp&>(__dd)) {}
3340
3341 _GLIBCXX_SIMD_INTRINSIC _V operator-() const&&
3342 { return _M_k ? -_M_value : _M_value; }
3343
3344 template <typename _Up, typename _Flags>
3345 [[nodiscard]] _GLIBCXX_SIMD_INTRINSIC _V
3346 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3347 { return _M_k ? static_cast<_V>(__mem[0]) : _M_value; }
3348
3349 template <typename _Up, typename _Flags>
3350 _GLIBCXX_SIMD_INTRINSIC void
3351 copy_to(_LoadStorePtr<_Up, value_type>* __mem, _Flags) const&&
3352 {
3353 if (_M_k)
3354 __mem[0] = _M_value;
3355 }
3356 };
3357
3358// where_expression<M, T> {{{2
3359template <typename _M, typename _Tp>
3360 class where_expression : public const_where_expression<_M, _Tp>
3361 {
3362 using _Impl = typename const_where_expression<_M, _Tp>::_Impl;
3363
    static_assert(!is_const<_Tp>::value,
		  "where_expression may only be instantiated with a non-const "
		  "_Tp parameter");
3367
3368 using typename const_where_expression<_M, _Tp>::value_type;
3369 using const_where_expression<_M, _Tp>::_M_k;
3370 using const_where_expression<_M, _Tp>::_M_value;
3371
    static_assert(
      is_same<typename _M::abi_type, typename _Tp::abi_type>::value);
    static_assert(_M::size() == _Tp::size());
3375
3376 _GLIBCXX_SIMD_INTRINSIC friend _Tp& __get_lvalue(where_expression& __x)
3377 { return __x._M_value; }
3378
3379 public:
3380 where_expression(const where_expression&) = delete;
3381 where_expression& operator=(const where_expression&) = delete;
3382
    _GLIBCXX_SIMD_INTRINSIC where_expression(const _M& __kk, _Tp& __dd)
      : const_where_expression<_M, _Tp>(__kk, __dd) {}
3385
3386 template <typename _Up>
3387 _GLIBCXX_SIMD_INTRINSIC void operator=(_Up&& __x) &&
3388 {
3389 _Impl::_S_masked_assign(__data(_M_k), __data(_M_value),
3390 __to_value_type_or_member_type<_Tp>(
3391 static_cast<_Up&&>(__x)));
3392 }
3393
3394#define _GLIBCXX_SIMD_OP_(__op, __name) \
3395 template <typename _Up> \
3396 _GLIBCXX_SIMD_INTRINSIC void operator __op##=(_Up&& __x)&& \
3397 { \
3398 _Impl::template _S_masked_cassign( \
3399 __data(_M_k), __data(_M_value), \
3400 __to_value_type_or_member_type<_Tp>(static_cast<_Up&&>(__x)), \
3401 [](auto __impl, auto __lhs, auto __rhs) constexpr { \
3402 return __impl.__name(__lhs, __rhs); \
3403 }); \
3404 } \
3405 static_assert(true)
3406 _GLIBCXX_SIMD_OP_(+, _S_plus);
3407 _GLIBCXX_SIMD_OP_(-, _S_minus);
3408 _GLIBCXX_SIMD_OP_(*, _S_multiplies);
3409 _GLIBCXX_SIMD_OP_(/, _S_divides);
3410 _GLIBCXX_SIMD_OP_(%, _S_modulus);
3411 _GLIBCXX_SIMD_OP_(&, _S_bit_and);
3412 _GLIBCXX_SIMD_OP_(|, _S_bit_or);
3413 _GLIBCXX_SIMD_OP_(^, _S_bit_xor);
3414 _GLIBCXX_SIMD_OP_(<<, _S_shift_left);
3415 _GLIBCXX_SIMD_OP_(>>, _S_shift_right);
3416#undef _GLIBCXX_SIMD_OP_
3417
3418 _GLIBCXX_SIMD_INTRINSIC void operator++() &&
3419 {
3420 __data(_M_value)
3421 = _Impl::template _S_masked_unary<__increment>(__data(_M_k),
3422 __data(_M_value));
3423 }
3424
3425 _GLIBCXX_SIMD_INTRINSIC void operator++(int) &&
3426 {
3427 __data(_M_value)
3428 = _Impl::template _S_masked_unary<__increment>(__data(_M_k),
3429 __data(_M_value));
3430 }
3431
3432 _GLIBCXX_SIMD_INTRINSIC void operator--() &&
3433 {
3434 __data(_M_value)
3435 = _Impl::template _S_masked_unary<__decrement>(__data(_M_k),
3436 __data(_M_value));
3437 }
3438
3439 _GLIBCXX_SIMD_INTRINSIC void operator--(int) &&
3440 {
3441 __data(_M_value)
3442 = _Impl::template _S_masked_unary<__decrement>(__data(_M_k),
3443 __data(_M_value));
3444 }
3445
3446 // intentionally hides const_where_expression::copy_from
3447 template <typename _Up, typename _Flags>
3448 _GLIBCXX_SIMD_INTRINSIC void
3449 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) &&
3450 {
3451 __data(_M_value)
3452 = _Impl::_S_masked_load(__data(_M_value), __data(_M_k),
3453 _Flags::template _S_apply<_Tp>(__mem));
3454 }
3455 };
3456
3457// where_expression<bool, T> {{{2
3458template <typename _Tp>
3459 class where_expression<bool, _Tp> : public const_where_expression<bool, _Tp>
3460 {
3461 using _M = bool;
3462 using typename const_where_expression<_M, _Tp>::value_type;
3463 using const_where_expression<_M, _Tp>::_M_k;
3464 using const_where_expression<_M, _Tp>::_M_value;
3465
3466 public:
3467 where_expression(const where_expression&) = delete;
3468 where_expression& operator=(const where_expression&) = delete;
3469
    _GLIBCXX_SIMD_INTRINSIC where_expression(const _M& __kk, _Tp& __dd)
      : const_where_expression<_M, _Tp>(__kk, __dd) {}
3472
3473#define _GLIBCXX_SIMD_OP_(__op) \
3474 template <typename _Up> \
3475 _GLIBCXX_SIMD_INTRINSIC void operator __op(_Up&& __x)&& \
3476 { if (_M_k) _M_value __op static_cast<_Up&&>(__x); }
3477
3478 _GLIBCXX_SIMD_OP_(=)
3479 _GLIBCXX_SIMD_OP_(+=)
3480 _GLIBCXX_SIMD_OP_(-=)
3481 _GLIBCXX_SIMD_OP_(*=)
3482 _GLIBCXX_SIMD_OP_(/=)
3483 _GLIBCXX_SIMD_OP_(%=)
3484 _GLIBCXX_SIMD_OP_(&=)
3485 _GLIBCXX_SIMD_OP_(|=)
3486 _GLIBCXX_SIMD_OP_(^=)
3487 _GLIBCXX_SIMD_OP_(<<=)
3488 _GLIBCXX_SIMD_OP_(>>=)
#undef _GLIBCXX_SIMD_OP_
3490
3491 _GLIBCXX_SIMD_INTRINSIC void operator++() &&
3492 { if (_M_k) ++_M_value; }
3493
3494 _GLIBCXX_SIMD_INTRINSIC void operator++(int) &&
3495 { if (_M_k) ++_M_value; }
3496
3497 _GLIBCXX_SIMD_INTRINSIC void operator--() &&
3498 { if (_M_k) --_M_value; }
3499
3500 _GLIBCXX_SIMD_INTRINSIC void operator--(int) &&
3501 { if (_M_k) --_M_value; }
3502
3503 // intentionally hides const_where_expression::copy_from
3504 template <typename _Up, typename _Flags>
3505 _GLIBCXX_SIMD_INTRINSIC void
3506 copy_from(const _LoadStorePtr<_Up, value_type>* __mem, _Flags) &&
3507 { if (_M_k) _M_value = __mem[0]; }
3508 };
3509
3510// where {{{1
3511template <typename _Tp, typename _Ap>
3512 _GLIBCXX_SIMD_INTRINSIC where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
3513 where(const typename simd<_Tp, _Ap>::mask_type& __k, simd<_Tp, _Ap>& __value)
3514 { return {__k, __value}; }
3515
3516template <typename _Tp, typename _Ap>
3517 _GLIBCXX_SIMD_INTRINSIC
3518 const_where_expression<simd_mask<_Tp, _Ap>, simd<_Tp, _Ap>>
3519 where(const typename simd<_Tp, _Ap>::mask_type& __k,
3520 const simd<_Tp, _Ap>& __value)
3521 { return {__k, __value}; }
3522
3523template <typename _Tp, typename _Ap>
3524 _GLIBCXX_SIMD_INTRINSIC
3525 where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
3526 where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k,
3527 simd_mask<_Tp, _Ap>& __value)
3528 { return {__k, __value}; }
3529
3530template <typename _Tp, typename _Ap>
3531 _GLIBCXX_SIMD_INTRINSIC
3532 const_where_expression<simd_mask<_Tp, _Ap>, simd_mask<_Tp, _Ap>>
3533 where(const remove_const_t<simd_mask<_Tp, _Ap>>& __k,
3534 const simd_mask<_Tp, _Ap>& __value)
3535 { return {__k, __value}; }
3536
3537template <typename _Tp>
3538 _GLIBCXX_SIMD_INTRINSIC where_expression<bool, _Tp>
3539 where(_ExactBool __k, _Tp& __value)
3540 { return {__k, __value}; }
3541
3542template <typename _Tp>
3543 _GLIBCXX_SIMD_INTRINSIC const_where_expression<bool, _Tp>
3544 where(_ExactBool __k, const _Tp& __value)
3545 { return {__k, __value}; }
3546
3547 template <typename _Tp, typename _Ap>
3548 void where(bool __k, simd<_Tp, _Ap>& __value) = delete;
3549
3550 template <typename _Tp, typename _Ap>
3551 void where(bool __k, const simd<_Tp, _Ap>& __value) = delete;
3552
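// Illustrative usage sketch (not part of this header; assumes the public
// <experimental/simd> interface declared here, with stdx as a user-chosen
// alias for std::experimental):
//
//   void __clamp_lanes(stdx::native_simd<float>& __v)
//   {
//     stdx::where(__v < 0.f, __v) = 0.f;    // assign only selected lanes
//     stdx::where(__v > 1.f, __v) *= 0.5f;  // masked compound assignment
//   }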
3553// proposed mask iterations {{{1
3554namespace __proposed {
3555template <size_t _Np>
3556 class where_range
3557 {
3558 const bitset<_Np> __bits;
3559
3560 public:
3561 where_range(bitset<_Np> __b) : __bits(__b) {}
3562
3563 class iterator
3564 {
3565 size_t __mask;
3566 size_t __bit;
3567
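      // Note: __builtin_ctzl(0) is undefined, so __bit is unspecified
      // whenever __mask == 0 (e.g. for the end iterator). This is benign:
      // iterators compare via __mask only, and __bit is never read once
      // __mask is empty.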
3568 _GLIBCXX_SIMD_INTRINSIC void __next_bit()
3569 { __bit = __builtin_ctzl(__mask); }
3570
3571 _GLIBCXX_SIMD_INTRINSIC void __reset_lsb()
3572 {
3573	  // clear the lowest set bit: 01100100 & (01100100 - 1) = 01100000
3574 __mask &= (__mask - 1);
3575 // __asm__("btr %1,%0" : "+r"(__mask) : "r"(__bit));
3576 }
3577
3578 public:
3579 iterator(decltype(__mask) __m) : __mask(__m) { __next_bit(); }
3580 iterator(const iterator&) = default;
3581 iterator(iterator&&) = default;
3582
3583 _GLIBCXX_SIMD_ALWAYS_INLINE size_t operator->() const
3584 { return __bit; }
3585
3586 _GLIBCXX_SIMD_ALWAYS_INLINE size_t operator*() const
3587 { return __bit; }
3588
3589 _GLIBCXX_SIMD_ALWAYS_INLINE iterator& operator++()
3590 {
3591 __reset_lsb();
3592 __next_bit();
3593 return *this;
3594 }
3595
3596 _GLIBCXX_SIMD_ALWAYS_INLINE iterator operator++(int)
3597 {
3598 iterator __tmp = *this;
3599 __reset_lsb();
3600 __next_bit();
3601 return __tmp;
3602 }
3603
3604 _GLIBCXX_SIMD_ALWAYS_INLINE bool operator==(const iterator& __rhs) const
3605 { return __mask == __rhs.__mask; }
3606
3607 _GLIBCXX_SIMD_ALWAYS_INLINE bool operator!=(const iterator& __rhs) const
3608 { return __mask != __rhs.__mask; }
3609 };
3610
3611 iterator begin() const
3612 { return __bits.to_ullong(); }
3613
3614 iterator end() const
3615 { return 0; }
3616 };
3617
3618template <typename _Tp, typename _Ap>
3619 where_range<simd_size_v<_Tp, _Ap>>
3620 where(const simd_mask<_Tp, _Ap>& __k)
3621 { return __k.__to_bitset(); }
3622
3623} // namespace __proposed
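// Illustrative usage sketch (assumes stdx = std::experimental): iterate the
// indices of all set lanes; each step costs one count-trailing-zeros plus
// one clear-lowest-bit.
//
//   float __sum_selected(const stdx::native_simd<float>& __v)
//   {
//     float __s = 0;
//     for (size_t __i : stdx::__proposed::where(__v > 0.f))
//       __s += __v[__i];
//     return __s;
//   }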
3624
3625// }}}1
3626// reductions [simd.reductions] {{{1
3627template <typename _Tp, typename _Abi, typename _BinaryOperation = plus<>>
3628 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3629 reduce(const simd<_Tp, _Abi>& __v,
3630 _BinaryOperation __binary_op = _BinaryOperation())
3631 { return _Abi::_SimdImpl::_S_reduce(__v, __binary_op); }
3632
3633template <typename _M, typename _V, typename _BinaryOperation = plus<>>
3634 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3635 reduce(const const_where_expression<_M, _V>& __x,
3636 typename _V::value_type __identity_element,
3637 _BinaryOperation __binary_op)
3638 {
3639 if (__builtin_expect(none_of(__get_mask(__x)), false))
3640 return __identity_element;
3641
3642 _V __tmp = __identity_element;
3643 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3644 __data(__get_lvalue(__x)));
3645 return reduce(__tmp, __binary_op);
3646 }
3647
3648template <typename _M, typename _V>
3649 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3650 reduce(const const_where_expression<_M, _V>& __x, plus<> __binary_op = {})
3651 { return reduce(__x, 0, __binary_op); }
3652
3653template <typename _M, typename _V>
3654 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3655 reduce(const const_where_expression<_M, _V>& __x, multiplies<> __binary_op)
3656 { return reduce(__x, 1, __binary_op); }
3657
3658template <typename _M, typename _V>
3659 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3660 reduce(const const_where_expression<_M, _V>& __x, bit_and<> __binary_op)
3661 { return reduce(__x, ~typename _V::value_type(), __binary_op); }
3662
3663template <typename _M, typename _V>
3664 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3665 reduce(const const_where_expression<_M, _V>& __x, bit_or<> __binary_op)
3666 { return reduce(__x, 0, __binary_op); }
3667
3668template <typename _M, typename _V>
3669 _GLIBCXX_SIMD_INTRINSIC typename _V::value_type
3670 reduce(const const_where_expression<_M, _V>& __x, bit_xor<> __binary_op)
3671 { return reduce(__x, 0, __binary_op); }
3672
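// Illustrative usage sketch (assumes stdx = std::experimental): a masked
// reduction substitutes __identity_element for the masked-off lanes and
// then reduces over all lanes, so the following sums only the positive
// lanes:
//
//   float __sum_positive(const stdx::native_simd<float>& __v)
//   { return stdx::reduce(stdx::where(__v > 0.f, __v), 0.f, std::plus<>()); }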
3673template <typename _Tp, typename _Abi>
3674 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3675 hmin(const simd<_Tp, _Abi>& __v) noexcept
3676 {
3677 return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Minimum());
3678 }
3679
3680template <typename _Tp, typename _Abi>
3681 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR _Tp
3682 hmax(const simd<_Tp, _Abi>& __v) noexcept
3683 {
3684 return _Abi::_SimdImpl::_S_reduce(__v, __detail::_Maximum());
3685 }
3686
3687template <typename _M, typename _V>
3688 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3689 typename _V::value_type
3690 hmin(const const_where_expression<_M, _V>& __x) noexcept
3691 {
3692 using _Tp = typename _V::value_type;
3693 constexpr _Tp __id_elem =
3694#ifdef __FINITE_MATH_ONLY__
3695 __finite_max_v<_Tp>;
3696#else
3697 __value_or<__infinity, _Tp>(__finite_max_v<_Tp>);
3698#endif
3699 _V __tmp = __id_elem;
3700 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3701 __data(__get_lvalue(__x)));
3702 return _V::abi_type::_SimdImpl::_S_reduce(__tmp, __detail::_Minimum());
3703 }
3704
3705template <typename _M, typename _V>
3706 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3707 typename _V::value_type
3708 hmax(const const_where_expression<_M, _V>& __x) noexcept
3709 {
3710 using _Tp = typename _V::value_type;
3711 constexpr _Tp __id_elem =
3712#ifdef __FINITE_MATH_ONLY__
3713 __finite_min_v<_Tp>;
3714#else
3715 [] {
3716 if constexpr (__value_exists_v<__infinity, _Tp>)
3717 return -__infinity_v<_Tp>;
3718 else
3719 return __finite_min_v<_Tp>;
3720 }();
3721#endif
3722 _V __tmp = __id_elem;
3723 _V::_Impl::_S_masked_assign(__data(__get_mask(__x)), __data(__tmp),
3724 __data(__get_lvalue(__x)));
3725 return _V::abi_type::_SimdImpl::_S_reduce(__tmp, __detail::_Maximum());
3726 }
3727
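// Note on the identity elements above: without __FINITE_MATH_ONLY__ they are
// +/-infinity where the type has one, so reducing an all-false selection is
// well-defined. Sketch (assumes stdx = std::experimental):
//
//   stdx::native_simd<float> __v = 7.f;
//   float __m = stdx::hmin(stdx::where(__v < 0.f, __v)); // +infinity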
3728// }}}1
3729// algorithms [simd.alg] {{{
3730template <typename _Tp, typename _Ap>
3731 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3732 min(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3733 { return {__private_init, _Ap::_SimdImpl::_S_min(__data(__a), __data(__b))}; }
3734
3735template <typename _Tp, typename _Ap>
3736 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3737 max(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3738 { return {__private_init, _Ap::_SimdImpl::_S_max(__data(__a), __data(__b))}; }
3739
3740template <typename _Tp, typename _Ap>
3741 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
3742 pair<simd<_Tp, _Ap>, simd<_Tp, _Ap>>
3743 minmax(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
3744 {
3745    const auto __pair_of_members
3746      = _Ap::_SimdImpl::_S_minmax(__data(__a), __data(__b));
3747    return {simd<_Tp, _Ap>(__private_init, __pair_of_members.first),
3748	    simd<_Tp, _Ap>(__private_init, __pair_of_members.second)};
3749 }
3750
3751template <typename _Tp, typename _Ap>
3752 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
3753 clamp(const simd<_Tp, _Ap>& __v, const simd<_Tp, _Ap>& __lo,
3754 const simd<_Tp, _Ap>& __hi)
3755 {
3756 using _Impl = typename _Ap::_SimdImpl;
3757 return {__private_init,
3758 _Impl::_S_min(__data(__hi),
3759 _Impl::_S_max(__data(__lo), __data(__v)))};
3760 }
3761
3762// }}}
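// Note: clamp computes min(__hi, max(__lo, __v)) per lane, so whenever
// __lo[i] <= __hi[i] the result lies in [__lo[i], __hi[i]]. Illustrative
// sketch (assumes stdx = std::experimental):
//
//   stdx::native_simd<int> __saturate_u8(stdx::native_simd<int> __v)
//   { return stdx::clamp(__v, stdx::native_simd<int>(0),
//                        stdx::native_simd<int>(255)); }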
3763
3764template <size_t... _Sizes, typename _Tp, typename _Ap,
3765 typename = enable_if_t<((_Sizes + ...) == simd<_Tp, _Ap>::size())>>
3766 inline tuple<simd<_Tp, simd_abi::deduce_t<_Tp, _Sizes>>...>
3767 split(const simd<_Tp, _Ap>&);
3768
3769// __extract_part {{{
3770template <int _Index, int _Total, int _Combine = 1, typename _Tp, size_t _Np>
3771 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_CONST
3772 _SimdWrapper<_Tp, _Np / _Total * _Combine>
3773 __extract_part(const _SimdWrapper<_Tp, _Np> __x);
3774
3775template <int _Index, int _Parts, int _Combine = 1, typename _Tp, typename _A0,
3776 typename... _As>
3777 _GLIBCXX_SIMD_INTRINSIC auto
3778 __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x);
3779
3780// }}}
3781// _SizeList {{{
3782template <size_t _V0, size_t... _Values>
3783 struct _SizeList
3784 {
3785 template <size_t _I>
3786 static constexpr size_t _S_at(_SizeConstant<_I> = {})
3787 {
3788 if constexpr (_I == 0)
3789 return _V0;
3790 else
3791 return _SizeList<_Values...>::template _S_at<_I - 1>();
3792 }
3793
3794 template <size_t _I>
3795 static constexpr auto _S_before(_SizeConstant<_I> = {})
3796 {
3797 if constexpr (_I == 0)
3798 return _SizeConstant<0>();
3799 else
3800 return _SizeConstant<
3801 _V0 + _SizeList<_Values...>::template _S_before<_I - 1>()>();
3802 }
3803
3804 template <size_t _Np>
3805 static constexpr auto _S_pop_front(_SizeConstant<_Np> = {})
3806 {
3807 if constexpr (_Np == 0)
3808 return _SizeList();
3809 else
3810 return _SizeList<_Values...>::template _S_pop_front<_Np - 1>();
3811 }
3812 };
3813
3814// }}}
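// Worked example: for _SizeList<2, 4, 2>
//   _S_at<1>()        yields 4
//   _S_before<2>()    yields _SizeConstant<6> (= 2 + 4, the offset of part 2)
//   _S_pop_front<1>() yields _SizeList<4, 2>
// split<_Sizes...>(simd) below relies on _S_before for element offsets.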
3815// __extract_center {{{
3816template <typename _Tp, size_t _Np>
3817 _GLIBCXX_SIMD_INTRINSIC _SimdWrapper<_Tp, _Np / 2>
3818 __extract_center(_SimdWrapper<_Tp, _Np> __x)
3819 {
3820 static_assert(_Np >= 4);
3821 static_assert(_Np % 4 == 0); // x0 - x1 - x2 - x3 -> return {x1, x2}
3822#if _GLIBCXX_SIMD_X86INTRIN // {{{
3823 if constexpr (__have_avx512f && sizeof(_Tp) * _Np == 64)
3824 {
3825 const auto __intrin = __to_intrin(__x);
3826 if constexpr (is_integral_v<_Tp>)
3827 return __vector_bitcast<_Tp>(_mm512_castsi512_si256(
3828 _mm512_shuffle_i32x4(__intrin, __intrin,
3829 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
3830 else if constexpr (sizeof(_Tp) == 4)
3831 return __vector_bitcast<_Tp>(_mm512_castps512_ps256(
3832 _mm512_shuffle_f32x4(__intrin, __intrin,
3833 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
3834 else if constexpr (sizeof(_Tp) == 8)
3835 return __vector_bitcast<_Tp>(_mm512_castpd512_pd256(
3836 _mm512_shuffle_f64x2(__intrin, __intrin,
3837 1 + 2 * 0x4 + 2 * 0x10 + 3 * 0x40)));
3838 else
3839 __assert_unreachable<_Tp>();
3840 }
3841 else if constexpr (sizeof(_Tp) * _Np == 32 && is_floating_point_v<_Tp>)
3842 return __vector_bitcast<_Tp>(
3843 _mm_shuffle_pd(__lo128(__vector_bitcast<double>(__x)),
3844 __hi128(__vector_bitcast<double>(__x)), 1));
3845 else if constexpr (sizeof(__x) == 32 && sizeof(_Tp) * _Np <= 32)
3846 return __vector_bitcast<_Tp>(
3847 _mm_alignr_epi8(__hi128(__vector_bitcast<_LLong>(__x)),
3848 __lo128(__vector_bitcast<_LLong>(__x)),
3849 sizeof(_Tp) * _Np / 4));
3850 else
3851#endif // _GLIBCXX_SIMD_X86INTRIN }}}
3852 {
3853 __vector_type_t<_Tp, _Np / 2> __r;
3854 __builtin_memcpy(&__r,
3855 reinterpret_cast<const char*>(&__x)
3856 + sizeof(_Tp) * _Np / 4,
3857 sizeof(_Tp) * _Np / 2);
3858 return __r;
3859 }
3860 }
3861
3862template <typename _Tp, typename _A0, typename... _As>
3863 _GLIBCXX_SIMD_INTRINSIC
3864 _SimdWrapper<_Tp, _SimdTuple<_Tp, _A0, _As...>::_S_size() / 2>
3865 __extract_center(const _SimdTuple<_Tp, _A0, _As...>& __x)
3866 {
3867 if constexpr (sizeof...(_As) == 0)
3868 return __extract_center(__x.first);
3869 else
3870 return __extract_part<1, 4, 2>(__x);
3871 }
3872
3873// }}}
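// Worked example: for _Np == 8 the result holds elements [2, 6), i.e.
// {x2, x3, x4, x5}; the generic fallback copies _Np / 2 elements starting at
// byte offset sizeof(_Tp) * _Np / 4.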
3874// __split_wrapper {{{
3875template <size_t... _Sizes, typename _Tp, typename... _As>
3876 auto
3877 __split_wrapper(_SizeList<_Sizes...>, const _SimdTuple<_Tp, _As...>& __x)
3878 {
3879 return split<_Sizes...>(
3880 fixed_size_simd<_Tp, _SimdTuple<_Tp, _As...>::_S_size()>(__private_init,
3881 __x));
3882 }
3883
3884// }}}
3885
3886// split<simd>(simd) {{{
3887template <typename _V, typename _Ap,
3888	  size_t _Parts = simd_size_v<typename _V::value_type, _Ap> / _V::size()>
3889  enable_if_t<simd_size_v<typename _V::value_type, _Ap> == _Parts * _V::size()
3890		&& is_simd_v<_V>, array<_V, _Parts>>
3891 split(const simd<typename _V::value_type, _Ap>& __x)
3892 {
3893 using _Tp = typename _V::value_type;
3894    if constexpr (_Parts == 1)
3895 {
3896 return {simd_cast<_V>(__x)};
3897 }
3898 else if (__x._M_is_constprop())
3899 {
3900	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3901 auto __i) constexpr {
3902 return _V([&](auto __j) constexpr {
3903 return __x[__i * _V::size() + __j];
3904 });
3905 });
3906 }
3907 else if constexpr (
3908 __is_fixed_size_abi_v<_Ap>
3909 && (is_same_v<typename _V::abi_type, simd_abi::scalar>
3910 || (__is_fixed_size_abi_v<typename _V::abi_type>
3911 && sizeof(_V) == sizeof(_Tp) * _V::size() // _V doesn't have padding
3912 )))
3913 {
3914 // fixed_size -> fixed_size (w/o padding) or scalar
3915#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
3916 const __may_alias<_Tp>* const __element_ptr
3917 = reinterpret_cast<const __may_alias<_Tp>*>(&__data(__x));
3918	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3919 auto __i) constexpr {
3920 return _V(__element_ptr + __i * _V::size(), vector_aligned);
3921 });
3922#else
3923 const auto& __xx = __data(__x);
3924	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3925 auto __i) constexpr {
3926 [[maybe_unused]] constexpr size_t __offset
3927 = decltype(__i)::value * _V::size();
3928 return _V([&](auto __j) constexpr {
3929 constexpr _SizeConstant<__j + __offset> __k;
3930 return __xx[__k];
3931 });
3932 });
3933#endif
3934 }
3935 else if constexpr (is_same_v<typename _V::abi_type, simd_abi::scalar>)
3936 {
3937 // normally memcpy should work here as well
3938	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3939 auto __i) constexpr { return __x[__i]; });
3940 }
3941 else
3942 {
3943	return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3944 auto __i) constexpr {
3945 if constexpr (__is_fixed_size_abi_v<typename _V::abi_type>)
3946 return _V([&](auto __j) constexpr {
3947 return __x[__i * _V::size() + __j];
3948 });
3949 else
3950 return _V(__private_init,
3951		     __extract_part<decltype(__i)::value, _Parts>(__data(__x)));
3952 });
3953 }
3954 }
3955
3956// }}}
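// Illustrative usage sketch (assumes stdx = std::experimental; both widths
// below are always valid for fixed_size):
//
//   using _V8 = stdx::fixed_size_simd<float, 8>;
//   using _V4 = stdx::fixed_size_simd<float, 4>;
//   std::array<_V4, 2> __halves(const _V8& __x)
//   { return stdx::split<_V4>(__x); }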
3957// split<simd_mask>(simd_mask) {{{
3958template <typename _V, typename _Ap,
3959 size_t _Parts
3960 = simd_size_v<typename _V::simd_type::value_type, _Ap> / _V::size()>
3961 enable_if_t<is_simd_mask_v<_V> && simd_size_v<typename
3962 _V::simd_type::value_type, _Ap> == _Parts * _V::size(), array<_V, _Parts>>
3963 split(const simd_mask<typename _V::simd_type::value_type, _Ap>& __x)
3964 {
3965 if constexpr (is_same_v<_Ap, typename _V::abi_type>)
3966 return {__x};
3967 else if constexpr (_Parts == 1)
3968 return {__proposed::static_simd_cast<_V>(__x)};
3969 else if constexpr (_Parts == 2 && __is_sse_abi<typename _V::abi_type>()
3970 && __is_avx_abi<_Ap>())
3971 return {_V(__private_init, __lo128(__data(__x))),
3972 _V(__private_init, __hi128(__data(__x)))};
3973 else if constexpr (_V::size() <= __CHAR_BIT__ * sizeof(_ULLong))
3974 {
3975 const bitset __bits = __x.__to_bitset();
3976 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3977 auto __i) constexpr {
3978 constexpr size_t __offset = __i * _V::size();
3979 return _V(__bitset_init, (__bits >> __offset).to_ullong());
3980 });
3981 }
3982 else
3983 {
3984 return __generate_from_n_evaluations<_Parts, array<_V, _Parts>>([&](
3985 auto __i) constexpr {
3986 constexpr size_t __offset = __i * _V::size();
3987 return _V(
3988 __private_init, [&](auto __j) constexpr {
3989 return __x[__j + __offset];
3990 });
3991 });
3992 }
3993 }
3994
3995// }}}
3996// split<_Sizes...>(simd) {{{
3997template <size_t... _Sizes, typename _Tp, typename _Ap, typename>
3998 _GLIBCXX_SIMD_ALWAYS_INLINE
3999 tuple<simd<_Tp, simd_abi::deduce_t<_Tp, _Sizes>>...>
4000 split(const simd<_Tp, _Ap>& __x)
4001 {
4002 using _SL = _SizeList<_Sizes...>;
4003 using _Tuple = tuple<__deduced_simd<_Tp, _Sizes>...>;
4004 constexpr size_t _Np = simd_size_v<_Tp, _Ap>;
4005 constexpr size_t _N0 = _SL::template _S_at<0>();
4006 using _V = __deduced_simd<_Tp, _N0>;
4007
4008 if (__x._M_is_constprop())
4009 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
4010 auto __i) constexpr {
4011 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4012 constexpr size_t __offset = _SL::_S_before(__i);
4013 return _Vi([&](auto __j) constexpr { return __x[__offset + __j]; });
4014 });
4015 else if constexpr (_Np == _N0)
4016 {
4017 static_assert(sizeof...(_Sizes) == 1);
4018 return {simd_cast<_V>(__x)};
4019 }
4020 else if constexpr // split from fixed_size, such that __x::first.size == _N0
4021 (__is_fixed_size_abi_v<
4022 _Ap> && __fixed_size_storage_t<_Tp, _Np>::_S_first_size == _N0)
4023 {
4024 static_assert(
4025 !__is_fixed_size_abi_v<typename _V::abi_type>,
4026	  "How can <_Tp, _Np> be a single _SimdTuple entry but a "
4027	  "fixed_size_simd "
4028	  "when deduced?");
4029 // extract first and recurse (__split_wrapper is needed to deduce a new
4030 // _Sizes pack)
4031 return tuple_cat(make_tuple(_V(__private_init, __data(__x).first)),
4032 __split_wrapper(_SL::template _S_pop_front<1>(),
4033 __data(__x).second));
4034 }
4035 else if constexpr ((!is_same_v<simd_abi::scalar,
4036 simd_abi::deduce_t<_Tp, _Sizes>> && ...)
4037 && (!__is_fixed_size_abi_v<
4038 simd_abi::deduce_t<_Tp, _Sizes>> && ...))
4039 {
4040 if constexpr (((_Sizes * 2 == _Np) && ...))
4041 return {{__private_init, __extract_part<0, 2>(__data(__x))},
4042 {__private_init, __extract_part<1, 2>(__data(__x))}};
4043 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4044 _SizeList<_Np / 3, _Np / 3, _Np / 3>>)
4045 return {{__private_init, __extract_part<0, 3>(__data(__x))},
4046 {__private_init, __extract_part<1, 3>(__data(__x))},
4047 {__private_init, __extract_part<2, 3>(__data(__x))}};
4048 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4049 _SizeList<2 * _Np / 3, _Np / 3>>)
4050 return {{__private_init, __extract_part<0, 3, 2>(__data(__x))},
4051 {__private_init, __extract_part<2, 3>(__data(__x))}};
4052 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4053 _SizeList<_Np / 3, 2 * _Np / 3>>)
4054 return {{__private_init, __extract_part<0, 3>(__data(__x))},
4055 {__private_init, __extract_part<1, 3, 2>(__data(__x))}};
4056 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4057 _SizeList<_Np / 2, _Np / 4, _Np / 4>>)
4058 return {{__private_init, __extract_part<0, 2>(__data(__x))},
4059 {__private_init, __extract_part<2, 4>(__data(__x))},
4060 {__private_init, __extract_part<3, 4>(__data(__x))}};
4061 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4062 _SizeList<_Np / 4, _Np / 4, _Np / 2>>)
4063 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4064 {__private_init, __extract_part<1, 4>(__data(__x))},
4065 {__private_init, __extract_part<1, 2>(__data(__x))}};
4066 else if constexpr (is_same_v<_SizeList<_Sizes...>,
4067 _SizeList<_Np / 4, _Np / 2, _Np / 4>>)
4068 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4069 {__private_init, __extract_center(__data(__x))},
4070 {__private_init, __extract_part<3, 4>(__data(__x))}};
4071 else if constexpr (((_Sizes * 4 == _Np) && ...))
4072 return {{__private_init, __extract_part<0, 4>(__data(__x))},
4073 {__private_init, __extract_part<1, 4>(__data(__x))},
4074 {__private_init, __extract_part<2, 4>(__data(__x))},
4075 {__private_init, __extract_part<3, 4>(__data(__x))}};
4076 // else fall through
4077 }
4078#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
4079 const __may_alias<_Tp>* const __element_ptr
4080 = reinterpret_cast<const __may_alias<_Tp>*>(&__x);
4081 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
4082 auto __i) constexpr {
4083 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4084 constexpr size_t __offset = _SL::_S_before(__i);
4085 constexpr size_t __base_align = alignof(simd<_Tp, _Ap>);
4086 constexpr size_t __a
4087 = __base_align - ((__offset * sizeof(_Tp)) % __base_align);
4088 constexpr size_t __b = ((__a - 1) & __a) ^ __a;
4089 constexpr size_t __alignment = __b == 0 ? __a : __b;
4090 return _Vi(__element_ptr + __offset, overaligned<__alignment>);
4091 });
4092#else
4093 return __generate_from_n_evaluations<sizeof...(_Sizes), _Tuple>([&](
4094 auto __i) constexpr {
4095 using _Vi = __deduced_simd<_Tp, _SL::_S_at(__i)>;
4096 const auto& __xx = __data(__x);
4097 using _Offset = decltype(_SL::_S_before(__i));
4098 return _Vi([&](auto __j) constexpr {
4099 constexpr _SizeConstant<_Offset::value + __j> __k;
4100 return __xx[__k];
4101 });
4102 });
4103#endif
4104 }
4105
4106// }}}
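// Illustrative usage sketch (assumes stdx = std::experimental); the size
// list must sum to the source width:
//
//   void __f(const stdx::fixed_size_simd<float, 8>& __x)
//   {
//     auto [__a, __b, __c] = stdx::split<2, 2, 4>(__x);
//     // __a and __b have 2 elements each, __c has 4
//   }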
4107
4108// __subscript_in_pack {{{
4109template <size_t _I, typename _Tp, typename _Ap, typename... _As>
4110 _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
4111 __subscript_in_pack(const simd<_Tp, _Ap>& __x, const simd<_Tp, _As>&... __xs)
4112 {
4113 if constexpr (_I < simd_size_v<_Tp, _Ap>)
4114 return __x[_I];
4115 else
4116 return __subscript_in_pack<_I - simd_size_v<_Tp, _Ap>>(__xs...);
4117 }
4118
4119// }}}
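// Worked example: for two 4-element simds __a and __b,
// __subscript_in_pack<5>(__a, __b) recurses once (5 >= 4) and returns
// __b[1]; i.e. the pack is indexed as if concatenated.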
4120// __store_pack_of_simd {{{
4121template <typename _Tp, typename _A0, typename... _As>
4122 _GLIBCXX_SIMD_INTRINSIC void
4123 __store_pack_of_simd(char* __mem, const simd<_Tp, _A0>& __x0,
4124 const simd<_Tp, _As>&... __xs)
4125 {
4126 constexpr size_t __n_bytes = sizeof(_Tp) * simd_size_v<_Tp, _A0>;
4127 __builtin_memcpy(__mem, &__data(__x0), __n_bytes);
4128 if constexpr (sizeof...(__xs) > 0)
4129 __store_pack_of_simd(__mem + __n_bytes, __xs...);
4130 }
4131
4132// }}}
4133// concat(simd...) {{{
4134template <typename _Tp, typename... _As, typename = __detail::__odr_helper>
4135 inline _GLIBCXX_SIMD_CONSTEXPR
4136 simd<_Tp, simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>
4137 concat(const simd<_Tp, _As>&... __xs)
4138 {
4139 using _Rp = __deduced_simd<_Tp, (simd_size_v<_Tp, _As> + ...)>;
4140 if constexpr (sizeof...(__xs) == 1)
4141 return simd_cast<_Rp>(__xs...);
4142 else if ((... && __xs._M_is_constprop()))
4143 return simd<_Tp,
4144 simd_abi::deduce_t<_Tp, (simd_size_v<_Tp, _As> + ...)>>([&](
4145 auto __i) constexpr { return __subscript_in_pack<__i>(__xs...); });
4146 else
4147 {
4148 _Rp __r{};
4149 __store_pack_of_simd(reinterpret_cast<char*>(&__data(__r)), __xs...);
4150 return __r;
4151 }
4152 }
4153
4154// }}}
4155// concat(array<simd>) {{{
4156template <typename _Tp, typename _Abi, size_t _Np>
4157 _GLIBCXX_SIMD_ALWAYS_INLINE
4158 _GLIBCXX_SIMD_CONSTEXPR __deduced_simd<_Tp, simd_size_v<_Tp, _Abi> * _Np>
4159 concat(const array<simd<_Tp, _Abi>, _Np>& __x)
4160 {
4161 return __call_with_subscripts<_Np>(__x, [](const auto&... __xs) {
4162 return concat(__xs...);
4163 });
4164 }
4165
4166// }}}
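// Illustrative usage sketch (assumes stdx = std::experimental): concat is
// the inverse of split; the result ABI is deduced from the summed width:
//
//   auto __join(const stdx::fixed_size_simd<float, 4>& __a,
//               const stdx::fixed_size_simd<float, 4>& __b)
//   { return stdx::concat(__a, __b); } // some 8-element simd<float>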
4167
4168/// @cond undocumented
4169// _SmartReference {{{
4170template <typename _Up, typename _Accessor = _Up,
4171 typename _ValueType = typename _Up::value_type>
4172 class _SmartReference
4173 {
4174 friend _Accessor;
4175 int _M_index;
4176 _Up& _M_obj;
4177
4178 _GLIBCXX_SIMD_INTRINSIC constexpr _ValueType _M_read() const noexcept
4179 {
4180 if constexpr (is_arithmetic_v<_Up>)
4181 return _M_obj;
4182 else
4183 return _M_obj[_M_index];
4184 }
4185
4186 template <typename _Tp>
4187 _GLIBCXX_SIMD_INTRINSIC constexpr void _M_write(_Tp&& __x) const
4188 { _Accessor::_S_set(_M_obj, _M_index, static_cast<_Tp&&>(__x)); }
4189
4190 public:
4191 _GLIBCXX_SIMD_INTRINSIC constexpr
4192 _SmartReference(_Up& __o, int __i) noexcept
4193 : _M_index(__i), _M_obj(__o) {}
4194
4195 using value_type = _ValueType;
4196
4197 _GLIBCXX_SIMD_INTRINSIC _SmartReference(const _SmartReference&) = delete;
4198
4199 _GLIBCXX_SIMD_INTRINSIC constexpr operator value_type() const noexcept
4200 { return _M_read(); }
4201
4202 template <typename _Tp,
4203 typename
4204 = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, value_type>>
4205 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator=(_Tp&& __x) &&
4206 {
4207 _M_write(static_cast<_Tp&&>(__x));
4208 return {_M_obj, _M_index};
4209 }
4210
4211#define _GLIBCXX_SIMD_OP_(__op) \
4212 template <typename _Tp, \
4213 typename _TT \
4214 = decltype(declval<value_type>() __op declval<_Tp>()), \
4215 typename = _ValuePreservingOrInt<__remove_cvref_t<_Tp>, _TT>, \
4216 typename = _ValuePreservingOrInt<_TT, value_type>> \
4217 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference \
4218 operator __op##=(_Tp&& __x) && \
4219 { \
4220 const value_type& __lhs = _M_read(); \
4221 _M_write(__lhs __op __x); \
4222 return {_M_obj, _M_index}; \
4223 }
4224 _GLIBCXX_SIMD_ALL_ARITHMETICS(_GLIBCXX_SIMD_OP_);
4225 _GLIBCXX_SIMD_ALL_SHIFTS(_GLIBCXX_SIMD_OP_);
4226 _GLIBCXX_SIMD_ALL_BINARY(_GLIBCXX_SIMD_OP_);
4227#undef _GLIBCXX_SIMD_OP_
4228
4229 template <typename _Tp = void,
4230 typename
4231 = decltype(++declval<conditional_t<true, value_type, _Tp>&>())>
4232 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator++() &&
4233 {
4234 value_type __x = _M_read();
4235 _M_write(++__x);
4236 return {_M_obj, _M_index};
4237 }
4238
4239 template <typename _Tp = void,
4240 typename
4241 = decltype(declval<conditional_t<true, value_type, _Tp>&>()++)>
4242 _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator++(int) &&
4243 {
4244 const value_type __r = _M_read();
4245 value_type __x = __r;
4246 _M_write(++__x);
4247 return __r;
4248 }
4249
4250 template <typename _Tp = void,
4251 typename
4252 = decltype(--declval<conditional_t<true, value_type, _Tp>&>())>
4253 _GLIBCXX_SIMD_INTRINSIC constexpr _SmartReference operator--() &&
4254 {
4255 value_type __x = _M_read();
4256 _M_write(--__x);
4257 return {_M_obj, _M_index};
4258 }
4259
4260 template <typename _Tp = void,
4261 typename
4262 = decltype(declval<conditional_t<true, value_type, _Tp>&>()--)>
4263 _GLIBCXX_SIMD_INTRINSIC constexpr value_type operator--(int) &&
4264 {
4265 const value_type __r = _M_read();
4266 value_type __x = __r;
4267 _M_write(--__x);
4268 return __r;
4269 }
4270
4271 _GLIBCXX_SIMD_INTRINSIC friend void
4272 swap(_SmartReference&& __a, _SmartReference&& __b) noexcept(
4273 conjunction<
4274 is_nothrow_constructible<value_type, _SmartReference&&>,
4275 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4276 {
4277 value_type __tmp = static_cast<_SmartReference&&>(__a);
4278 static_cast<_SmartReference&&>(__a) = static_cast<value_type>(__b);
4279 static_cast<_SmartReference&&>(__b) = std::move(__tmp);
4280 }
4281
4282 _GLIBCXX_SIMD_INTRINSIC friend void
4283 swap(value_type& __a, _SmartReference&& __b) noexcept(
4284 conjunction<
4285 is_nothrow_constructible<value_type, value_type&&>,
4286 is_nothrow_assignable<value_type&, value_type&&>,
4287 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4288 {
4289 value_type __tmp(std::move(__a));
4290 __a = static_cast<value_type>(__b);
4291 static_cast<_SmartReference&&>(__b) = std::move(__tmp);
4292 }
4293
4294 _GLIBCXX_SIMD_INTRINSIC friend void
4295 swap(_SmartReference&& __a, value_type& __b) noexcept(
4296 conjunction<
4297 is_nothrow_constructible<value_type, _SmartReference&&>,
4298 is_nothrow_assignable<value_type&, value_type&&>,
4299 is_nothrow_assignable<_SmartReference&&, value_type&&>>::value)
4300 {
4301 value_type __tmp(__a);
4302 static_cast<_SmartReference&&>(__a) = std::move(__b);
4303 __b = std::move(__tmp);
4304 }
4305 };
4306
4307// }}}
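// Note: _SmartReference is the write-through proxy behind simd::operator[]
// and simd_mask::operator[]: reads go through _M_read(), writes through
// _Accessor::_S_set. That is what makes
//
//   __v[0] += 1; // read lane 0, add 1, write lane 0 back
//
// work although single lanes of a vector register are not addressable.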
4308// __scalar_abi_wrapper {{{
4309template <int _Bytes>
4310 struct __scalar_abi_wrapper
4311 {
4312 template <typename _Tp> static constexpr size_t _S_full_size = 1;
4313 template <typename _Tp> static constexpr size_t _S_size = 1;
4314    template <typename _Tp> static constexpr bool _S_is_partial = false;
4315
4316 template <typename _Tp, typename _Abi = simd_abi::scalar>
4317 static constexpr bool _S_is_valid_v
4318 = _Abi::template _IsValid<_Tp>::value && sizeof(_Tp) == _Bytes;
4319 };
4320
4321// }}}
4322// __decay_abi metafunction {{{
4323template <typename _Tp>
4324 struct __decay_abi { using type = _Tp; };
4325
4326template <int _Bytes>
4327 struct __decay_abi<__scalar_abi_wrapper<_Bytes>>
4328 { using type = simd_abi::scalar; };
4329
4330// }}}
4331// __find_next_valid_abi metafunction {{{1
4332// Given an ABI tag A<N>, find an N2 < N such that A<N2>::_S_is_valid_v<_Tp> ==
4333// true, N2 is a power-of-2, and A<N2>::_S_is_partial<_Tp> is false. Break
4334// recursion at 2 elements in the resulting ABI tag. In this case
4335// type::_S_is_valid_v<_Tp> may be false.
4336template <template <int> class _Abi, int _Bytes, typename _Tp>
4337 struct __find_next_valid_abi
4338 {
4339 static constexpr auto _S_choose()
4340 {
4341 constexpr int _NextBytes = std::__bit_ceil(_Bytes) / 2;
4342 using _NextAbi = _Abi<_NextBytes>;
4343 if constexpr (_NextBytes < sizeof(_Tp) * 2) // break recursion
4344 return _Abi<_Bytes>();
4345 else if constexpr (_NextAbi::template _S_is_partial<_Tp> == false
4346 && _NextAbi::template _S_is_valid_v<_Tp>)
4347 return _NextAbi();
4348 else
4349 return __find_next_valid_abi<_Abi, _NextBytes, _Tp>::_S_choose();
4350 }
4351
4352 using type = decltype(_S_choose());
4353 };
4354
4355template <int _Bytes, typename _Tp>
4356 struct __find_next_valid_abi<__scalar_abi_wrapper, _Bytes, _Tp>
4357 { using type = simd_abi::scalar; };
4358
4359// _AbiList {{{1
4360template <template <int> class...>
4361 struct _AbiList
4362 {
4363 template <typename, int> static constexpr bool _S_has_valid_abi = false;
4364 template <typename, int> using _FirstValidAbi = void;
4365 template <typename, int> using _BestAbi = void;
4366 };
4367
4368template <template <int> class _A0, template <int> class... _Rest>
4369 struct _AbiList<_A0, _Rest...>
4370 {
4371 template <typename _Tp, int _Np>
4372 static constexpr bool _S_has_valid_abi
4373 = _A0<sizeof(_Tp) * _Np>::template _S_is_valid_v<
4374 _Tp> || _AbiList<_Rest...>::template _S_has_valid_abi<_Tp, _Np>;
4375
4376 template <typename _Tp, int _Np>
4377 using _FirstValidAbi = conditional_t<
4378 _A0<sizeof(_Tp) * _Np>::template _S_is_valid_v<_Tp>,
4379 typename __decay_abi<_A0<sizeof(_Tp) * _Np>>::type,
4380 typename _AbiList<_Rest...>::template _FirstValidAbi<_Tp, _Np>>;
4381
4382 template <typename _Tp, int _Np>
4383 static constexpr auto _S_determine_best_abi()
4384 {
4385 static_assert(_Np >= 1);
4386 constexpr int _Bytes = sizeof(_Tp) * _Np;
4387 if constexpr (_Np == 1)
4388 return __make_dependent_t<_Tp, simd_abi::scalar>{};
4389 else
4390 {
4391 constexpr int __fullsize = _A0<_Bytes>::template _S_full_size<_Tp>;
4392 // _A0<_Bytes> is good if:
4393 // 1. The ABI tag is valid for _Tp
4394 // 2. The storage overhead is no more than padding to fill the next
4395 // power-of-2 number of bytes
4396 if constexpr (_A0<_Bytes>::template _S_is_valid_v<
4397 _Tp> && __fullsize / 2 < _Np)
4398 return typename __decay_abi<_A0<_Bytes>>::type{};
4399 else
4400 {
4401 using _Bp =
4402 typename __find_next_valid_abi<_A0, _Bytes, _Tp>::type;
4403 if constexpr (_Bp::template _S_is_valid_v<
4404 _Tp> && _Bp::template _S_size<_Tp> <= _Np)
4405 return _Bp{};
4406 else
4407 return
4408 typename _AbiList<_Rest...>::template _BestAbi<_Tp, _Np>{};
4409 }
4410 }
4411 }
4412
4413 template <typename _Tp, int _Np>
4414 using _BestAbi = decltype(_S_determine_best_abi<_Tp, _Np>());
4415 };
4416
4417// }}}1
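// Note: _S_determine_best_abi accepts _A0<_Bytes> iff the tag is valid for
// _Tp and at most half of _S_full_size would be padding (the
// `__fullsize / 2 < _Np` test). E.g. _Np == 3 elements of a 4-byte _Tp
// (12 bytes) may map to a 16-byte register tag, since a full size of 4
// satisfies 4 / 2 < 3 (assuming such a register exists on the target).
// Otherwise the next smaller power-of-2 size is tried via
// __find_next_valid_abi before falling through to the next ABI tag.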
4418
4419// the following lists all native ABIs, which makes them accessible to
4420// simd_abi::deduce and select_best_vector_type_t (for fixed_size). Order
4421// matters: Whatever comes first has higher priority.
4422using _AllNativeAbis = _AbiList<simd_abi::_VecBltnBtmsk, simd_abi::_VecBuiltin,
4423 __scalar_abi_wrapper>;
4424
4425// valid _SimdTraits specialization {{{1
4426template <typename _Tp, typename _Abi>
4427 struct _SimdTraits<_Tp, _Abi, void_t<typename _Abi::template _IsValid<_Tp>>>
4428 : _Abi::template __traits<_Tp> {};
4429
4430// __deduce_impl specializations {{{1
4431// try all native ABIs (including scalar) first
4432template <typename _Tp, size_t _Np>
4433 struct __deduce_impl<
4434 _Tp, _Np, enable_if_t<_AllNativeAbis::template _S_has_valid_abi<_Tp, _Np>>>
4435 { using type = _AllNativeAbis::_FirstValidAbi<_Tp, _Np>; };
4436
4437// fall back to fixed_size only if scalar and native ABIs don't match
4438template <typename _Tp, size_t _Np, typename = void>
4439 struct __deduce_fixed_size_fallback {};
4440
4441template <typename _Tp, size_t _Np>
4442 struct __deduce_fixed_size_fallback<_Tp, _Np,
4443 enable_if_t<simd_abi::fixed_size<_Np>::template _S_is_valid_v<_Tp>>>
4444 { using type = simd_abi::fixed_size<_Np>; };
4445
4446template <typename _Tp, size_t _Np, typename>
4447 struct __deduce_impl : public __deduce_fixed_size_fallback<_Tp, _Np> {};
4448
4449//}}}1
4450/// @endcond
4451
4452// simd_mask {{{
4453template <typename _Tp, typename _Abi>
4454 class simd_mask : public _SimdTraits<_Tp, _Abi>::_MaskBase
4455 {
4456 // types, tags, and friends {{{
4457 using _Traits = _SimdTraits<_Tp, _Abi>;
4458 using _MemberType = typename _Traits::_MaskMember;
4459
4460 // We map all masks with equal element sizeof to a single integer type, the
4461 // one given by __int_for_sizeof_t<_Tp>. This is the approach
4462 // [[gnu::vector_size(N)]] types take as well and it reduces the number of
4463 // template specializations in the implementation classes.
4464 using _Ip = __int_for_sizeof_t<_Tp>;
4465 static constexpr _Ip* _S_type_tag = nullptr;
4466
4467 friend typename _Traits::_MaskBase;
4468 friend class simd<_Tp, _Abi>; // to construct masks on return
4469 friend typename _Traits::_SimdImpl; // to construct masks on return and
4470 // inspect data on masked operations
4471 public:
4472 using _Impl = typename _Traits::_MaskImpl;
4473 friend _Impl;
4474
4475 // }}}
4476 // member types {{{
4477 using value_type = bool;
4478 using reference = _SmartReference<_MemberType, _Impl, value_type>;
4479 using simd_type = simd<_Tp, _Abi>;
4480 using abi_type = _Abi;
4481
4482 // }}}
4483 static constexpr size_t size() // {{{
4484 { return __size_or_zero_v<_Tp, _Abi>; }
4485
4486 // }}}
4487 // constructors & assignment {{{
4488 simd_mask() = default;
4489 simd_mask(const simd_mask&) = default;
4490 simd_mask(simd_mask&&) = default;
4491 simd_mask& operator=(const simd_mask&) = default;
4492 simd_mask& operator=(simd_mask&&) = default;
4493
4494 // }}}
4495 // access to internal representation (optional feature) {{{
4496 _GLIBCXX_SIMD_ALWAYS_INLINE explicit
4497 simd_mask(typename _Traits::_MaskCastType __init)
4498 : _M_data{__init} {}
4499    // conversion to the internal type is done in _MaskBase
4500
4501 // }}}
4502 // bitset interface (extension to be proposed) {{{
4503 // TS_FEEDBACK:
4504 // Conversion of simd_mask to and from bitset makes it much easier to
4505 // interface with other facilities. I suggest adding `static
4506 // simd_mask::from_bitset` and `simd_mask::to_bitset`.
4507 _GLIBCXX_SIMD_ALWAYS_INLINE static simd_mask
4508 __from_bitset(bitset<size()> bs)
4509 { return {__bitset_init, bs}; }
4510
4511 _GLIBCXX_SIMD_ALWAYS_INLINE bitset<size()>
4512 __to_bitset() const
4513 { return _Impl::_S_to_bits(_M_data)._M_to_bitset(); }
4514
4515 // }}}
4516 // explicit broadcast constructor {{{
4517 _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
4518 simd_mask(value_type __x)
4519 : _M_data(_Impl::template _S_broadcast<_Ip>(__x)) {}
4520
4521 // }}}
4522 // implicit type conversion constructor {{{
4523 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4524 // proposed improvement
4525 template <typename _Up, typename _A2,
4526 typename = enable_if_t<simd_size_v<_Up, _A2> == size()>>
4527 _GLIBCXX_SIMD_ALWAYS_INLINE explicit(sizeof(_MemberType)
4528 != sizeof(typename _SimdTraits<_Up, _A2>::_MaskMember))
4529 simd_mask(const simd_mask<_Up, _A2>& __x)
4530 : simd_mask(__proposed::static_simd_cast<simd_mask>(__x)) {}
4531 #else
4532 // conforming to ISO/IEC 19570:2018
4533 template <typename _Up, typename = enable_if_t<conjunction<
4534 is_same<abi_type, simd_abi::fixed_size<size()>>,
4535 is_same<_Up, _Up>>::value>>
4536 _GLIBCXX_SIMD_ALWAYS_INLINE
4537 simd_mask(const simd_mask<_Up, simd_abi::fixed_size<size()>>& __x)
4538 : _M_data(_Impl::_S_from_bitmask(__data(__x), _S_type_tag)) {}
4539 #endif
4540
4541 // }}}
4542 // load constructor {{{
4543 template <typename _Flags>
4544 _GLIBCXX_SIMD_ALWAYS_INLINE
4545 simd_mask(const value_type* __mem, _Flags)
4546 : _M_data(_Impl::template _S_load<_Ip>(
4547 _Flags::template _S_apply<simd_mask>(__mem))) {}
4548
4549 template <typename _Flags>
4550 _GLIBCXX_SIMD_ALWAYS_INLINE
4551 simd_mask(const value_type* __mem, simd_mask __k, _Flags)
4552 : _M_data{}
4553 {
4554 _M_data
4555 = _Impl::_S_masked_load(_M_data, __k._M_data,
4556 _Flags::template _S_apply<simd_mask>(__mem));
4557 }
4558
4559 // }}}
4560 // loads [simd_mask.load] {{{
4561 template <typename _Flags>
4562 _GLIBCXX_SIMD_ALWAYS_INLINE void
4563 copy_from(const value_type* __mem, _Flags)
4564 {
4565 _M_data = _Impl::template _S_load<_Ip>(
4566 _Flags::template _S_apply<simd_mask>(__mem));
4567 }
4568
4569 // }}}
4570 // stores [simd_mask.store] {{{
4571 template <typename _Flags>
4572 _GLIBCXX_SIMD_ALWAYS_INLINE void
4573 copy_to(value_type* __mem, _Flags) const
4574 { _Impl::_S_store(_M_data, _Flags::template _S_apply<simd_mask>(__mem)); }
4575
4576 // }}}
4577 // scalar access {{{
4578 _GLIBCXX_SIMD_ALWAYS_INLINE reference
4579 operator[](size_t __i)
4580 {
4581 if (__i >= size())
4582 __invoke_ub("Subscript %d is out of range [0, %d]", __i, size() - 1);
4583 return {_M_data, int(__i)};
4584 }
4585
4586 _GLIBCXX_SIMD_ALWAYS_INLINE value_type
4587 operator[](size_t __i) const
4588 {
4589 if (__i >= size())
4590 __invoke_ub("Subscript %d is out of range [0, %d]", __i, size() - 1);
4591 if constexpr (__is_scalar_abi<_Abi>())
4592 return _M_data;
4593 else
4594 return static_cast<bool>(_M_data[__i]);
4595 }
4596
4597 // }}}
4598 // negation {{{
4599 _GLIBCXX_SIMD_ALWAYS_INLINE simd_mask
4600 operator!() const
4601 { return {__private_init, _Impl::_S_bit_not(_M_data)}; }
4602
4603 // }}}
4604 // simd_mask binary operators [simd_mask.binary] {{{
4605 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4606 // simd_mask<int> && simd_mask<uint> needs disambiguation
4607 template <typename _Up, typename _A2,
4608 typename
4609 = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
4610 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4611 operator&&(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
4612 {
4613 return {__private_init,
4614 _Impl::_S_logical_and(__x._M_data, simd_mask(__y)._M_data)};
4615 }
4616
4617 template <typename _Up, typename _A2,
4618 typename
4619 = enable_if_t<is_convertible_v<simd_mask<_Up, _A2>, simd_mask>>>
4620 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4621 operator||(const simd_mask& __x, const simd_mask<_Up, _A2>& __y)
4622 {
4623 return {__private_init,
4624 _Impl::_S_logical_or(__x._M_data, simd_mask(__y)._M_data)};
4625 }
4626 #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4627
4628 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4629 operator&&(const simd_mask& __x, const simd_mask& __y)
4630 {
4631 return {__private_init, _Impl::_S_logical_and(__x._M_data, __y._M_data)};
4632 }
4633
4634 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4635 operator||(const simd_mask& __x, const simd_mask& __y)
4636 {
4637 return {__private_init, _Impl::_S_logical_or(__x._M_data, __y._M_data)};
4638 }
4639
4640 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4641 operator&(const simd_mask& __x, const simd_mask& __y)
4642 { return {__private_init, _Impl::_S_bit_and(__x._M_data, __y._M_data)}; }
4643
4644 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4645 operator|(const simd_mask& __x, const simd_mask& __y)
4646 { return {__private_init, _Impl::_S_bit_or(__x._M_data, __y._M_data)}; }
4647
4648 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask
4649 operator^(const simd_mask& __x, const simd_mask& __y)
4650 { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
4651
4652 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
4653 operator&=(simd_mask& __x, const simd_mask& __y)
4654 {
4655 __x._M_data = _Impl::_S_bit_and(__x._M_data, __y._M_data);
4656 return __x;
4657 }
4658
4659 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
4660 operator|=(simd_mask& __x, const simd_mask& __y)
4661 {
4662 __x._M_data = _Impl::_S_bit_or(__x._M_data, __y._M_data);
4663 return __x;
4664 }
4665
4666 _GLIBCXX_SIMD_ALWAYS_INLINE friend simd_mask&
4667 operator^=(simd_mask& __x, const simd_mask& __y)
4668 {
4669 __x._M_data = _Impl::_S_bit_xor(__x._M_data, __y._M_data);
4670 return __x;
4671 }
4672
4673 // }}}
4674 // simd_mask compares [simd_mask.comparison] {{{
4675 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4676 operator==(const simd_mask& __x, const simd_mask& __y)
4677 { return !operator!=(__x, __y); }
4678
4679 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4680 operator!=(const simd_mask& __x, const simd_mask& __y)
4681 { return {__private_init, _Impl::_S_bit_xor(__x._M_data, __y._M_data)}; }
4682
4683 // }}}
4684 // private_init ctor {{{
4685 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
4686 simd_mask(_PrivateInit, typename _Traits::_MaskMember __init)
4687 : _M_data(__init) {}
4688
4689 // }}}
4690 // private_init generator ctor {{{
4691 template <typename _Fp, typename = decltype(bool(declval<_Fp>()(size_t())))>
4692 _GLIBCXX_SIMD_INTRINSIC constexpr
4693 simd_mask(_PrivateInit, _Fp&& __gen)
4694 : _M_data()
4695 {
4696 __execute_n_times<size()>([&](auto __i) constexpr {
4697 _Impl::_S_set(_M_data, __i, __gen(__i));
4698 });
4699 }
4700
4701 // }}}
4702 // bitset_init ctor {{{
4703 _GLIBCXX_SIMD_INTRINSIC simd_mask(_BitsetInit, bitset<size()> __init)
4704 : _M_data(
4705 _Impl::_S_from_bitmask(_SanitizedBitMask<size()>(__init), _S_type_tag))
4706 {}
4707
4708 // }}}
4709 // __cvt {{{
4710 // TS_FEEDBACK:
4711 // The conversion operator this implements should be a ctor on simd_mask.
4712 // Once you call .__cvt() on a simd_mask it converts conveniently.
4713 // A useful variation: add `explicit(sizeof(_Tp) != sizeof(_Up))`
4714 struct _CvtProxy
4715 {
4716 template <typename _Up, typename _A2,
4717 typename
4718 = enable_if_t<simd_size_v<_Up, _A2> == simd_size_v<_Tp, _Abi>>>
4719 _GLIBCXX_SIMD_ALWAYS_INLINE
4720 operator simd_mask<_Up, _A2>() &&
4721 {
4722 using namespace std::experimental::__proposed;
4723 return static_simd_cast<simd_mask<_Up, _A2>>(_M_data);
4724 }
4725
4726 const simd_mask<_Tp, _Abi>& _M_data;
4727 };
4728
4729 _GLIBCXX_SIMD_INTRINSIC _CvtProxy
4730 __cvt() const
4731 { return {*this}; }
4732
4733 // }}}
4734 // operator?: overloads (suggested extension) {{{
4735 #ifdef __GXX_CONDITIONAL_IS_OVERLOADABLE__
4736 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4737 operator?:(const simd_mask& __k, const simd_mask& __where_true,
4738 const simd_mask& __where_false)
4739 {
4740 auto __ret = __where_false;
4741 _Impl::_S_masked_assign(__k._M_data, __ret._M_data, __where_true._M_data);
4742 return __ret;
4743 }
4744
4745 template <typename _U1, typename _U2,
4746 typename _Rp = simd<common_type_t<_U1, _U2>, _Abi>,
4747 typename = enable_if_t<conjunction_v<
4748 is_convertible<_U1, _Rp>, is_convertible<_U2, _Rp>,
4749 is_convertible<simd_mask, typename _Rp::mask_type>>>>
4750 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend _Rp
4751 operator?:(const simd_mask& __k, const _U1& __where_true,
4752 const _U2& __where_false)
4753 {
4754 _Rp __ret = __where_false;
4755 _Rp::_Impl::_S_masked_assign(
4756 __data(static_cast<typename _Rp::mask_type>(__k)), __data(__ret),
4757 __data(static_cast<_Rp>(__where_true)));
4758 return __ret;
4759 }
4760
4761 #ifdef _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4762 template <typename _Kp, typename _Ak, typename _Up, typename _Au,
4763 typename = enable_if_t<
4764 conjunction_v<is_convertible<simd_mask<_Kp, _Ak>, simd_mask>,
4765 is_convertible<simd_mask<_Up, _Au>, simd_mask>>>>
4766 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd_mask
4767 operator?:(const simd_mask<_Kp, _Ak>& __k, const simd_mask& __where_true,
4768 const simd_mask<_Up, _Au>& __where_false)
4769 {
4770 simd_mask __ret = __where_false;
4771 _Impl::_S_masked_assign(simd_mask(__k)._M_data, __ret._M_data,
4772 __where_true._M_data);
4773 return __ret;
4774 }
4775 #endif // _GLIBCXX_SIMD_ENABLE_IMPLICIT_MASK_CAST
4776 #endif // __GXX_CONDITIONAL_IS_OVERLOADABLE__
4777
4778 // }}}
4779 // _M_is_constprop {{{
4780 _GLIBCXX_SIMD_INTRINSIC constexpr bool
4781 _M_is_constprop() const
4782 {
4783 if constexpr (__is_scalar_abi<_Abi>())
4784 return __builtin_constant_p(_M_data);
4785 else
4786 return _M_data._M_is_constprop();
4787 }
4788
4789 // }}}
4790
4791 private:
4792 friend const auto& __data<_Tp, abi_type>(const simd_mask&);
4793 friend auto& __data<_Tp, abi_type>(simd_mask&);
4794 alignas(_Traits::_S_mask_align) _MemberType _M_data;
4795 };
4796
4797// }}}
4798
4799/// @cond undocumented
4800// __data(simd_mask) {{{
4801template <typename _Tp, typename _Ap>
4802 _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
4803 __data(const simd_mask<_Tp, _Ap>& __x)
4804 { return __x._M_data; }
4805
4806template <typename _Tp, typename _Ap>
4807 _GLIBCXX_SIMD_INTRINSIC constexpr auto&
4808 __data(simd_mask<_Tp, _Ap>& __x)
4809 { return __x._M_data; }
4810
4811// }}}
4812/// @endcond
4813
4814// simd_mask reductions [simd_mask.reductions] {{{
4815template <typename _Tp, typename _Abi>
4816 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4817 all_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4818 {
4819 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4820 {
4821 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
4822 if (!__k[__i])
4823 return false;
4824 return true;
4825 }
4826 else
4827 return _Abi::_MaskImpl::_S_all_of(__k);
4828 }
4829
4830template <typename _Tp, typename _Abi>
4831 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4832 any_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4833 {
4834 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4835 {
4836 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
4837 if (__k[__i])
4838 return true;
4839 return false;
4840 }
4841 else
4842 return _Abi::_MaskImpl::_S_any_of(__k);
4843 }
4844
4845template <typename _Tp, typename _Abi>
4846 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4847 none_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4848 {
4849 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4850 {
4851 for (size_t __i = 0; __i < simd_size_v<_Tp, _Abi>; ++__i)
4852 if (__k[__i])
4853 return false;
4854 return true;
4855 }
4856 else
4857 return _Abi::_MaskImpl::_S_none_of(__k);
4858 }
4859
4860template <typename _Tp, typename _Abi>
4861 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4862 some_of(const simd_mask<_Tp, _Abi>& __k) noexcept
4863 {
4864 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4865 {
4866 for (size_t __i = 1; __i < simd_size_v<_Tp, _Abi>; ++__i)
4867 if (__k[__i] != __k[__i - 1])
4868 return true;
4869 return false;
4870 }
4871 else
4872 return _Abi::_MaskImpl::_S_some_of(__k);
4873 }
4874
4875template <typename _Tp, typename _Abi>
4876 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4877 popcount(const simd_mask<_Tp, _Abi>& __k) noexcept
4878 {
4879 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4880 {
4881 const int __r = __call_with_subscripts<simd_size_v<_Tp, _Abi>>(
4882 __k, [](auto... __elements) { return ((__elements != 0) + ...); });
4883 if (__builtin_is_constant_evaluated() || __builtin_constant_p(__r))
4884 return __r;
4885 }
4886 return _Abi::_MaskImpl::_S_popcount(__k);
4887 }
4888
4889template <typename _Tp, typename _Abi>
4890 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4891 find_first_set(const simd_mask<_Tp, _Abi>& __k)
4892 {
4893 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4894 {
4895 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4896 const size_t _Idx = __call_with_n_evaluations<_Np>(
4897 [](auto... __indexes) { return std::min({__indexes...}); },
4898 [&](auto __i) { return __k[__i] ? +__i : _Np; });
4899 if (_Idx >= _Np)
4900 __invoke_ub("find_first_set(empty mask) is UB");
4901 if (__builtin_constant_p(_Idx))
4902 return _Idx;
4903 }
4904 return _Abi::_MaskImpl::_S_find_first_set(__k);
4905 }
4906
4907template <typename _Tp, typename _Abi>
4908 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4909 find_last_set(const simd_mask<_Tp, _Abi>& __k)
4910 {
4911 if (__builtin_is_constant_evaluated() || __k._M_is_constprop())
4912 {
4913 constexpr size_t _Np = simd_size_v<_Tp, _Abi>;
4914 const int _Idx = __call_with_n_evaluations<_Np>(
4915 [](auto... __indexes) { return std::max({__indexes...}); },
4916 [&](auto __i) { return __k[__i] ? int(__i) : -1; });
4917 if (_Idx < 0)
4918	  __invoke_ub("find_last_set(empty mask) is UB");
4919 if (__builtin_constant_p(_Idx))
4920 return _Idx;
4921 }
4922 return _Abi::_MaskImpl::_S_find_last_set(__k);
4923 }
4924
4925_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4926all_of(_ExactBool __x) noexcept
4927{ return __x; }
4928
4929_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4930any_of(_ExactBool __x) noexcept
4931{ return __x; }
4932
4933_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4934none_of(_ExactBool __x) noexcept
4935{ return !__x; }
4936
4937_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR bool
4938some_of(_ExactBool) noexcept
4939{ return false; }
4940
4941_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4942popcount(_ExactBool __x) noexcept
4943{ return __x; }
4944
4945_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4946find_first_set(_ExactBool)
4947{ return 0; }
4948
4949_GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR int
4950find_last_set(_ExactBool)
4951{ return 0; }
4952
4953// }}}
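// Illustrative usage sketch (assumes stdx = std::experimental):
//
//   bool __any_negative(const stdx::native_simd<float>& __v)
//   { return stdx::any_of(__v < 0.f); }
//
//   int __count_negative(const stdx::native_simd<float>& __v)
//   { return stdx::popcount(__v < 0.f); }
//
// find_first_set/find_last_set return the lowest/highest true index; both
// are undefined for an all-false mask (see the __invoke_ub calls above).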
4954
4955/// @cond undocumented
4956// _SimdIntOperators{{{1
4957template <typename _V, typename _Tp, typename _Abi, bool>
4958 class _SimdIntOperators {};
4959
4960template <typename _V, typename _Tp, typename _Abi>
4961 class _SimdIntOperators<_V, _Tp, _Abi, true>
4962 {
4963 using _Impl = typename _SimdTraits<_Tp, _Abi>::_SimdImpl;
4964
4965 _GLIBCXX_SIMD_INTRINSIC const _V& __derived() const
4966 { return *static_cast<const _V*>(this); }
4967
4968 template <typename _Up>
4969 _GLIBCXX_SIMD_INTRINSIC static _GLIBCXX_SIMD_CONSTEXPR _V
4970 _S_make_derived(_Up&& __d)
4971 { return {__private_init, static_cast<_Up&&>(__d)}; }
4972
4973 public:
4974 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4975 _V&
4976 operator%=(_V& __lhs, const _V& __x)
4977 { return __lhs = __lhs % __x; }
4978
4979 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4980 _V&
4981 operator&=(_V& __lhs, const _V& __x)
4982 { return __lhs = __lhs & __x; }
4983
4984 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4985 _V&
4986 operator|=(_V& __lhs, const _V& __x)
4987 { return __lhs = __lhs | __x; }
4988
4989 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4990 _V&
4991 operator^=(_V& __lhs, const _V& __x)
4992 { return __lhs = __lhs ^ __x; }
4993
4994 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
4995 _V&
4996 operator<<=(_V& __lhs, const _V& __x)
4997 { return __lhs = __lhs << __x; }
4998
4999 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5000 _V&
5001 operator>>=(_V& __lhs, const _V& __x)
5002 { return __lhs = __lhs >> __x; }
5003
5004 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5005 _V&
5006 operator<<=(_V& __lhs, int __x)
5007 { return __lhs = __lhs << __x; }
5008
5009 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5010 _V&
5011 operator>>=(_V& __lhs, int __x)
5012 { return __lhs = __lhs >> __x; }
5013
5014 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5015 _V
5016 operator%(const _V& __x, const _V& __y)
5017 {
5018 return _SimdIntOperators::_S_make_derived(
5019 _Impl::_S_modulus(__data(__x), __data(__y)));
5020 }
5021
5022 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5023 _V
5024 operator&(const _V& __x, const _V& __y)
5025 {
5026 return _SimdIntOperators::_S_make_derived(
5027 _Impl::_S_bit_and(__data(__x), __data(__y)));
5028 }
5029
5030 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5031 _V
5032 operator|(const _V& __x, const _V& __y)
5033 {
5034 return _SimdIntOperators::_S_make_derived(
5035 _Impl::_S_bit_or(__data(__x), __data(__y)));
5036 }
5037
5038 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5039 _V
5040 operator^(const _V& __x, const _V& __y)
5041 {
5042 return _SimdIntOperators::_S_make_derived(
5043 _Impl::_S_bit_xor(__data(__x), __data(__y)));
5044 }
5045
5046 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5047 _V
5048 operator<<(const _V& __x, const _V& __y)
5049 {
5050 return _SimdIntOperators::_S_make_derived(
5051 _Impl::_S_bit_shift_left(__data(__x), __data(__y)));
5052 }
5053
5054 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5055 _V
5056 operator>>(const _V& __x, const _V& __y)
5057 {
5058 return _SimdIntOperators::_S_make_derived(
5059 _Impl::_S_bit_shift_right(__data(__x), __data(__y)));
5060 }
5061
5062 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5063 _V
5064 operator<<(const _V& __x, int __y)
5065 {
5066 if (__y < 0)
5067 __invoke_ub("The behavior is undefined if the right operand of a "
5068 "shift operation is negative. [expr.shift]\nA shift by "
5069 "%d was requested",
5070 __y);
5071 if (size_t(__y) >= sizeof(declval<_Tp>() << __y) * __CHAR_BIT__)
5072 __invoke_ub(
5073 "The behavior is undefined if the right operand of a "
5074 "shift operation is greater than or equal to the width of the "
5075 "promoted left operand. [expr.shift]\nA shift by %d was requested",
5076 __y);
5077 return _SimdIntOperators::_S_make_derived(
5078 _Impl::_S_bit_shift_left(__data(__x), __y));
5079 }
5080
5081 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend
5082 _V
5083 operator>>(const _V& __x, int __y)
5084 {
5085 if (__y < 0)
5086 __invoke_ub(
5087 "The behavior is undefined if the right operand of a shift "
5088 "operation is negative. [expr.shift]\nA shift by %d was requested",
5089 __y);
5090 if (size_t(__y) >= sizeof(declval<_Tp>() << __y) * __CHAR_BIT__)
5091 __invoke_ub(
5092 "The behavior is undefined if the right operand of a shift "
5093 "operation is greater than or equal to the width of the promoted "
5094 "left operand. [expr.shift]\nA shift by %d was requested",
5095 __y);
5096 return _SimdIntOperators::_S_make_derived(
5097 _Impl::_S_bit_shift_right(__data(__x), __y));
5098 }
5099
5100 // unary operators (for integral _Tp)
5101 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5102 _V
5103 operator~() const
5104 { return {__private_init, _Impl::_S_complement(__derived()._M_data)}; }
5105 };
5106
5107//}}}1
5108/// @endcond
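// Note: the int-shift overloads above check both [expr.shift] UB cases up
// front, e.g. for simd<int> (assuming 32-bit int) both `__v << -1` and
// `__v << 32` reach __invoke_ub (active with _GLIBCXX_DEBUG_UB) before any
// per-lane shift is attempted.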
5109
5110// simd {{{
5111template <typename _Tp, typename _Abi>
5112 class simd : public _SimdIntOperators<
5113 simd<_Tp, _Abi>, _Tp, _Abi,
5114 conjunction<is_integral<_Tp>,
5115 typename _SimdTraits<_Tp, _Abi>::_IsValid>::value>,
5116 public _SimdTraits<_Tp, _Abi>::_SimdBase
5117 {
5118 using _Traits = _SimdTraits<_Tp, _Abi>;
5119 using _MemberType = typename _Traits::_SimdMember;
5120 using _CastType = typename _Traits::_SimdCastType;
5121 static constexpr _Tp* _S_type_tag = nullptr;
5122 friend typename _Traits::_SimdBase;
5123
5124 public:
5125 using _Impl = typename _Traits::_SimdImpl;
5126 friend _Impl;
5127 friend _SimdIntOperators<simd, _Tp, _Abi, true>;
5128
5129 using value_type = _Tp;
5130 using reference = _SmartReference<_MemberType, _Impl, value_type>;
5131 using mask_type = simd_mask<_Tp, _Abi>;
5132 using abi_type = _Abi;
5133
5134 static constexpr size_t size()
5135 { return __size_or_zero_v<_Tp, _Abi>; }
5136
5137 _GLIBCXX_SIMD_CONSTEXPR simd() = default;
5138 _GLIBCXX_SIMD_CONSTEXPR simd(const simd&) = default;
5139 _GLIBCXX_SIMD_CONSTEXPR simd(simd&&) noexcept = default;
5140 _GLIBCXX_SIMD_CONSTEXPR simd& operator=(const simd&) = default;
5141 _GLIBCXX_SIMD_CONSTEXPR simd& operator=(simd&&) noexcept = default;
5142
5143 // implicit broadcast constructor
5144 template <typename _Up,
5145 typename = enable_if_t<!is_same_v<__remove_cvref_t<_Up>, bool>>>
5146 _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
5147 simd(_ValuePreservingOrInt<_Up, value_type>&& __x)
5148 : _M_data(
5149 _Impl::_S_broadcast(static_cast<value_type>(static_cast<_Up&&>(__x))))
5150 {}
5151
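    // Illustrative usage sketch (not part of the library): because the
    // argument must be value-preserving (or int), broadcasts cannot
    // silently narrow:
    //   simd<float> __a = 1.5f; // OK: every element becomes 1.5f
    //   simd<float> __b = 1.5;  // ill-formed: double -> float may narrow
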
    // implicit type conversion constructor (convert from fixed_size to
    // fixed_size)
    template <typename _Up>
      _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR
      simd(const simd<_Up, simd_abi::fixed_size<size()>>& __x,
           enable_if_t<
             conjunction<
               is_same<simd_abi::fixed_size<size()>, abi_type>,
               negation<__is_narrowing_conversion<_Up, value_type>>,
               __converts_to_higher_integer_rank<_Up, value_type>>::value,
             void*> = nullptr)
      : simd{static_cast<array<_Up, size()>>(__x).data(), vector_aligned} {}

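    // Illustrative usage sketch (not part of the library): the implicit
    // conversion only exists between fixed_size simds of equal width when
    // the element conversion preserves values, e.g.:
    //   fixed_size_simd<short, 4> __s{};
    //   fixed_size_simd<int, 4> __i = __s; // OK: short -> int widens
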
    // explicit type conversion constructor
#ifdef _GLIBCXX_SIMD_ENABLE_STATIC_CAST
    template <typename _Up, typename _A2,
              typename = decltype(static_simd_cast<simd>(
                declval<const simd<_Up, _A2>&>()))>
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
      simd(const simd<_Up, _A2>& __x)
      : simd(static_simd_cast<simd>(__x)) {}
#endif // _GLIBCXX_SIMD_ENABLE_STATIC_CAST

    // generator constructor
    template <typename _Fp>
      _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
      simd(_Fp&& __gen, _ValuePreservingOrInt<decltype(declval<_Fp>()(
                                                declval<_SizeConstant<0>&>())),
                                              value_type>* = nullptr)
      : _M_data(_Impl::_S_generator(static_cast<_Fp&&>(__gen), _S_type_tag)) {}

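    // Illustrative usage sketch (not part of the library): the generator is
    // invoked with _SizeConstant indices 0 ... size()-1, so the index is a
    // constant expression inside the callable:
    //   simd<int> __iota([](auto __i) { return int(__i); }); // 0, 1, 2, ...
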
    // load constructor
    template <typename _Up, typename _Flags>
      _GLIBCXX_SIMD_ALWAYS_INLINE
      simd(const _Up* __mem, _Flags)
      : _M_data(
          _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag))
      {}

    // loads [simd.load]
    template <typename _Up, typename _Flags>
      _GLIBCXX_SIMD_ALWAYS_INLINE void
      copy_from(const _Vectorizable<_Up>* __mem, _Flags)
      {
        _M_data = static_cast<decltype(_M_data)>(
          _Impl::_S_load(_Flags::template _S_apply<simd>(__mem), _S_type_tag));
      }

    // stores [simd.store]
    template <typename _Up, typename _Flags>
      _GLIBCXX_SIMD_ALWAYS_INLINE void
      copy_to(_Vectorizable<_Up>* __mem, _Flags) const
      {
        _Impl::_S_store(_M_data, _Flags::template _S_apply<simd>(__mem),
                        _S_type_tag);
      }

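    // Illustrative usage sketch (not part of the library): the _Flags tag
    // states the alignment the caller guarantees; vector_aligned promises
    // memory_alignment_v<simd<float>>, element_aligned only alignof(float):
    //   alignas(memory_alignment_v<simd<float>>)
    //     float __buf[simd<float>::size()] = {};
    //   simd<float> __v(__buf, vector_aligned); // aligned load
    //   __v.copy_to(__buf, element_aligned);    // store without the promise
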
    // scalar access
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR reference
    operator[](size_t __i)
    { return {_M_data, int(__i)}; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR value_type
    operator[]([[maybe_unused]] size_t __i) const
    {
      if constexpr (__is_scalar_abi<_Abi>())
        {
          _GLIBCXX_DEBUG_ASSERT(__i == 0);
          return _M_data;
        }
      else
        return _M_data[__i];
    }

    // increment and decrement:
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd&
    operator++()
    {
      _Impl::_S_increment(_M_data);
      return *this;
    }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
    operator++(int)
    {
      simd __r = *this;
      _Impl::_S_increment(_M_data);
      return __r;
    }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd&
    operator--()
    {
      _Impl::_S_decrement(_M_data);
      return *this;
    }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
    operator--(int)
    {
      simd __r = *this;
      _Impl::_S_decrement(_M_data);
      return __r;
    }

    // unary operators (for any _Tp)
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR mask_type
    operator!() const
    { return {__private_init, _Impl::_S_negate(_M_data)}; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
    operator+() const
    { return *this; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR simd
    operator-() const
    { return {__private_init, _Impl::_S_unary_minus(_M_data)}; }

    // access to internal representation (suggested extension)
    _GLIBCXX_SIMD_ALWAYS_INLINE explicit _GLIBCXX_SIMD_CONSTEXPR
    simd(_CastType __init) : _M_data(__init) {}

    // compound assignment [simd.cassign]
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
    operator+=(simd& __lhs, const simd& __x)
    { return __lhs = __lhs + __x; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
    operator-=(simd& __lhs, const simd& __x)
    { return __lhs = __lhs - __x; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
    operator*=(simd& __lhs, const simd& __x)
    { return __lhs = __lhs * __x; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd&
    operator/=(simd& __lhs, const simd& __x)
    { return __lhs = __lhs / __x; }

    // binary operators [simd.binary]
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
    operator+(const simd& __x, const simd& __y)
    { return {__private_init, _Impl::_S_plus(__x._M_data, __y._M_data)}; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
    operator-(const simd& __x, const simd& __y)
    { return {__private_init, _Impl::_S_minus(__x._M_data, __y._M_data)}; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
    operator*(const simd& __x, const simd& __y)
    { return {__private_init, _Impl::_S_multiplies(__x._M_data, __y._M_data)}; }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
    operator/(const simd& __x, const simd& __y)
    { return {__private_init, _Impl::_S_divides(__x._M_data, __y._M_data)}; }

    // compares [simd.comparison]
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
    operator==(const simd& __x, const simd& __y)
    { return simd::_S_make_mask(_Impl::_S_equal_to(__x._M_data, __y._M_data)); }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
    operator!=(const simd& __x, const simd& __y)
    {
      return simd::_S_make_mask(
        _Impl::_S_not_equal_to(__x._M_data, __y._M_data));
    }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
    operator<(const simd& __x, const simd& __y)
    { return simd::_S_make_mask(_Impl::_S_less(__x._M_data, __y._M_data)); }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
    operator<=(const simd& __x, const simd& __y)
    {
      return simd::_S_make_mask(_Impl::_S_less_equal(__x._M_data, __y._M_data));
    }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
    operator>(const simd& __x, const simd& __y)
    { return simd::_S_make_mask(_Impl::_S_less(__y._M_data, __x._M_data)); }

    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend mask_type
    operator>=(const simd& __x, const simd& __y)
    {
      return simd::_S_make_mask(_Impl::_S_less_equal(__y._M_data, __x._M_data));
    }

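    // Illustrative usage sketch (not part of the library): comparisons are
    // elementwise and return mask_type rather than bool (note how > and >=
    // above reuse _S_less/_S_less_equal with swapped operands); reductions
    // such as any_of collapse the mask:
    //   simd<int> __a = 1, __b = 2;
    //   simd_mask<int> __k = __a < __b;
    //   bool __any = any_of(__k); // true
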
    // operator?: overloads (suggested extension) {{{
#ifdef __GXX_CONDITIONAL_IS_OVERLOADABLE__
    _GLIBCXX_SIMD_ALWAYS_INLINE _GLIBCXX_SIMD_CONSTEXPR friend simd
    operator?:(const mask_type& __k, const simd& __where_true,
               const simd& __where_false)
    {
      auto __ret = __where_false;
      _Impl::_S_masked_assign(__data(__k), __data(__ret), __data(__where_true));
      return __ret;
    }

#endif // __GXX_CONDITIONAL_IS_OVERLOADABLE__
    // }}}

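    // Illustrative usage sketch (not part of the library): without the
    // overloadable ?: extension, the same elementwise blend is written with
    // where():
    //   simd<float> __r = __where_false;
    //   where(__k, __r) = __where_true; // per element: pick the true arm
    //                                   // where __k is set
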
5354 // "private" because of the first arguments's namespace
5355 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
5356 simd(_PrivateInit, const _MemberType& __init)
5357 : _M_data(__init) {}
5358
5359 // "private" because of the first arguments's namespace
5360 _GLIBCXX_SIMD_INTRINSIC
5361 simd(_BitsetInit, bitset<size()> __init) : _M_data()
5362 { where(mask_type(__bitset_init, __init), *this) = ~*this; }
5363
5364 _GLIBCXX_SIMD_INTRINSIC constexpr bool
5365 _M_is_constprop() const
5366 {
5367 if constexpr (__is_scalar_abi<_Abi>())
5368 return __builtin_constant_p(_M_data);
5369 else
5370 return _M_data._M_is_constprop();
5371 }
5372
5373 private:
5374 _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR static mask_type
5375 _S_make_mask(typename mask_type::_MemberType __k)
5376 { return {__private_init, __k}; }
5377
5378 friend const auto& __data<value_type, abi_type>(const simd&);
5379 friend auto& __data<value_type, abi_type>(simd&);
5380 alignas(_Traits::_S_simd_align) _MemberType _M_data;
5381 };
5382
// }}}
/// @cond undocumented
// __data {{{
template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __data(const simd<_Tp, _Ap>& __x)
  { return __x._M_data; }

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __data(simd<_Tp, _Ap>& __x)
  { return __x._M_data; }

// }}}
namespace __float_bitwise_operators { //{{{
template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
  operator^(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
  {
    return {__private_init,
            _Ap::_SimdImpl::_S_bit_xor(__data(__a), __data(__b))};
  }

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
  operator|(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
  {
    return {__private_init,
            _Ap::_SimdImpl::_S_bit_or(__data(__a), __data(__b))};
  }

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR simd<_Tp, _Ap>
  operator&(const simd<_Tp, _Ap>& __a, const simd<_Tp, _Ap>& __b)
  {
    return {__private_init,
            _Ap::_SimdImpl::_S_bit_and(__data(__a), __data(__b))};
  }

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC _GLIBCXX_SIMD_CONSTEXPR
  enable_if_t<is_floating_point_v<_Tp>, simd<_Tp, _Ap>>
  operator~(const simd<_Tp, _Ap>& __a)
  { return {__private_init, _Ap::_SimdImpl::_S_complement(__data(__a))}; }
} // namespace __float_bitwise_operators }}}
/// @endcond
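
// Illustrative usage sketch (not part of the library): after
//   using namespace __float_bitwise_operators;
// bit manipulation of floating-point simds becomes expressible, e.g.
// clearing every element's sign bit:
//   simd<float> __sign = simd<float>(-0.f);
//   simd<float> __abs_x = __x & ~__sign; // assumes __x is a simd<float>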

/// @}
_GLIBCXX_SIMD_END_NAMESPACE

#endif // __cplusplus >= 201703L
#endif // _GLIBCXX_EXPERIMENTAL_SIMD_H

// vim: foldmethod=marker foldmarker={{{,}}}