#if defined(_MSC_VER) && !defined(__clang__)

/* MSVC: __m128i is a union of per-lane views; only a fragment survives here. */
/* ... */
    unsigned __int8 m128i_u8[16];
/* ... */

#define __ATTRIBUTE_SSE2__

#else /* GCC / Clang */

typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));

/* Unaligned counterparts of the vector types. */
typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));

typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
typedef signed char __v16qs __attribute__((__vector_size__(16)));

#ifdef __clang__ /* assumed guard: __min_vector_width__ is Clang-only */
#define __ATTRIBUTE_SSE2__ __attribute__((__target__("sse2"),__min_vector_width__(128)))
#define __ATTRIBUTE_MMXSSE2__ __attribute__((__target__("mmx,sse2"),__min_vector_width__(128)))
#else
#define __ATTRIBUTE_SSE2__ __attribute__((__target__("sse2")))
#define __ATTRIBUTE_MMXSSE2__ __attribute__((__target__("mmx,sse2")))
#endif

#define __INTRIN_INLINE_SSE2 __INTRIN_INLINE __ATTRIBUTE_SSE2__
#define __INTRIN_INLINE_MMXSSE2 __INTRIN_INLINE __ATTRIBUTE_MMXSSE2__

#endif /* _MSC_VER */
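/*
 * How these macros compose (illustrative sketch, not part of the original
 * header): each GCC/Clang implementation below is declared with
 * __INTRIN_INLINE_SSE2 or __INTRIN_INLINE_MMXSSE2, which marks it inline and
 * enables the sse2 (or mmx,sse2) target features for its body, e.g.:
 *
 *   __INTRIN_INLINE_SSE2 __m128d my_negate_pd(__m128d a)  // hypothetical helper
 *   {
 *       return _mm_xor_pd(a, _mm_set1_pd(-0.0));          // flip both sign bits
 *   }
 *
 * The extern prototypes that follow are the MSVC-side declarations of the
 * same intrinsics.
 */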
extern __m128d _mm_cvtsi64_sd(__m128d a, long long b);
extern long long _mm_cvtsd_si64(__m128d a);
extern long long _mm_cvttsd_si64(__m128d a);

extern __m128i _mm_set_epi16(short w7, short w6, short w5, short w4,
                             short w3, short w2, short w1, short w0);
extern __m128i _mm_set_epi8(char b15, char b14, char b13, char b12,
                            char b11, char b10, char b9, char b8,
                            char b7, char b6, char b5, char b4,
                            char b3, char b2, char b1, char b0);
extern __m128i _mm_setr_epi16(short w0, short w1, short w2, short w3,
                              short w4, short w5, short w6, short w7);
extern __m128i _mm_setr_epi8(char b15, char b14, char b13, char b12,
                             char b11, char b10, char b9, char b8,
                             char b7, char b6, char b5, char b4,
                             char b3, char b2, char b1, char b0);
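/* Convenience aliases: alternative spellings accepted by other compilers are
   mapped onto the canonical intrinsics, e.g. _mm_bslli_si128(v, 4) is the
   byte-shift spelling of _mm_slli_si128(v, 4). */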
#define _mm_set_pd1(a) _mm_set1_pd(a)
#define _mm_load_pd1(p) _mm_load1_pd(p)
#define _mm_store_pd1(p, a) _mm_store1_pd((p), (a))
#define _mm_bslli_si128 _mm_slli_si128
#define _mm_bsrli_si128 _mm_srli_si128
#define _mm_stream_si64 _mm_stream_si64x
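/* MSVC path: enable the intrinsics declared above as true compiler
   intrinsics rather than library calls. */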
#if defined(_MSC_VER) && !defined(__clang__)

#pragma intrinsic(_mm_add_sd)
#pragma intrinsic(_mm_add_pd)
#pragma intrinsic(_mm_sub_sd)
#pragma intrinsic(_mm_sub_pd)
#pragma intrinsic(_mm_mul_sd)
#pragma intrinsic(_mm_mul_pd)
#pragma intrinsic(_mm_div_sd)
#pragma intrinsic(_mm_div_pd)
#pragma intrinsic(_mm_sqrt_sd)
#pragma intrinsic(_mm_sqrt_pd)
#pragma intrinsic(_mm_min_sd)
#pragma intrinsic(_mm_min_pd)
#pragma intrinsic(_mm_max_sd)
#pragma intrinsic(_mm_max_pd)
#pragma intrinsic(_mm_and_pd)
#pragma intrinsic(_mm_andnot_pd)
#pragma intrinsic(_mm_or_pd)
#pragma intrinsic(_mm_xor_pd)
#pragma intrinsic(_mm_cmpeq_pd)
#pragma intrinsic(_mm_cmplt_pd)
#pragma intrinsic(_mm_cmple_pd)
#pragma intrinsic(_mm_cmpgt_pd)
#pragma intrinsic(_mm_cmpge_pd)
#pragma intrinsic(_mm_cmpord_pd)
#pragma intrinsic(_mm_cmpunord_pd)
#pragma intrinsic(_mm_cmpneq_pd)
#pragma intrinsic(_mm_cmpnlt_pd)
#pragma intrinsic(_mm_cmpnle_pd)
#pragma intrinsic(_mm_cmpngt_pd)
#pragma intrinsic(_mm_cmpnge_pd)
#pragma intrinsic(_mm_cmpeq_sd)
#pragma intrinsic(_mm_cmplt_sd)
#pragma intrinsic(_mm_cmple_sd)
#pragma intrinsic(_mm_cmpgt_sd)
#pragma intrinsic(_mm_cmpge_sd)
#pragma intrinsic(_mm_cmpord_sd)
#pragma intrinsic(_mm_cmpunord_sd)
#pragma intrinsic(_mm_cmpneq_sd)
#pragma intrinsic(_mm_cmpnlt_sd)
#pragma intrinsic(_mm_cmpnle_sd)
#pragma intrinsic(_mm_cmpngt_sd)
#pragma intrinsic(_mm_cmpnge_sd)
#pragma intrinsic(_mm_comieq_sd)
#pragma intrinsic(_mm_comilt_sd)
#pragma intrinsic(_mm_comile_sd)
#pragma intrinsic(_mm_comigt_sd)
#pragma intrinsic(_mm_comige_sd)
#pragma intrinsic(_mm_comineq_sd)
#pragma intrinsic(_mm_ucomieq_sd)
#pragma intrinsic(_mm_ucomilt_sd)
#pragma intrinsic(_mm_ucomile_sd)
#pragma intrinsic(_mm_ucomigt_sd)
#pragma intrinsic(_mm_ucomige_sd)
#pragma intrinsic(_mm_ucomineq_sd)
#pragma intrinsic(_mm_cvtpd_ps)
#pragma intrinsic(_mm_cvtps_pd)
#pragma intrinsic(_mm_cvtepi32_pd)
#pragma intrinsic(_mm_cvtpd_epi32)
#pragma intrinsic(_mm_cvtsd_si32)
#pragma intrinsic(_mm_cvtsd_ss)
#pragma intrinsic(_mm_cvtsi32_sd)
#pragma intrinsic(_mm_cvtss_sd)
#pragma intrinsic(_mm_cvttpd_epi32)
#pragma intrinsic(_mm_cvttsd_si32)
#pragma intrinsic(_mm_cvtsd_f64)
#pragma intrinsic(_mm_load_pd)
#pragma intrinsic(_mm_load1_pd)
#pragma intrinsic(_mm_loadr_pd)
#pragma intrinsic(_mm_loadu_pd)
#pragma intrinsic(_mm_load_sd)
#pragma intrinsic(_mm_loadh_pd)
#pragma intrinsic(_mm_loadl_pd)
#pragma intrinsic(_mm_set_sd)
#pragma intrinsic(_mm_set1_pd)
#pragma intrinsic(_mm_set_pd)
#pragma intrinsic(_mm_setr_pd)
#pragma intrinsic(_mm_setzero_pd)
#pragma intrinsic(_mm_move_sd)
#pragma intrinsic(_mm_store_sd)
#pragma intrinsic(_mm_store_pd)
#pragma intrinsic(_mm_store1_pd)
#pragma intrinsic(_mm_storeu_pd)
#pragma intrinsic(_mm_storer_pd)
#pragma intrinsic(_mm_storeh_pd)
#pragma intrinsic(_mm_storel_pd)
#pragma intrinsic(_mm_add_epi8)
#pragma intrinsic(_mm_add_epi16)
#pragma intrinsic(_mm_add_epi32)
#pragma intrinsic(_mm_add_epi64)
#pragma intrinsic(_mm_adds_epi8)
#pragma intrinsic(_mm_adds_epi16)
#pragma intrinsic(_mm_adds_epu8)
#pragma intrinsic(_mm_adds_epu16)
#pragma intrinsic(_mm_avg_epu8)
#pragma intrinsic(_mm_avg_epu16)
#pragma intrinsic(_mm_madd_epi16)
#pragma intrinsic(_mm_max_epi16)
#pragma intrinsic(_mm_max_epu8)
#pragma intrinsic(_mm_min_epi16)
#pragma intrinsic(_mm_min_epu8)
#pragma intrinsic(_mm_mulhi_epi16)
#pragma intrinsic(_mm_mulhi_epu16)
#pragma intrinsic(_mm_mullo_epi16)
#pragma intrinsic(_mm_mul_epu32)
#pragma intrinsic(_mm_sad_epu8)
#pragma intrinsic(_mm_sub_epi8)
#pragma intrinsic(_mm_sub_epi16)
#pragma intrinsic(_mm_sub_epi32)
#pragma intrinsic(_mm_sub_epi64)
#pragma intrinsic(_mm_subs_epi8)
#pragma intrinsic(_mm_subs_epi16)
#pragma intrinsic(_mm_subs_epu8)
#pragma intrinsic(_mm_subs_epu16)
#pragma intrinsic(_mm_and_si128)
#pragma intrinsic(_mm_andnot_si128)
#pragma intrinsic(_mm_or_si128)
#pragma intrinsic(_mm_xor_si128)
#pragma intrinsic(_mm_slli_si128)
#pragma intrinsic(_mm_slli_epi16)
#pragma intrinsic(_mm_sll_epi16)
#pragma intrinsic(_mm_slli_epi32)
#pragma intrinsic(_mm_sll_epi32)
#pragma intrinsic(_mm_slli_epi64)
#pragma intrinsic(_mm_sll_epi64)
#pragma intrinsic(_mm_srai_epi16)
#pragma intrinsic(_mm_sra_epi16)
#pragma intrinsic(_mm_srai_epi32)
#pragma intrinsic(_mm_sra_epi32)
#pragma intrinsic(_mm_srli_si128)
#pragma intrinsic(_mm_srli_epi16)
#pragma intrinsic(_mm_srl_epi16)
#pragma intrinsic(_mm_srli_epi32)
#pragma intrinsic(_mm_srl_epi32)
#pragma intrinsic(_mm_srli_epi64)
#pragma intrinsic(_mm_srl_epi64)
#pragma intrinsic(_mm_cmpeq_epi8)
#pragma intrinsic(_mm_cmpeq_epi16)
#pragma intrinsic(_mm_cmpeq_epi32)
#pragma intrinsic(_mm_cmpgt_epi8)
#pragma intrinsic(_mm_cmpgt_epi16)
#pragma intrinsic(_mm_cmpgt_epi32)
#pragma intrinsic(_mm_cmplt_epi8)
#pragma intrinsic(_mm_cmplt_epi16)
#pragma intrinsic(_mm_cmplt_epi32)
#pragma intrinsic(_mm_cvtsi64_sd)
#pragma intrinsic(_mm_cvtsd_si64)
#pragma intrinsic(_mm_cvttsd_si64)
#pragma intrinsic(_mm_cvtepi32_ps)
#pragma intrinsic(_mm_cvtps_epi32)
#pragma intrinsic(_mm_cvttps_epi32)
#pragma intrinsic(_mm_cvtsi32_si128)
#pragma intrinsic(_mm_cvtsi64_si128)
#pragma intrinsic(_mm_cvtsi128_si32)
#pragma intrinsic(_mm_cvtsi128_si64)
#pragma intrinsic(_mm_load_si128)
#pragma intrinsic(_mm_loadu_si128)
#pragma intrinsic(_mm_loadl_epi64)
#pragma intrinsic(_mm_set_epi32)
#pragma intrinsic(_mm_set_epi16)
#pragma intrinsic(_mm_set_epi8)
#pragma intrinsic(_mm_set1_epi32)
#pragma intrinsic(_mm_set1_epi16)
#pragma intrinsic(_mm_set1_epi8)
#pragma intrinsic(_mm_setl_epi64)
#pragma intrinsic(_mm_setr_epi32)
#pragma intrinsic(_mm_setr_epi16)
#pragma intrinsic(_mm_setr_epi8)
#pragma intrinsic(_mm_setzero_si128)
#pragma intrinsic(_mm_store_si128)
#pragma intrinsic(_mm_storeu_si128)
#pragma intrinsic(_mm_maskmoveu_si128)
#pragma intrinsic(_mm_storel_epi64)
#pragma intrinsic(_mm_stream_pd)
#pragma intrinsic(_mm_stream_si128)
#pragma intrinsic(_mm_stream_si32)
#pragma intrinsic(_mm_clflush)
#pragma intrinsic(_mm_lfence)
#pragma intrinsic(_mm_mfence)
#pragma intrinsic(_mm_packs_epi16)
#pragma intrinsic(_mm_packs_epi32)
#pragma intrinsic(_mm_packus_epi16)
#pragma intrinsic(_mm_extract_epi16)
#pragma intrinsic(_mm_insert_epi16)
#pragma intrinsic(_mm_movemask_epi8)
#pragma intrinsic(_mm_shuffle_epi32)
#pragma intrinsic(_mm_shufflelo_epi16)
#pragma intrinsic(_mm_shufflehi_epi16)
#pragma intrinsic(_mm_unpackhi_epi8)
#pragma intrinsic(_mm_unpackhi_epi16)
#pragma intrinsic(_mm_unpackhi_epi32)
#pragma intrinsic(_mm_unpackhi_epi64)
#pragma intrinsic(_mm_unpacklo_epi8)
#pragma intrinsic(_mm_unpacklo_epi16)
#pragma intrinsic(_mm_unpacklo_epi32)
#pragma intrinsic(_mm_unpacklo_epi64)
#pragma intrinsic(_mm_move_epi64)
#pragma intrinsic(_mm_unpackhi_pd)
#pragma intrinsic(_mm_unpacklo_pd)
#pragma intrinsic(_mm_movemask_pd)
#pragma intrinsic(_mm_shuffle_pd)
#pragma intrinsic(_mm_castpd_ps)
#pragma intrinsic(_mm_castpd_si128)
#pragma intrinsic(_mm_castps_pd)
#pragma intrinsic(_mm_castps_si128)
#pragma intrinsic(_mm_castsi128_ps)
#pragma intrinsic(_mm_castsi128_pd)
#pragma intrinsic(_mm_pause)

#else /* GCC / Clang */
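/* GCC / Clang path: the intrinsics are implemented inline on top of compiler
   builtins and vector extensions. Where HAS_BUILTIN() reports a generic
   builtin (__builtin_shufflevector, __builtin_convertvector,
   __builtin_elementwise_*), that form is preferred; otherwise the
   SSE2-specific __builtin_ia32_* form is used.

   First, packed double-precision arithmetic and bitwise logic. */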
__INTRIN_INLINE_SSE2 __m128d _mm_add_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2df)a + (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_sub_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2df)a - (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_mul_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2df)a * (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_div_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2df)a / (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_sqrt_sd(__m128d a, __m128d b)
{
    __m128d __c = __builtin_ia32_sqrtsd((__v2df)b);
    return __extension__(__m128d){__c[0], a[1]};
}

__INTRIN_INLINE_SSE2 __m128d _mm_sqrt_pd(__m128d a)
{
    return __builtin_ia32_sqrtpd((__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128d _mm_min_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_minsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_min_pd(__m128d a, __m128d b)
{
    return __builtin_ia32_minpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_max_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_maxsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_max_pd(__m128d a, __m128d b)
{
    return __builtin_ia32_maxpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_and_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2du)a & (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_andnot_pd(__m128d a, __m128d b)
{
    return (__m128d)(~(__v2du)a & (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_or_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2du)a | (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_xor_pd(__m128d a, __m128d b)
{
    return (__m128d)((__v2du)a ^ (__v2du)b);
}
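/* Packed and scalar double-precision comparisons. The *_sd forms compare the
   low lanes and keep the upper lane of a; the "greater" forms (cmpgt, cmpge,
   cmpngt, cmpnge) reuse the lt/le builtins with the operands swapped. */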
__INTRIN_INLINE_SSE2 __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpeqpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmplt_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpltpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmple_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmplepd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpgt_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpltpd((__v2df)b, (__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpge_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmplepd((__v2df)b, (__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpord_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpordpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpunord_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpunordpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpneq_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpneqpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpnlt_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpnltpd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpnle_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpnlepd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpngt_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpnltpd((__v2df)b, (__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpnge_pd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpnlepd((__v2df)b, (__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpeq_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpeqsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmplt_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpltsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmple_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmplesd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpgt_sd(__m128d a, __m128d b)
{
    __m128d __c = __builtin_ia32_cmpltsd((__v2df)b, (__v2df)a);
    return __extension__(__m128d){__c[0], a[1]};
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpge_sd(__m128d a, __m128d b)
{
    __m128d __c = __builtin_ia32_cmplesd((__v2df)b, (__v2df)a);
    return __extension__(__m128d){__c[0], a[1]};
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpord_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpordsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpunord_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpunordsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpneq_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpneqsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpnlt_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpnltsd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpnle_sd(__m128d a, __m128d b)
{
    return (__m128d)__builtin_ia32_cmpnlesd((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpngt_sd(__m128d a, __m128d b)
{
    __m128d __c = __builtin_ia32_cmpnltsd((__v2df)b, (__v2df)a);
    return __extension__(__m128d){__c[0], a[1]};
}

__INTRIN_INLINE_SSE2 __m128d _mm_cmpnge_sd(__m128d a, __m128d b)
{
    __m128d __c = __builtin_ia32_cmpnlesd((__v2df)b, (__v2df)a);
    return __extension__(__m128d){__c[0], a[1]};
}
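/* COMISD/UCOMISD: scalar comparisons that return an int rather than a lane
   mask. The ucomi* variants are the quiet (non-signaling on QNaN) forms. */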
__INTRIN_INLINE_SSE2 int _mm_comieq_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_comisdeq((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_comilt_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_comisdlt((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_comile_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_comisdle((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_comigt_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_comisdgt((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_comige_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_comisdge((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_comineq_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_comisdneq((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_ucomieq_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_ucomisdeq((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_ucomilt_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_ucomisdlt((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_ucomile_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_ucomisdle((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_ucomigt_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_ucomisdgt((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_ucomige_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_ucomisdge((__v2df)a, (__v2df)b);
}

__INTRIN_INLINE_SSE2 int _mm_ucomineq_sd(__m128d a, __m128d b)
{
    return __builtin_ia32_ucomisdneq((__v2df)a, (__v2df)b);
}
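/* Conversions between packed/scalar doubles, floats, and 32-bit integers,
   including the MMX __m64 forms. */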
__INTRIN_INLINE_SSE2 __m128 _mm_cvtpd_ps(__m128d a)
{
    return __builtin_ia32_cvtpd2ps((__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128d _mm_cvtps_pd(__m128 a)
{
#if HAS_BUILTIN(__builtin_convertvector)
    return (__m128d)__builtin_convertvector(
        __builtin_shufflevector((__v4sf)a, (__v4sf)a, 0, 1), __v2df);
#else
    return __builtin_ia32_cvtps2pd(a);
#endif
}

__INTRIN_INLINE_SSE2 __m128d _mm_cvtepi32_pd(__m128i a)
{
#if HAS_BUILTIN(__builtin_convertvector)
    return (__m128d)__builtin_convertvector(
        __builtin_shufflevector((__v4si)a, (__v4si)a, 0, 1), __v2df);
#else
    return __builtin_ia32_cvtdq2pd((__v4si)a);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_cvtpd_epi32(__m128d a)
{
    return (__m128i)__builtin_ia32_cvtpd2dq((__v2df)a);
}

__INTRIN_INLINE_SSE2 int _mm_cvtsd_si32(__m128d a)
{
    return __builtin_ia32_cvtsd2si((__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128 _mm_cvtsd_ss(__m128 a, __m128d b)
{
    return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)a, (__v2df)b);
}

/* ... */

__INTRIN_INLINE_SSE2 __m128i _mm_cvttpd_epi32(__m128d a)
{
    return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)a);
}

__INTRIN_INLINE_SSE2 int _mm_cvttsd_si32(__m128d a)
{
    return __builtin_ia32_cvttsd2si((__v2df)a);
}

__INTRIN_INLINE_MMXSSE2 __m64 _mm_cvtpd_pi32(__m128d a)
{
    return (__m64)__builtin_ia32_cvtpd2pi((__v2df)a);
}

__INTRIN_INLINE_MMXSSE2 __m64 _mm_cvttpd_pi32(__m128d a)
{
    return (__m64)__builtin_ia32_cvttpd2pi((__v2df)a);
}

__INTRIN_INLINE_MMXSSE2 __m128d _mm_cvtpi32_pd(__m64 a)
{
    return __builtin_ia32_cvtpi2pd((__v2si)a);
}
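/* Loads. The unaligned forms go through __attribute__((packed, may_alias))
   wrapper structs so the compiler emits an unaligned, aliasing-safe load. */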
__INTRIN_INLINE_SSE2 __m128d _mm_load_pd(double const *dp)
{
    return *(const __m128d *)dp;
}

__INTRIN_INLINE_SSE2 __m128d _mm_load1_pd(double const *dp)
{
    struct __mm_load1_pd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    double __u = ((const struct __mm_load1_pd_struct *)dp)->__u;
    return __extension__(__m128d){__u, __u};
}

#define _MM_SHUFFLE2(fp1, fp0) \
    (((fp1) << 1) | (fp0))
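/* Worked example: _MM_SHUFFLE2(1, 0) == 2 (binary 10). Used with
   _mm_shuffle_pd below, bit 0 picks element 0 of the first operand for the
   low lane, and bit 1 picks element 1 of the second operand for the high
   lane. */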
__INTRIN_INLINE_SSE2 __m128d _mm_loadr_pd(double const *dp)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    __m128d u = *(const __m128d *)dp;
    return __builtin_shufflevector((__v2df)u, (__v2df)u, 1, 0);
#else
    return (__m128d){ dp[1], dp[0] };
#endif
}

__INTRIN_INLINE_SSE2 __m128d _mm_loadu_pd(double const *dp)
{
    struct __loadu_pd {
        __m128d_u __v;
    } __attribute__((__packed__, __may_alias__));
    return ((const struct __loadu_pd *)dp)->__v;
}

__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si64(void const *a)
{
    struct __loadu_si64 {
        long long __v;
    } __attribute__((__packed__, __may_alias__));
    long long __u = ((const struct __loadu_si64 *)a)->__v;
    return __extension__(__m128i)(__v2di){__u, 0LL};
}

__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si32(void const *a)
{
    struct __loadu_si32 {
        int __v;
    } __attribute__((__packed__, __may_alias__));
    int __u = ((const struct __loadu_si32 *)a)->__v;
    return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
}

__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si16(void const *a)
{
    struct __loadu_si16 {
        short __v;
    } __attribute__((__packed__, __may_alias__));
    short __u = ((const struct __loadu_si16 *)a)->__v;
    return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
}

__INTRIN_INLINE_SSE2 __m128d _mm_load_sd(double const *dp)
{
    struct __mm_load_sd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    double __u = ((const struct __mm_load_sd_struct *)dp)->__u;
    return __extension__(__m128d){__u, 0};
}

__INTRIN_INLINE_SSE2 __m128d _mm_loadh_pd(__m128d a, double const *dp)
{
    struct __mm_loadh_pd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    double __u = ((const struct __mm_loadh_pd_struct *)dp)->__u;
    return __extension__(__m128d){a[0], __u};
}

__INTRIN_INLINE_SSE2 __m128d _mm_loadl_pd(__m128d a, double const *dp)
{
    struct __mm_loadl_pd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    double __u = ((const struct __mm_loadl_pd_struct *)dp)->__u;
    return __extension__(__m128d){__u, a[1]};
}

__INTRIN_INLINE_SSE2 __m128d _mm_undefined_pd(void)
{
#if HAS_BUILTIN(__builtin_ia32_undef128)
    return (__m128d)__builtin_ia32_undef128();
#else
    __m128d undef = undef;
    return undef;
#endif
}
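/* Constructors for __m128d. Note _mm_set_pd(w, x) places w in the high lane
   and x in the low lane, while _mm_setr_pd takes its arguments in memory
   order. */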
__INTRIN_INLINE_SSE2 __m128d _mm_set_sd(double w)
{
    return __extension__(__m128d){w, 0};
}

__INTRIN_INLINE_SSE2 __m128d _mm_set1_pd(double w)
{
    return __extension__(__m128d){w, w};
}

__INTRIN_INLINE_SSE2 __m128d _mm_set_pd(double w, double x)
{
    return __extension__(__m128d){x, w};
}

__INTRIN_INLINE_SSE2 __m128d _mm_setr_pd(double w, double x)
{
    return __extension__(__m128d){w, x};
}

__INTRIN_INLINE_SSE2 __m128d _mm_setzero_pd(void)
{
    return __extension__(__m128d){0, 0};
}
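/* Stores, again using packed/may_alias wrapper structs for the unaligned and
   partial forms. */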
__INTRIN_INLINE_SSE2 void _mm_store_sd(double *dp, __m128d a)
{
    struct __mm_store_sd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    ((struct __mm_store_sd_struct *)dp)->__u = a[0];
}

__INTRIN_INLINE_SSE2 void _mm_store1_pd(double *dp, __m128d a)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    a = __builtin_shufflevector((__v2df)a, (__v2df)a, 0, 0);
    /* ... */
#endif
}

__INTRIN_INLINE_SSE2 void _mm_storeu_pd(double *dp, __m128d a)
{
    struct __storeu_pd {
        __m128d_u __v;
    } __attribute__((__packed__, __may_alias__));
    ((struct __storeu_pd *)dp)->__v = a;
}

__INTRIN_INLINE_SSE2 void _mm_storer_pd(double *dp, __m128d a)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    a = __builtin_shufflevector((__v2df)a, (__v2df)a, 1, 0);
    /* ... */
#endif
}

__INTRIN_INLINE_SSE2 void _mm_storeh_pd(double *dp, __m128d a)
{
    struct __mm_storeh_pd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    ((struct __mm_storeh_pd_struct *)dp)->__u = a[1];
}

__INTRIN_INLINE_SSE2 void _mm_storel_pd(double *dp, __m128d a)
{
    struct __mm_storeh_pd_struct {
        double __u;
    } __attribute__((__packed__, __may_alias__));
    ((struct __mm_storeh_pd_struct *)dp)->__u = a[0];
}
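/* Packed integer arithmetic. Wrap-around adds and subs are expressed with
   the unsigned vector types, since unsigned overflow is well defined in C;
   the saturating forms fall back to the padds/paddus/psubs/psubus builtins
   when no generic elementwise builtin is available. */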
__INTRIN_INLINE_SSE2 __m128i _mm_add_epi8(__m128i a, __m128i b)
{
    return (__m128i)((__v16qu)a + (__v16qu)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_add_epi16(__m128i a, __m128i b)
{
    return (__m128i)((__v8hu)a + (__v8hu)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_add_epi32(__m128i a, __m128i b)
{
    return (__m128i)((__v4su)a + (__v4su)b);
}

__INTRIN_INLINE_MMXSSE2 __m64 _mm_add_si64(__m64 a, __m64 b)
{
    return (__m64)__builtin_ia32_paddq((__v1di)a, (__v1di)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_add_epi64(__m128i a, __m128i b)
{
    return (__m128i)((__v2du)a + (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_adds_epi8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
    return (__m128i)__builtin_elementwise_add_sat((__v16qs)a, (__v16qs)b);
#else
    return (__m128i)__builtin_ia32_paddsb128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_adds_epi16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
    return (__m128i)__builtin_elementwise_add_sat((__v8hi)a, (__v8hi)b);
#else
    return (__m128i)__builtin_ia32_paddsw128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_adds_epu8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
    return (__m128i)__builtin_elementwise_add_sat((__v16qu)a, (__v16qu)b);
#else
    return (__m128i)__builtin_ia32_paddusb128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_adds_epu16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_add_sat)
    return (__m128i)__builtin_elementwise_add_sat((__v8hu)a, (__v8hu)b);
#else
    return (__m128i)__builtin_ia32_paddusw128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_avg_epu8(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_pavgb128((__v16qi)a, (__v16qi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_avg_epu16(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_pavgw128((__v8hi)a, (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_madd_epi16(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)a, (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_max_epi16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_max)
    return (__m128i)__builtin_elementwise_max((__v8hi)a, (__v8hi)b);
#else
    return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_max_epu8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_max)
    return (__m128i)__builtin_elementwise_max((__v16qu)a, (__v16qu)b);
#else
    return (__m128i)__builtin_ia32_pmaxub128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_min_epi16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_min)
    return (__m128i)__builtin_elementwise_min((__v8hi)a, (__v8hi)b);
#else
    return (__m128i)__builtin_ia32_pminsw128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_min_epu8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_min)
    return (__m128i)__builtin_elementwise_min((__v16qu)a, (__v16qu)b);
#else
    return (__m128i)__builtin_ia32_pminub128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_pmulhw128((__v8hi)a, (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)a, (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_mullo_epi16(__m128i a, __m128i b)
{
    return (__m128i)((__v8hu)a * (__v8hu)b);
}

__INTRIN_INLINE_MMXSSE2 __m64 _mm_mul_su32(__m64 a, __m64 b)
{
    return (__m64)__builtin_ia32_pmuludq((__v2si)a, (__v2si)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_mul_epu32(__m128i a, __m128i b)
{
    return __builtin_ia32_pmuludq128((__v4si)a, (__v4si)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sad_epu8(__m128i a, __m128i b)
{
    return __builtin_ia32_psadbw128((__v16qi)a, (__v16qi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sub_epi8(__m128i a, __m128i b)
{
    return (__m128i)((__v16qu)a - (__v16qu)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sub_epi16(__m128i a, __m128i b)
{
    return (__m128i)((__v8hu)a - (__v8hu)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sub_epi32(__m128i a, __m128i b)
{
    return (__m128i)((__v4su)a - (__v4su)b);
}

__INTRIN_INLINE_MMXSSE2 __m64 _mm_sub_si64(__m64 a, __m64 b)
{
    return (__m64)__builtin_ia32_psubq((__v1di)a, (__v1di)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sub_epi64(__m128i a, __m128i b)
{
    return (__m128i)((__v2du)a - (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_subs_epi8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
    return (__m128i)__builtin_elementwise_sub_sat((__v16qs)a, (__v16qs)b);
#else
    return (__m128i)__builtin_ia32_psubsb128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_subs_epi16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
    return (__m128i)__builtin_elementwise_sub_sat((__v8hi)a, (__v8hi)b);
#else
    return (__m128i)__builtin_ia32_psubsw128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_subs_epu8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
    return (__m128i)__builtin_elementwise_sub_sat((__v16qu)a, (__v16qu)b);
#else
    return (__m128i)__builtin_ia32_psubusb128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_subs_epu16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_elementwise_sub_sat)
    return (__m128i)__builtin_elementwise_sub_sat((__v8hu)a, (__v8hu)b);
#else
    return (__m128i)__builtin_ia32_psubusw128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_and_si128(__m128i a, __m128i b)
{
    return (__m128i)((__v2du)a & (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_andnot_si128(__m128i a, __m128i b)
{
    return (__m128i)(~(__v2du)a & (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_or_si128(__m128i a, __m128i b)
{
    return (__m128i)((__v2du)a | (__v2du)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_xor_si128(__m128i a, __m128i b)
{
    return (__m128i)((__v2du)a ^ (__v2du)b);
}
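/* Shifts. The *_si128 byte shifts are macros because the shift count must be
   a compile-time immediate; the element shifts come in immediate
   (slli/srli/srai) and vector-count (sll/srl/sra) forms. */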
#if HAS_BUILTIN(__builtin_ia32_pslldqi128_byteshift) /* assumed guard */
#define _mm_slli_si128(a, imm) \
    ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
#else
__INTRIN_INLINE_SSE2 __m128i _mm_slli_si128(__m128i a, int imm)
{
    return (__m128i)__builtin_ia32_pslldqi128(a, imm * 8);
}
#endif

__INTRIN_INLINE_SSE2 __m128i _mm_slli_epi16(__m128i a, int count)
{
    return (__m128i)__builtin_ia32_psllwi128((__v8hi)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sll_epi16(__m128i a, __m128i count)
{
    return (__m128i)__builtin_ia32_psllw128((__v8hi)a, (__v8hi)count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_slli_epi32(__m128i a, int count)
{
    return (__m128i)__builtin_ia32_pslldi128((__v4si)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sll_epi32(__m128i a, __m128i count)
{
    return (__m128i)__builtin_ia32_pslld128((__v4si)a, (__v4si)count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_slli_epi64(__m128i a, int count)
{
    return __builtin_ia32_psllqi128((__v2di)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sll_epi64(__m128i a, __m128i count)
{
    return __builtin_ia32_psllq128((__v2di)a, (__v2di)count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srai_epi16(__m128i a, int count)
{
    return (__m128i)__builtin_ia32_psrawi128((__v8hi)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sra_epi16(__m128i a, __m128i count)
{
    return (__m128i)__builtin_ia32_psraw128((__v8hi)a, (__v8hi)count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srai_epi32(__m128i a, int count)
{
    return (__m128i)__builtin_ia32_psradi128((__v4si)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_sra_epi32(__m128i a, __m128i count)
{
    return (__m128i)__builtin_ia32_psrad128((__v4si)a, (__v4si)count);
}

#if HAS_BUILTIN(__builtin_ia32_psrldqi128_byteshift) /* assumed guard */
#define _mm_srli_si128(a, imm) \
    ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
#else
__INTRIN_INLINE_SSE2 __m128i _mm_srli_si128(__m128i a, int imm)
{
    return (__m128i)__builtin_ia32_psrldqi128(a, imm * 8);
}
#endif

__INTRIN_INLINE_SSE2 __m128i _mm_srli_epi16(__m128i a, int count)
{
    return (__m128i)__builtin_ia32_psrlwi128((__v8hi)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srl_epi16(__m128i a, __m128i count)
{
    return (__m128i)__builtin_ia32_psrlw128((__v8hi)a, (__v8hi)count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srli_epi32(__m128i a, int count)
{
    return (__m128i)__builtin_ia32_psrldi128((__v4si)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srl_epi32(__m128i a, __m128i count)
{
    return (__m128i)__builtin_ia32_psrld128((__v4si)a, (__v4si)count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srli_epi64(__m128i a, int count)
{
    return __builtin_ia32_psrlqi128((__v2di)a, count);
}

__INTRIN_INLINE_SSE2 __m128i _mm_srl_epi64(__m128i a, __m128i count)
{
    return __builtin_ia32_psrlq128((__v2di)a, (__v2di)count);
}
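/* Packed integer comparisons; the vector == and > operators produce the
   all-ones/all-zeros lane masks these intrinsics are defined to return. */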
__INTRIN_INLINE_SSE2 __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
{
    return (__m128i)((__v16qi)a == (__v16qi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
{
    return (__m128i)((__v8hi)a == (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
{
    return (__m128i)((__v4si)a == (__v4si)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
{
    return (__m128i)((__v16qs)a > (__v16qs)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
{
    return (__m128i)((__v8hi)a > (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
{
    return (__m128i)((__v4si)a > (__v4si)b);
}
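/* 64-bit scalar conversions, float/integer vector conversions, and moves
   between general registers and the low lane of an __m128i. */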
__INTRIN_INLINE_SSE2 long long _mm_cvtsd_si64(__m128d a)
{
    return __builtin_ia32_cvtsd2si64((__v2df)a);
}

__INTRIN_INLINE_SSE2 long long _mm_cvttsd_si64(__m128d a)
{
    return __builtin_ia32_cvttsd2si64((__v2df)a);
}

__INTRIN_INLINE_SSE2 __m128 _mm_cvtepi32_ps(__m128i a)
{
#if HAS_BUILTIN(__builtin_convertvector)
    return (__m128)__builtin_convertvector((__v4si)a, __v4sf);
#else
    return __builtin_ia32_cvtdq2ps((__v4si)a);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_cvtps_epi32(__m128 a)
{
    return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)a);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cvttps_epi32(__m128 a)
{
    return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)a);
}

__INTRIN_INLINE_SSE2 __m128i _mm_cvtsi32_si128(int a)
{
    return __extension__(__m128i)(__v4si){a, 0, 0, 0};
}

__INTRIN_INLINE_SSE2 __m128i _mm_cvtsi64_si128(long long a)
{
    return __extension__(__m128i)(__v2di){a, 0};
}

__INTRIN_INLINE_SSE2 int _mm_cvtsi128_si32(__m128i a)
{
    __v4si b = (__v4si)a;
    return b[0];
}
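/* Unaligned integer loads and the set/setr constructors. Note the set*
   argument order is highest element first, while setr* takes memory order;
   setr is implemented by reversing into set. */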
__INTRIN_INLINE_SSE2 __m128i _mm_loadu_si128(__m128i_u const *p)
{
    struct __loadu_si128 {
        __m128i_u __v;
    } __attribute__((__packed__, __may_alias__));
    return ((const struct __loadu_si128 *)p)->__v;
}

__INTRIN_INLINE_SSE2 __m128i _mm_loadl_epi64(__m128i_u const *p)
{
    struct __mm_loadl_epi64_struct {
        long long __u;
    } __attribute__((__packed__, __may_alias__));
    return __extension__(__m128i){
        ((const struct __mm_loadl_epi64_struct *)p)->__u, 0};
}

__INTRIN_INLINE_SSE2 __m128i _mm_undefined_si128(void)
{
#if HAS_BUILTIN(__builtin_ia32_undef128)
    return (__m128i)__builtin_ia32_undef128();
#else
    __m128i undef = undef;
    return undef;
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_set_epi64x(long long q1, long long q0)
{
    return __extension__(__m128i)(__v2di){q0, q1};
}

__INTRIN_INLINE_SSE2 __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
{
    return __extension__(__m128i)(__v4si){i0, i1, i2, i3};
}

__INTRIN_INLINE_SSE2 __m128i _mm_set_epi16(short w7, short w6, short w5, short w4,
                                           short w3, short w2, short w1, short w0)
{
    return __extension__(__m128i)(__v8hi){w0, w1, w2, w3, w4, w5, w6, w7};
}

__INTRIN_INLINE_SSE2 __m128i _mm_set_epi8(char b15, char b14, char b13, char b12,
                                          char b11, char b10, char b9, char b8,
                                          char b7, char b6, char b5, char b4,
                                          char b3, char b2, char b1, char b0)
{
    return __extension__(__m128i)(__v16qi){
        b0, b1, b2, b3, b4, b5, b6, b7,
        b8, b9, b10, b11, b12, b13, b14, b15};
}

/* ... */

__INTRIN_INLINE_SSE2 __m128i _mm_set1_epi8(char b)
{
    return _mm_set_epi8(b, b, b, b, b, b, b, b,
                        b, b, b, b, b, b, b, b);
}

/* ... */

__INTRIN_INLINE_SSE2 __m128i _mm_setr_epi16(short w0, short w1, short w2, short w3,
                                            short w4, short w5, short w6, short w7)
{
    return _mm_set_epi16(w7, w6, w5, w4, w3, w2, w1, w0);
}

__INTRIN_INLINE_SSE2 __m128i _mm_setr_epi8(char b0, char b1, char b2, char b3,
                                           char b4, char b5, char b6, char b7,
                                           char b8, char b9, char b10, char b11,
                                           char b12, char b13, char b14, char b15)
{
    return _mm_set_epi8(b15, b14, b13, b12, b11, b10, b9, b8,
                        b7, b6, b5, b4, b3, b2, b1, b0);
}

__INTRIN_INLINE_SSE2 __m128i _mm_setzero_si128(void)
{
    return __extension__(__m128i)(__v2di){0LL, 0LL};
}
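/* Integer stores, the byte-masked store, and the non-temporal (streaming)
   stores that bypass the cache. */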
__INTRIN_INLINE_SSE2 void _mm_storeu_si128(__m128i_u *p, __m128i b)
{
    struct __storeu_si128 {
        __m128i_u __v;
    } __attribute__((__packed__, __may_alias__));
    ((struct __storeu_si128 *)p)->__v = b;
}

__INTRIN_INLINE_SSE2 void _mm_storeu_si64(void *p, __m128i b)
{
    struct __storeu_si64 {
        long long __v;
    } __attribute__((__packed__, __may_alias__));
    ((struct __storeu_si64 *)p)->__v = ((__v2di)b)[0];
}

__INTRIN_INLINE_SSE2 void _mm_storeu_si32(void *p, __m128i b)
{
    struct __storeu_si32 {
        int __v;
    } __attribute__((__packed__, __may_alias__));
    ((struct __storeu_si32 *)p)->__v = ((__v4si)b)[0];
}

__INTRIN_INLINE_SSE2 void _mm_storeu_si16(void *p, __m128i b)
{
    struct __storeu_si16 {
        short __v;
    } __attribute__((__packed__, __may_alias__));
    ((struct __storeu_si16 *)p)->__v = ((__v8hi)b)[0];
}

__INTRIN_INLINE_SSE2 void _mm_maskmoveu_si128(__m128i d, __m128i n, _Out_writes_bytes_(16) char *p)
{
    __builtin_ia32_maskmovdqu((__v16qi)d, (__v16qi)n, p);
}

__INTRIN_INLINE_SSE2 void _mm_storel_epi64(__m128i_u *p, __m128i a)
{
    struct __mm_storel_epi64_struct {
        long long __u;
    } __attribute__((__packed__, __may_alias__));
    ((struct __mm_storel_epi64_struct *)p)->__u = a[0];
}

__INTRIN_INLINE_SSE2 void _mm_stream_pd(double *p, __m128d a)
{
#if HAS_BUILTIN(__builtin_nontemporal_store)
    __builtin_nontemporal_store((__v2df)a, (__v2df *)p);
#else
    __builtin_ia32_movntpd(p, a);
#endif
}

__INTRIN_INLINE_SSE2 void _mm_stream_si128(__m128i *p, __m128i a)
{
#if HAS_BUILTIN(__builtin_nontemporal_store)
    __builtin_nontemporal_store((__v2di)a, (__v2di *)p);
#else
    __builtin_ia32_movntdq(p, a);
#endif
}

__INTRIN_INLINE_SSE2 void _mm_stream_si32(int *p, int a)
{
    __builtin_ia32_movnti(p, a);
}

/* Signature assumed from the _mm_stream_si64 alias defined earlier. */
__INTRIN_INLINE_SSE2 void _mm_stream_si64x(long long *p, long long a)
{
    __builtin_ia32_movnti64(p, a);
}
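/* Pack-with-saturation, 16-bit insert/extract, byte movemask, and the unpack
   (interleave) operations. The shuffle and insert/extract forms are macros
   so the lane selector stays a compile-time immediate. */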
__INTRIN_INLINE_SSE2 __m128i _mm_packs_epi16(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_packsswb128((__v8hi)a, (__v8hi)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_packs_epi32(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_packssdw128((__v4si)a, (__v4si)b);
}

__INTRIN_INLINE_SSE2 __m128i _mm_packus_epi16(__m128i a, __m128i b)
{
    return (__m128i)__builtin_ia32_packuswb128((__v8hi)a, (__v8hi)b);
}

#define _mm_extract_epi16(a, imm) \
    ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
                                                      (int)(imm)))

#define _mm_insert_epi16(a, b, imm) \
    ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
                                          (int)(imm)))

__INTRIN_INLINE_SSE2 int _mm_movemask_epi8(__m128i a)
{
    return __builtin_ia32_pmovmskb128((__v16qi)a);
}

#define _mm_shuffle_epi32(a, imm) \
    ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))

#define _mm_shufflelo_epi16(a, imm) \
    ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))

#define _mm_shufflehi_epi16(a, imm) \
    ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))

__INTRIN_INLINE_SSE2 __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector(
        (__v16qi)a, (__v16qi)b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
        16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
#else
    return (__m128i)__builtin_ia32_punpckhbw128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 4, 8 + 4, 5,
                                            8 + 5, 6, 8 + 6, 7, 8 + 7);
#else
    return (__m128i)__builtin_ia32_punpckhwd128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 2, 4 + 2, 3,
                                            4 + 3);
#else
    return (__m128i)__builtin_ia32_punpckhdq128((__v4si)a, (__v4si)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector((__v2di)a, (__v2di)b, 1, 2 + 1);
#else
    return (__m128i)__builtin_ia32_punpckhqdq128((__v2di)a, (__v2di)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector(
        (__v16qi)a, (__v16qi)b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
        16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
#else
    return (__m128i)__builtin_ia32_punpcklbw128((__v16qi)a, (__v16qi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 0, 8 + 0, 1,
                                            8 + 1, 2, 8 + 2, 3, 8 + 3);
#else
    return (__m128i)__builtin_ia32_punpcklwd128((__v8hi)a, (__v8hi)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 0, 4 + 0, 1,
                                            4 + 1);
#else
    return (__m128i)__builtin_ia32_punpckldq128((__v4si)a, (__v4si)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return (__m128i)__builtin_shufflevector((__v2di)a, (__v2di)b, 0, 2 + 0);
#else
    return (__m128i)__builtin_ia32_punpcklqdq128((__v2di)a, (__v2di)b);
#endif
}
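/* Moves between __m64 and __m128i, double-precision unpacks, the sign-bit
   movemask, and the _mm_shuffle_pd immediate macro, whose selector can be
   built with _MM_SHUFFLE2 above. */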
__INTRIN_INLINE_MMXSSE2 __m128i _mm_movpi64_epi64(__m64 a)
{
    return __extension__(__m128i)(__v2di){(long long)a, 0};
}

__INTRIN_INLINE_SSE2 __m128i _mm_move_epi64(__m128i a)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    /* assumed from the surrounding pattern */
    return __builtin_shufflevector((__v2di)a, _mm_setzero_si128(), 0, 2);
#else
    return (__m128i)__builtin_ia32_movq128((__v2di)a);
#endif
}

__INTRIN_INLINE_SSE2 __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return __builtin_shufflevector((__v2df)a, (__v2df)b, 1, 2 + 1);
#else
    return (__m128d)__builtin_ia32_unpckhpd((__v2df)a, (__v2df)b);
#endif
}

__INTRIN_INLINE_SSE2 __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
{
#if HAS_BUILTIN(__builtin_shufflevector)
    return __builtin_shufflevector((__v2df)a, (__v2df)b, 0, 2 + 0);
#else
    return (__m128d)__builtin_ia32_unpcklpd((__v2df)a, (__v2df)b);
#endif
}

__INTRIN_INLINE_SSE2 int _mm_movemask_pd(__m128d a)
{
    return __builtin_ia32_movmskpd((__v2df)a);
}

#define _mm_shuffle_pd(a, b, i) \
    ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
                                    (int)(i)))

/* ... */