; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE,SSE_SLOW,SSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSE_FAST,SSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSE_SLOW,SSSE3,SSSE3_SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSE_FAST,SSSE3,SSSE3_FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1_SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX1,AVX1_FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2_SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops | FileCheck %s --check-prefixes=AVX,AVX2,AVX2_FAST

; The next 8 tests check for matching the horizontal op and eliminating the shuffle.
; PR34111 - https://bugs.llvm.org/show_bug.cgi?id=34111

define <4 x float> @hadd_v4f32(<4 x float> %a) {
; SSE-LABEL: hadd_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a02 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 2>
  %a13 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 1, i32 3>
  %hop = fadd <2 x float> %a02, %a13
  %shuf = shufflevector <2 x float> %hop, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
  ret <4 x float> %shuf
}

define <8 x float> @hadd_v8f32a(<8 x float> %a) {
; SSE_SLOW-LABEL: hadd_v8f32a:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movaps %xmm0, %xmm2
; SSE_SLOW-NEXT: haddps %xmm1, %xmm2
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
; SSE_SLOW-NEXT: movaps %xmm2, %xmm1
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hadd_v8f32a:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: movaps %xmm0, %xmm2
; SSE_FAST-NEXT: haddps %xmm1, %xmm2
; SSE_FAST-NEXT: haddps %xmm0, %xmm0
; SSE_FAST-NEXT: movaps %xmm2, %xmm1
; SSE_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hadd_v8f32a:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hadd_v8f32a:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1_FAST-NEXT: vhaddps %ymm0, %ymm1, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2-LABEL: hadd_v8f32a:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: retq
  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hop = fadd <4 x float> %a0, %a1
  %shuf = shufflevector <4 x float> %hop, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
  ret <8 x float> %shuf
}

define <8 x float> @hadd_v8f32b(<8 x float> %a) {
; SSE-LABEL: hadd_v8f32b:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: haddps %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_v8f32b:
; AVX: # %bb.0:
; AVX-NEXT: vhaddps %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
  %hop = fadd <8 x float> %a0, %a1
  %shuf = shufflevector <8 x float> %hop, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
  ret <8 x float> %shuf
}

define <4 x float> @hsub_v4f32(<4 x float> %a) {
; SSE-LABEL: hsub_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: hsubps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a02 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 2>
  %a13 = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 1, i32 3>
  %hop = fsub <2 x float> %a02, %a13
  %shuf = shufflevector <2 x float> %hop, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x float> %shuf
}

define <8 x float> @hsub_v8f32a(<8 x float> %a) {
; SSE_SLOW-LABEL: hsub_v8f32a:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movaps %xmm0, %xmm2
; SSE_SLOW-NEXT: hsubps %xmm1, %xmm2
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
; SSE_SLOW-NEXT: movaps %xmm2, %xmm1
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hsub_v8f32a:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: movaps %xmm0, %xmm2
; SSE_FAST-NEXT: hsubps %xmm1, %xmm2
; SSE_FAST-NEXT: hsubps %xmm0, %xmm0
; SSE_FAST-NEXT: movaps %xmm2, %xmm1
; SSE_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hsub_v8f32a:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_SLOW-NEXT: vhsubps %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hsub_v8f32a:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1_FAST-NEXT: vhsubps %ymm0, %ymm1, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2-LABEL: hsub_v8f32a:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vhsubps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: retq
  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hop = fsub <4 x float> %a0, %a1
  %shuf = shufflevector <4 x float> %hop, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
  ret <8 x float> %shuf
}

define <8 x float> @hsub_v8f32b(<8 x float> %a) {
; SSE-LABEL: hsub_v8f32b:
; SSE: # %bb.0:
; SSE-NEXT: hsubps %xmm0, %xmm0
; SSE-NEXT: hsubps %xmm1, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_v8f32b:
; AVX: # %bb.0:
; AVX-NEXT: vhsubps %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
  %hop = fsub <8 x float> %a0, %a1
  %shuf = shufflevector <8 x float> %hop, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
  ret <8 x float> %shuf
}

define <2 x double> @hadd_v2f64(<2 x double> %a) {
; SSE_SLOW-LABEL: hadd_v2f64:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm1
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE_SLOW-NEXT: addsd %xmm0, %xmm1
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hadd_v2f64:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: haddpd %xmm0, %xmm0
; SSE_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hadd_v2f64:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hadd_v2f64:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: retq
;
; AVX2_SLOW-LABEL: hadd_v2f64:
; AVX2_SLOW: # %bb.0:
; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2_SLOW-NEXT: retq
;
; AVX2_FAST-LABEL: hadd_v2f64:
; AVX2_FAST: # %bb.0:
; AVX2_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX2_FAST-NEXT: retq
  %a0 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
  %a1 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
  %hop = fadd <2 x double> %a0, %a1
  %shuf = shufflevector <2 x double> %hop, <2 x double> undef, <2 x i32> <i32 0, i32 0>
  ret <2 x double> %shuf
}

define <2 x double> @hadd_v2f64_scalar_splat(<2 x double> %a) {
; SSE_SLOW-LABEL: hadd_v2f64_scalar_splat:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm1
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE_SLOW-NEXT: addsd %xmm0, %xmm1
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hadd_v2f64_scalar_splat:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: haddpd %xmm0, %xmm0
; SSE_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hadd_v2f64_scalar_splat:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hadd_v2f64_scalar_splat:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: retq
;
; AVX2_SLOW-LABEL: hadd_v2f64_scalar_splat:
; AVX2_SLOW: # %bb.0:
; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2_SLOW-NEXT: retq
;
; AVX2_FAST-LABEL: hadd_v2f64_scalar_splat:
; AVX2_FAST: # %bb.0:
; AVX2_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX2_FAST-NEXT: retq
  %a0 = extractelement <2 x double> %a, i32 0
  %a1 = extractelement <2 x double> %a, i32 1
  %hop = fadd double %a0, %a1
  %ins = insertelement <2 x double> undef, double %hop, i32 0
  %shuf = shufflevector <2 x double> %ins, <2 x double> undef, <2 x i32> <i32 0, i32 0>
  ret <2 x double> %shuf
}

define <4 x double> @hadd_v4f64_scalar_splat(<4 x double> %a) {
; SSE_SLOW-LABEL: hadd_v4f64_scalar_splat:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm2
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE_SLOW-NEXT: addsd %xmm0, %xmm2
; SSE_SLOW-NEXT: movapd %xmm1, %xmm3
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE_SLOW-NEXT: addsd %xmm1, %xmm3
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm1 = xmm3[0,0]
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hadd_v4f64_scalar_splat:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: haddpd %xmm0, %xmm0
; SSE_FAST-NEXT: haddpd %xmm1, %xmm1
; SSE_FAST-NEXT: retq
;
; AVX-LABEL: hadd_v4f64_scalar_splat:
; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
  %a0 = extractelement <4 x double> %a, i32 0
  %a1 = extractelement <4 x double> %a, i32 1
  %hop0 = fadd double %a0, %a1
  %a2 = extractelement <4 x double> %a, i32 2
  %a3 = extractelement <4 x double> %a, i32 3
  %hop1 = fadd double %a2, %a3
  %ins = insertelement <4 x double> undef, double %hop0, i32 0
  %ins2 = insertelement <4 x double> %ins, double %hop1, i32 2
  %shuf = shufflevector <4 x double> %ins2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %shuf
}

define <4 x double> @hadd_v4f64_scalar_broadcast(<4 x double> %a) {
; SSE_SLOW-LABEL: hadd_v4f64_scalar_broadcast:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm1
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE_SLOW-NEXT: addsd %xmm0, %xmm1
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
; SSE_SLOW-NEXT: movapd %xmm0, %xmm1
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hadd_v4f64_scalar_broadcast:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: haddpd %xmm0, %xmm0
; SSE_FAST-NEXT: movapd %xmm0, %xmm1
; SSE_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hadd_v4f64_scalar_broadcast:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hadd_v4f64_scalar_broadcast:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2_SLOW-LABEL: hadd_v4f64_scalar_broadcast:
; AVX2_SLOW: # %bb.0:
; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2_SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2_SLOW-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2_SLOW-NEXT: retq
;
; AVX2_FAST-LABEL: hadd_v4f64_scalar_broadcast:
; AVX2_FAST: # %bb.0:
; AVX2_FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX2_FAST-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2_FAST-NEXT: retq
  %a0 = extractelement <4 x double> %a, i32 0
  %a1 = extractelement <4 x double> %a, i32 1
  %hop0 = fadd double %a0, %a1
  %a2 = extractelement <4 x double> %a, i32 2
  %a3 = extractelement <4 x double> %a, i32 3
  %hop1 = fadd double %a2, %a3
  %ins = insertelement <4 x double> undef, double %hop0, i32 0
  %ins2 = insertelement <4 x double> %ins, double %hop1, i32 2
  %shuf = shufflevector <4 x double> %ins2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
  ret <4 x double> %shuf
}

define <4 x double> @hadd_v4f64(<4 x double> %a) {
; SSE_SLOW-LABEL: hadd_v4f64:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm2
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE_SLOW-NEXT: addsd %xmm0, %xmm2
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm2[0,0]
; SSE_SLOW-NEXT: movapd %xmm1, %xmm2
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE_SLOW-NEXT: addsd %xmm1, %xmm2
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm1 = xmm2[0,0]
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hadd_v4f64:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: haddpd %xmm0, %xmm0
; SSE_FAST-NEXT: haddpd %xmm1, %xmm1
; SSE_FAST-NEXT: retq
;
; AVX-LABEL: hadd_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vhaddpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
  %a0 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %a1 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %hop = fadd <4 x double> %a0, %a1
  %shuf = shufflevector <4 x double> %hop, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %shuf
}

define <2 x double> @hsub_v2f64(<2 x double> %a) {
; SSE_SLOW-LABEL: hsub_v2f64:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm1
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE_SLOW-NEXT: subsd %xmm1, %xmm0
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hsub_v2f64:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: hsubpd %xmm0, %xmm0
; SSE_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hsub_v2f64:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1_SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hsub_v2f64:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: retq
;
; AVX2_SLOW-LABEL: hsub_v2f64:
; AVX2_SLOW: # %bb.0:
; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2_SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2_SLOW-NEXT: retq
;
; AVX2_FAST-LABEL: hsub_v2f64:
; AVX2_FAST: # %bb.0:
; AVX2_FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
; AVX2_FAST-NEXT: retq
  %a0 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
  %a1 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
  %hop = fsub <2 x double> %a0, %a1
  %shuf = shufflevector <2 x double> %hop, <2 x double> undef, <2 x i32> <i32 undef, i32 0>
  ret <2 x double> %shuf
}

define <4 x double> @hsub_v4f64(<4 x double> %a) {
; SSE_SLOW-LABEL: hsub_v4f64:
; SSE_SLOW: # %bb.0:
; SSE_SLOW-NEXT: movapd %xmm0, %xmm2
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE_SLOW-NEXT: subsd %xmm2, %xmm0
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE_SLOW-NEXT: movapd %xmm1, %xmm2
; SSE_SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE_SLOW-NEXT: subsd %xmm2, %xmm1
; SSE_SLOW-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
; SSE_SLOW-NEXT: retq
;
; SSE_FAST-LABEL: hsub_v4f64:
; SSE_FAST: # %bb.0:
; SSE_FAST-NEXT: hsubpd %xmm0, %xmm0
; SSE_FAST-NEXT: hsubpd %xmm1, %xmm1
; SSE_FAST-NEXT: retq
;
; AVX-LABEL: hsub_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vhsubpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: retq
  %a0 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %a1 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %hop = fsub <4 x double> %a0, %a1
  %shuf = shufflevector <4 x double> %hop, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %shuf
}

define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
; SSE3-LABEL: hadd_v4i32:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; SSE3-NEXT: paddd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_v4i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hadd_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a02 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  %a13 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  %hop = add <4 x i32> %a02, %a13
  %shuf = shufflevector <4 x i32> %hop, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 1>
  ret <4 x i32> %shuf
}

define <8 x i32> @hadd_v8i32a(<8 x i32> %a) {
; SSE3-LABEL: hadd_v8i32a:
; SSE3: # %bb.0:
; SSE3-NEXT: movaps %xmm0, %xmm2
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE3-NEXT: paddd %xmm0, %xmm2
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSE3-NEXT: movdqa %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3_SLOW-LABEL: hadd_v8i32a:
; SSSE3_SLOW: # %bb.0:
; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
; SSSE3_SLOW-NEXT: phaddd %xmm1, %xmm2
; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3_SLOW-NEXT: retq
;
; SSSE3_FAST-LABEL: hadd_v8i32a:
; SSSE3_FAST: # %bb.0:
; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
; SSSE3_FAST-NEXT: phaddd %xmm1, %xmm2
; SSSE3_FAST-NEXT: phaddd %xmm0, %xmm0
; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
; SSSE3_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hadd_v8i32a:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hadd_v8i32a:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm1
; AVX1_FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2-LABEL: hadd_v8i32a:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: retq
  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hop = add <4 x i32> %a0, %a1
  %shuf = shufflevector <4 x i32> %hop, <4 x i32> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
  ret <8 x i32> %shuf
}

define <8 x i32> @hadd_v8i32b(<8 x i32> %a) {
; SSE3-LABEL: hadd_v8i32b:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,1,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE3-NEXT: paddd %xmm2, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,1,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
; SSE3-NEXT: paddd %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_v8i32b:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: phaddd %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hadd_v8i32b:
; AVX1: # %bb.0:
; AVX1-NEXT: vphaddd %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_v8i32b:
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
  %hop = add <8 x i32> %a0, %a1
  %shuf = shufflevector <8 x i32> %hop, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
  ret <8 x i32> %shuf
}

define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
; SSE3-LABEL: hsub_v4i32:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,3,1,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
; SSE3-NEXT: psubd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hsub_v4i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hsub_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a02 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  %a13 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  %hop = sub <4 x i32> %a02, %a13
  %shuf = shufflevector <4 x i32> %hop, <4 x i32> undef, <4 x i32> <i32 undef, i32 1, i32 0, i32 undef>
  ret <4 x i32> %shuf
}

define <8 x i32> @hsub_v8i32a(<8 x i32> %a) {
; SSE3-LABEL: hsub_v8i32a:
; SSE3: # %bb.0:
; SSE3-NEXT: movaps %xmm0, %xmm2
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE3-NEXT: psubd %xmm0, %xmm2
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSE3-NEXT: movdqa %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3_SLOW-LABEL: hsub_v8i32a:
; SSSE3_SLOW: # %bb.0:
; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
; SSSE3_SLOW-NEXT: phsubd %xmm1, %xmm2
; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3_SLOW-NEXT: retq
;
; SSSE3_FAST-LABEL: hsub_v8i32a:
; SSSE3_FAST: # %bb.0:
; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
; SSSE3_FAST-NEXT: phsubd %xmm1, %xmm2
; SSSE3_FAST-NEXT: phsubd %xmm0, %xmm0
; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
; SSSE3_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hsub_v8i32a:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_SLOW-NEXT: vphsubd %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hsub_v8i32a:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_FAST-NEXT: vphsubd %xmm1, %xmm0, %xmm1
; AVX1_FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2-LABEL: hsub_v8i32a:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: retq
  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hop = sub <4 x i32> %a0, %a1
  %shuf = shufflevector <4 x i32> %hop, <4 x i32> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
  ret <8 x i32> %shuf
}

define <8 x i32> @hsub_v8i32b(<8 x i32> %a) {
; SSE3-LABEL: hsub_v8i32b:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,1,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; SSE3-NEXT: psubd %xmm2, %xmm0
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,1,3]
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
; SSE3-NEXT: psubd %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hsub_v8i32b:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm0, %xmm0
; SSSE3-NEXT: phsubd %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hsub_v8i32b:
; AVX1: # %bb.0:
; AVX1-NEXT: vphsubd %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vphsubd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hsub_v8i32b:
; AVX2: # %bb.0:
; AVX2-NEXT: vphsubd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
  %hop = sub <8 x i32> %a0, %a1
  %shuf = shufflevector <8 x i32> %hop, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
  ret <8 x i32> %shuf
}

define <8 x i16> @hadd_v8i16(<8 x i16> %a) {
; SSE3-LABEL: hadd_v8i16:
; SSE3: # %bb.0:
; SSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE3-NEXT: paddw %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_v8i16:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hadd_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a0246 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
  %a1357 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %hop = add <8 x i16> %a0246, %a1357
  %shuf = shufflevector <8 x i16> %hop, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3>
  ret <8 x i16> %shuf
}

define <16 x i16> @hadd_v16i16a(<16 x i16> %a) {
; SSE3-LABEL: hadd_v16i16a:
; SSE3: # %bb.0:
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE3-NEXT: psrad $16, %xmm1
; SSE3-NEXT: psrad $16, %xmm0
; SSE3-NEXT: packssdw %xmm1, %xmm0
; SSE3-NEXT: paddw %xmm0, %xmm2
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSE3-NEXT: movdqa %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3_SLOW-LABEL: hadd_v16i16a:
; SSSE3_SLOW: # %bb.0:
; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
; SSSE3_SLOW-NEXT: phaddw %xmm1, %xmm2
; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3_SLOW-NEXT: retq
;
; SSSE3_FAST-LABEL: hadd_v16i16a:
; SSSE3_FAST: # %bb.0:
; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
; SSSE3_FAST-NEXT: phaddw %xmm1, %xmm2
; SSSE3_FAST-NEXT: phaddw %xmm0, %xmm0
; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
; SSSE3_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hadd_v16i16a:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_SLOW-NEXT: vphaddw %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hadd_v16i16a:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_FAST-NEXT: vphaddw %xmm1, %xmm0, %xmm1
; AVX1_FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2-LABEL: hadd_v16i16a:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: retq
  %a0 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %a1 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %hop = add <8 x i16> %a0, %a1
  %shuf = shufflevector <8 x i16> %hop, <8 x i16> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i16> %shuf
}
define <16 x i16> @hadd_v16i16b(<16 x i16> %a) {
; SSE3-LABEL: hadd_v16i16b:
; SSE3: # %bb.0:
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[3,1,1,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,5,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,7,5,4]
; SSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,4]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,4,5]
; SSE3-NEXT: paddw %xmm2, %xmm0
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[3,1,1,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,5,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,7,5,4]
; SSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,0,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,4]
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,4,5]
; SSE3-NEXT: paddw %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_v16i16b:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: phaddw %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hadd_v16i16b:
; AVX1: # %bb.0:
; AVX1-NEXT: vphaddw %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_v16i16b:
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddw %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
  %a0 = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14, i32 undef, i32 undef, i32 undef, i32 undef>
  %a1 = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 9, i32 11, i32 13, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
  %hop = add <16 x i16> %a0, %a1
  %shuf = shufflevector <16 x i16> %hop, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11>
  ret <16 x i16> %shuf
}

define <8 x i16> @hsub_v8i16(<8 x i16> %a) {
; SSE3-LABEL: hsub_v8i16:
; SSE3: # %bb.0:
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[1,1,3,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,6]
; SSE3-NEXT: psubw %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hsub_v8i16:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: hsub_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a0246 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
  %a1357 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %hop = sub <8 x i16> %a0246, %a1357
  %shuf = shufflevector <8 x i16> %hop, <8 x i16> undef, <8 x i32> <i32 0, i32 undef, i32 2, i32 undef, i32 undef, i32 1, i32 undef, i32 3>
  ret <8 x i16> %shuf
}

define <16 x i16> @hsub_v16i16a(<16 x i16> %a) {
; SSE3-LABEL: hsub_v16i16a:
; SSE3: # %bb.0:
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE3-NEXT: psrad $16, %xmm1
; SSE3-NEXT: psrad $16, %xmm0
; SSE3-NEXT: packssdw %xmm1, %xmm0
; SSE3-NEXT: psubw %xmm0, %xmm2
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSE3-NEXT: movdqa %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3_SLOW-LABEL: hsub_v16i16a:
; SSSE3_SLOW: # %bb.0:
; SSSE3_SLOW-NEXT: movdqa %xmm0, %xmm2
; SSSE3_SLOW-NEXT: phsubw %xmm1, %xmm2
; SSSE3_SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
; SSSE3_SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3_SLOW-NEXT: retq
;
; SSSE3_FAST-LABEL: hsub_v16i16a:
; SSSE3_FAST: # %bb.0:
; SSSE3_FAST-NEXT: movdqa %xmm0, %xmm2
; SSSE3_FAST-NEXT: phsubw %xmm1, %xmm2
; SSSE3_FAST-NEXT: phsubw %xmm0, %xmm0
; SSSE3_FAST-NEXT: movdqa %xmm2, %xmm1
; SSSE3_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: hsub_v16i16a:
; AVX1_SLOW: # %bb.0:
; AVX1_SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_SLOW-NEXT: vphsubw %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; AVX1_SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: hsub_v16i16a:
; AVX1_FAST: # %bb.0:
; AVX1_FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1_FAST-NEXT: vphsubw %xmm1, %xmm0, %xmm1
; AVX1_FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX1_FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1_FAST-NEXT: retq
;
; AVX2-LABEL: hsub_v16i16a:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphsubw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: retq
  %a0 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %a1 = shufflevector <16 x i16> %a, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %hop = sub <8 x i16> %a0, %a1
  %shuf = shufflevector <8 x i16> %hop, <8 x i16> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i16> %shuf
}

define <16 x i16> @hsub_v16i16b(<16 x i16> %a) {
; SSE3-LABEL: hsub_v16i16b:
; SSE3: # %bb.0:
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[3,1,1,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,5,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,7,5,4]
; SSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,4]
; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,4,5]
; SSE3-NEXT: psubw %xmm2, %xmm0
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[3,1,1,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,5,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,7,5,4]
; SSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,0,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,4]
; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; SSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,4,5]
; SSE3-NEXT: psubw %xmm2, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hsub_v16i16b:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubw %xmm0, %xmm0
; SSSE3-NEXT: phsubw %xmm1, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hsub_v16i16b:
; AVX1: # %bb.0:
; AVX1-NEXT: vphsubw %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vphsubw %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hsub_v16i16b:
; AVX2: # %bb.0:
; AVX2-NEXT: vphsubw %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
  %a0 = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14, i32 undef, i32 undef, i32 undef, i32 undef>
  %a1 = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 9, i32 11, i32 13, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
  %hop = sub <16 x i16> %a0, %a1
  %shuf = shufflevector <16 x i16> %hop, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11>
  ret <16 x i16> %shuf
}

define <4 x float> @broadcast_haddps_v4f32(<4 x float> %a0) {
; SSE-LABEL: broadcast_haddps_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: broadcast_haddps_v4f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: broadcast_haddps_v4f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
; AVX2-NEXT: retq
  %1 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a0)
  %2 = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
  ret <4 x float> %2
}

declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>)

define <4 x float> @PR34724_1(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: PR34724_1:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR34724_1:
; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %t0 = shufflevector <4 x float> %a, <4 x float> %b, <2 x i32> <i32 2, i32 4>
  %t1 = shufflevector <4 x float> %a, <4 x float> %b, <2 x i32> <i32 3, i32 5>
  %t2 = fadd <2 x float> %t0, %t1
  %vecinit9 = shufflevector <2 x float> %t2, <2 x float> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
  %t3 = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
  %t4 = fadd <4 x float> %t3, %b
  %vecinit13 = shufflevector <4 x float> %vecinit9, <4 x float> %t4, <4 x i32> <i32 undef, i32 1, i32 2, i32 7>
  ret <4 x float> %vecinit13
}

define <4 x float> @PR34724_2(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: PR34724_2:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: PR34724_2:
; AVX: # %bb.0:
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %t0 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 4, i32 undef, i32 undef>
  %t1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 3, i32 5, i32 undef, i32 undef>
  %t2 = fadd <4 x float> %t0, %t1
  %vecinit9 = shufflevector <4 x float> %t2, <4 x float> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
  %t3 = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
  %t4 = fadd <4 x float> %t3, %b
  %vecinit13 = shufflevector <4 x float> %vecinit9, <4 x float> %t4, <4 x i32> <i32 undef, i32 1, i32 2, i32 7>
  ret <4 x float> %vecinit13
}

;
; fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
; --> SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))).
;
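; For example (an illustrative note, not an additional test): in
; @hadd_4f32_v8f32_shuffle below, the inner shuffle of %a0 can be dropped,
; the horizontal add can be performed directly on the low and high halves of
; %a0, and the duplicated lanes are then picked out of that single result
; with one movshdup/vmovshdup, which is what the SSE and AVX checks expect.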
define <4 x float> @hadd_4f32_v8f32_shuffle(<8 x float> %a0) {
; SSE-LABEL: hadd_4f32_v8f32_shuffle:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: hadd_4f32_v8f32_shuffle:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %shuf256 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 2, i32 3, i32 6, i32 7, i32 6, i32 7>
  %lo = shufflevector <8 x float> %shuf256, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x float> %shuf256, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %hadd0 = shufflevector <4 x float> %lo, <4 x float> %hi, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %hadd1 = shufflevector <4 x float> %lo, <4 x float> %hi, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hadd = fadd <4 x float> %hadd0, %hadd1
  ret <4 x float> %hadd
}

define <4 x float> @hsub_4f32_v8f32_shuffle(<8 x float> %a0) {
; SSE-LABEL: hsub_4f32_v8f32_shuffle:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: retq
;
; AVX-LABEL: hsub_4f32_v8f32_shuffle:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %shuf256 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 2, i32 3, i32 6, i32 7, i32 6, i32 7>
  %lo = shufflevector <8 x float> %shuf256, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x float> %shuf256, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %hsub0 = shufflevector <4 x float> %lo, <4 x float> %hi, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %hsub1 = shufflevector <4 x float> %lo, <4 x float> %hi, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hsub = fadd <4 x float> %hsub0, %hsub1
  ret <4 x float> %hsub
}

define <4 x i32> @hadd_4i32_v8i32_shuffle(<8 x i32> %a0) {
; SSE3-LABEL: hadd_4i32_v8i32_shuffle:
; SSE3: # %bb.0:
; SSE3-NEXT: movaps %xmm0, %xmm2
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,2],xmm1[2,2]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; SSE3-NEXT: paddd %xmm2, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_4i32_v8i32_shuffle:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hadd_4i32_v8i32_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_4i32_v8i32_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
  %shuf256 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 2, i32 3, i32 6, i32 7, i32 6, i32 7>
  %lo = shufflevector <8 x i32> %shuf256, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x i32> %shuf256, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %hadd0 = shufflevector <4 x i32> %lo, <4 x i32> %hi, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %hadd1 = shufflevector <4 x i32> %lo, <4 x i32> %hi, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hadd = add <4 x i32> %hadd0, %hadd1
  ret <4 x i32> %hadd
}

define <4 x i32> @hsub_4i32_v8i32_shuffle(<8 x i32> %a0) {
; SSE3-LABEL: hsub_4i32_v8i32_shuffle:
; SSE3: # %bb.0:
; SSE3-NEXT: movaps %xmm0, %xmm2
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,2],xmm1[2,2]
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; SSE3-NEXT: paddd %xmm2, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hsub_4i32_v8i32_shuffle:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hsub_4i32_v8i32_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: hsub_4i32_v8i32_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
  %shuf256 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 2, i32 3, i32 6, i32 7, i32 6, i32 7>
  %lo = shufflevector <8 x i32> %shuf256, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x i32> %shuf256, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %hsub0 = shufflevector <4 x i32> %lo, <4 x i32> %hi, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %hsub1 = shufflevector <4 x i32> %lo, <4 x i32> %hi, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %hsub = add <4 x i32> %hsub0, %hsub1
  ret <4 x i32> %hsub
}

;
; fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) --> SHUFFLE(HOP(X,Y)).
;
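; For example (an illustrative note, not an additional test): in
; @hadd_4f64_v4f64_shuffle below, %shuf0 holds the low 128-bit halves of %a0
; and %a1 and %shuf1 holds the high halves, so the interleaved hadd of %shuf0
; and %shuf1 produces the same lanes as a single vhaddpd of %a0 and %a1
; followed by a lane permute with mask [0,2,1,3], which is what the AVX2
; checks expect.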
define <4 x double> @hadd_4f64_v4f64_shuffle(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: hadd_4f64_v4f64_shuffle:
; SSE: # %bb.0:
; SSE-NEXT: haddpd %xmm1, %xmm0
; SSE-NEXT: haddpd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: hadd_4f64_v4f64_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vhaddpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_4f64_v4f64_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %shuf1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
  %hadd0 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  %hadd1 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
  %hadd = fadd <4 x double> %hadd0, %hadd1
  ret <4 x double> %hadd
}

define <4 x double> @hsub_4f64_v4f64_shuffle(<4 x double> %a0, <4 x double> %a1) {
; SSE-LABEL: hsub_4f64_v4f64_shuffle:
; SSE: # %bb.0:
; SSE-NEXT: hsubpd %xmm1, %xmm0
; SSE-NEXT: hsubpd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: hsub_4f64_v4f64_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vhsubpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hsub_4f64_v4f64_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %shuf1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
  %hadd0 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  %hadd1 = shufflevector <4 x double> %shuf0, <4 x double> %shuf1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
  %hadd = fsub <4 x double> %hadd0, %hadd1
  ret <4 x double> %hadd
}

define <8 x float> @hadd_8f32_v8f32_shuffle(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: hadd_8f32_v8f32_shuffle:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: haddps %xmm3, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: hadd_8f32_v8f32_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vhaddps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_8f32_v8f32_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  %shuf1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
  %hadd0 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %hadd1 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %hadd = fadd <8 x float> %hadd0, %hadd1
  ret <8 x float> %hadd
}

define <8 x float> @hsub_8f32_v8f32_shuffle(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: hsub_8f32_v8f32_shuffle:
; SSE: # %bb.0:
; SSE-NEXT: haddps %xmm1, %xmm0
; SSE-NEXT: haddps %xmm3, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: hsub_8f32_v8f32_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vhaddps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hsub_8f32_v8f32_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vhaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  %shuf1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
  %hsub0 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %hsub1 = shufflevector <8 x float> %shuf0, <8 x float> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %hsub = fadd <8 x float> %hsub0, %hsub1
  ret <8 x float> %hsub
}

define <8 x i32> @hadd_8i32_v8i32_shuffle(<8 x i32> %a0, <8 x i32> %a1) {
; SSE3-LABEL: hadd_8i32_v8i32_shuffle:
; SSE3: # %bb.0:
; SSE3-NEXT: movaps %xmm2, %xmm4
; SSE3-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
; SSE3-NEXT: movaps %xmm0, %xmm5
; SSE3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm1[0,2]
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE3-NEXT: paddd %xmm2, %xmm4
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE3-NEXT: paddd %xmm5, %xmm0
; SSE3-NEXT: movdqa %xmm4, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_8i32_v8i32_shuffle:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddd %xmm1, %xmm0
; SSSE3-NEXT: phaddd %xmm3, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hadd_8i32_v8i32_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vphaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_8i32_v8i32_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  %shuf1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
  %hadd0 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %hadd1 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %hadd = add <8 x i32> %hadd0, %hadd1
  ret <8 x i32> %hadd
}

define <8 x i32> @hsub_8i32_v8i32_shuffle(<8 x i32> %a0, <8 x i32> %a1) {
; SSE3-LABEL: hsub_8i32_v8i32_shuffle:
; SSE3: # %bb.0:
; SSE3-NEXT: movaps %xmm2, %xmm4
; SSE3-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
; SSE3-NEXT: movaps %xmm0, %xmm5
; SSE3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm1[0,2]
; SSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE3-NEXT: psubd %xmm2, %xmm4
; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE3-NEXT: psubd %xmm0, %xmm5
; SSE3-NEXT: movdqa %xmm5, %xmm0
; SSE3-NEXT: movdqa %xmm4, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hsub_8i32_v8i32_shuffle:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phsubd %xmm1, %xmm0
; SSSE3-NEXT: phsubd %xmm3, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hsub_8i32_v8i32_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vphsubd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vphsubd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hsub_8i32_v8i32_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  %shuf1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
  %hadd0 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %hadd1 = shufflevector <8 x i32> %shuf0, <8 x i32> %shuf1, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %hadd = sub <8 x i32> %hadd0, %hadd1
  ret <8 x i32> %hadd
}

define <16 x i16> @hadd_16i16_16i16_shuffle(<16 x i16> %a0, <16 x i16> %a1) {
; SSE3-LABEL: hadd_16i16_16i16_shuffle:
; SSE3: # %bb.0:
; SSE3-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,2,2,3]
; SSE3-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSE3-NEXT: pshuflw {{.*#+}} xmm5 = xmm1[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE3-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[0,2,2,3,4,5,6,7]
; SSE3-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,6,7]
; SSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
; SSE3-NEXT: psrad $16, %xmm3
; SSE3-NEXT: psrad $16, %xmm2
; SSE3-NEXT: packssdw %xmm3, %xmm2
; SSE3-NEXT: paddw %xmm2, %xmm4
; SSE3-NEXT: psrad $16, %xmm1
; SSE3-NEXT: psrad $16, %xmm0
; SSE3-NEXT: packssdw %xmm1, %xmm0
; SSE3-NEXT: paddw %xmm6, %xmm0
; SSE3-NEXT: movdqa %xmm4, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: hadd_16i16_16i16_shuffle:
; SSSE3: # %bb.0:
; SSSE3-NEXT: phaddw %xmm1, %xmm0
; SSSE3-NEXT: phaddw %xmm3, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: hadd_16i16_16i16_shuffle:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vphaddw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vphaddw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_16i16_16i16_shuffle:
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
  %shuf0 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
  %shuf1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %hadd0 = shufflevector <16 x i16> %shuf0, <16 x i16> %shuf1, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
  %hadd1 = shufflevector <16 x i16> %shuf0, <16 x i16> %shuf1, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
  %hadd = add <16 x i16> %hadd0, %hadd1
  ret <16 x i16> %hadd
}