; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512

;
; vXf32 (accum)
;

define float @test_v2f32(float %a0, <2 x float> %a1) {
; SSE2-LABEL: test_v2f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v2f32(float %a0, <2 x float> %a1)
  ret float %1
}

define float @test_v4f32(float %a0, <4 x float> %a1) {
; SSE2-LABEL: test_v4f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm2
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v4f32(float %a0, <4 x float> %a1)
  ret float %1
}

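; These calls carry no reassociation fast-math flags, so the reduction must
; stay strictly ordered: each lane is multiplied into the scalar accumulator
; in sequence. For 256-bit and wider inputs the same serial mulss chain
; simply continues across the extracted 128-bit subvectors.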
define float @test_v8f32(float %a0, <8 x float> %a1) {
; SSE2-LABEL: test_v8f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm3
; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm3
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm3
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm2, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v8f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v8f32(float %a0, <8 x float> %a1)
  ret float %1
}

define float @test_v16f32(float %a0, <16 x float> %a1) {
; SSE2-LABEL: test_v16f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm5
; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm5, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm5
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm5, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    movaps %xmm3, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm3, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    mulss %xmm4, %xmm0
; SSE2-NEXT:    movaps %xmm4, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm4, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,3,3,3]
; SSE2-NEXT:    mulss %xmm4, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm5, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm5
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm5, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm2, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm3, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    mulss %xmm4, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm4, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,3,3,3]
; SSE41-NEXT:    mulss %xmm4, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm2[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT:    vmulss %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v16f32(float %a0, <16 x float> %a1)
  ret float %1
}

;
; vXf32 (one)
;

define float @test_v2f32_one(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_one:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2f32_one:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2f32_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f32_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v2f32(float 1.0, <2 x float> %a0)
  ret float %1
}

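; With a 1.0 start value the initial multiply folds away, so the chain
; starts directly with the product of the first two lanes (note the single
; mulss in the v2f32 case above).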
define float @test_v4f32_one(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_one:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss %xmm0, %xmm1
; SSE2-NEXT:    movaps %xmm0, %xmm2
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT:    mulss %xmm1, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4f32_one:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss %xmm0, %xmm1
; SSE41-NEXT:    movaps %xmm0, %xmm2
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT:    mulss %xmm1, %xmm2
; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v4f32_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f32_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a0)
  ret float %1
}

define float @test_v8f32_one(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_one:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss %xmm0, %xmm2
; SSE2-NEXT:    movaps %xmm0, %xmm3
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE2-NEXT:    mulss %xmm2, %xmm3
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8f32_one:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss %xmm0, %xmm2
; SSE41-NEXT:    movaps %xmm0, %xmm3
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE41-NEXT:    mulss %xmm2, %xmm3
; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm2
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v8f32_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f32_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v8f32(float 1.0, <8 x float> %a0)
  ret float %1
}

define float @test_v16f32_one(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_one:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm4
; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss %xmm0, %xmm4
; SSE2-NEXT:    movaps %xmm0, %xmm5
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE2-NEXT:    mulss %xmm4, %xmm5
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    mulss %xmm5, %xmm0
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm4
; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm4, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm4
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm4, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    movaps %xmm3, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm3, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16f32_one:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss %xmm0, %xmm4
; SSE41-NEXT:    movaps %xmm0, %xmm5
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE41-NEXT:    mulss %xmm4, %xmm5
; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT:    mulss %xmm5, %xmm0
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm4, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm4
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm4, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm2, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm3, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16f32_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulss %xmm0, %xmm2, %xmm2
; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f32_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v16f32(float 1.0, <16 x float> %a0)
  ret float %1
}

;
; vXf32 (undef)
;

define float @test_v2f32_undef(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_undef:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2f32_undef:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2f32_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f32_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v2f32(float undef, <2 x float> %a0)
  ret float %1
}

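; With an undef start value the accumulator operand is instead picked up
; from the constant pool (the LCPI reference feeding the first mulss in
; each block below).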
define float @test_v4f32_undef(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_undef:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    movaps %xmm0, %xmm2
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT:    mulss %xmm1, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4f32_undef:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT:    movaps %xmm0, %xmm2
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT:    mulss %xmm1, %xmm2
; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v4f32_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f32_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v4f32(float undef, <4 x float> %a0)
  ret float %1
}

define float @test_v8f32_undef(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_undef:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT:    movaps %xmm0, %xmm3
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE2-NEXT:    mulss %xmm2, %xmm3
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm2
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8f32_undef:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT:    movaps %xmm0, %xmm3
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE41-NEXT:    mulss %xmm2, %xmm3
; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm2
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v8f32_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f32_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v8f32(float undef, <8 x float> %a0)
  ret float %1
}

define float @test_v16f32_undef(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_undef:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm4
; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1]
; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE2-NEXT:    movaps %xmm0, %xmm5
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE2-NEXT:    mulss %xmm4, %xmm5
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    mulss %xmm5, %xmm0
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm4
; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
; SSE2-NEXT:    mulss %xmm4, %xmm0
; SSE2-NEXT:    movaps %xmm1, %xmm4
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE2-NEXT:    mulss %xmm4, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm2, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE2-NEXT:    mulss %xmm2, %xmm0
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    movaps %xmm3, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[1,1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm3, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE2-NEXT:    mulss %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE2-NEXT:    mulss %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16f32_undef:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE41-NEXT:    movaps %xmm0, %xmm5
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE41-NEXT:    mulss %xmm4, %xmm5
; SSE41-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT:    mulss %xmm5, %xmm0
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT:    mulss %xmm4, %xmm0
; SSE41-NEXT:    movaps %xmm1, %xmm4
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
; SSE41-NEXT:    mulss %xmm4, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm2, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; SSE41-NEXT:    mulss %xmm2, %xmm0
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm3, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE41-NEXT:    mulss %xmm1, %xmm0
; SSE41-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE41-NEXT:    mulss %xmm3, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16f32_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulss %xmm0, %xmm2, %xmm2
; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vmulss %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulss %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f32_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT:    vmulss %xmm3, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call float @llvm.vector.reduce.fmul.f32.v16f32(float undef, <16 x float> %a0)
  ret float %1
}

;
; vXf64 (accum)
;

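; The f64 tests follow the same ordered pattern using mulsd/unpckhpd; for
; example (illustrative IR, not a checked test):
;   %r = call double @llvm.vector.reduce.fmul.f64.v2f64(double %acc, <2 x double> %v)
; computes ((%acc * %v[0]) * %v[1]). SSE2 and SSE4.1 produce identical code
; here, hence the shared SSE check prefix.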
define double @test_v2f64(double %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v2f64(double %a0, <2 x double> %a1)
  ret double %1
}

define double @test_v4f64(double %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64:
; SSE:       # %bb.0:
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v4f64(double %a0, <4 x double> %a1)
  ret double %1
}

define double @test_v8f64(double %a0, <8 x double> %a1) {
; SSE-LABEL: test_v8f64:
; SSE:       # %bb.0:
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v8f64(double %a0, <8 x double> %a1)
  ret double %1
}

define double @test_v16f64(double %a0, <16 x double> %a1) {
; SSE2-LABEL: test_v16f64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movapd {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT:    mulsd %xmm1, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT:    mulsd %xmm1, %xmm0
; SSE2-NEXT:    mulsd %xmm2, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE2-NEXT:    mulsd %xmm2, %xmm0
; SSE2-NEXT:    mulsd %xmm3, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE2-NEXT:    mulsd %xmm3, %xmm0
; SSE2-NEXT:    mulsd %xmm4, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE2-NEXT:    mulsd %xmm4, %xmm0
; SSE2-NEXT:    mulsd %xmm5, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE2-NEXT:    mulsd %xmm5, %xmm0
; SSE2-NEXT:    mulsd %xmm6, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE2-NEXT:    mulsd %xmm6, %xmm0
; SSE2-NEXT:    mulsd %xmm7, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE2-NEXT:    mulsd %xmm7, %xmm0
; SSE2-NEXT:    mulsd %xmm8, %xmm0
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1,1]
; SSE2-NEXT:    mulsd %xmm8, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16f64:
; SSE41:       # %bb.0:
; SSE41-NEXT:    mulsd %xmm1, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE41-NEXT:    mulsd %xmm1, %xmm0
; SSE41-NEXT:    mulsd %xmm2, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE41-NEXT:    mulsd %xmm2, %xmm0
; SSE41-NEXT:    mulsd %xmm3, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE41-NEXT:    mulsd %xmm3, %xmm0
; SSE41-NEXT:    mulsd %xmm4, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE41-NEXT:    mulsd %xmm4, %xmm0
; SSE41-NEXT:    mulsd %xmm5, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE41-NEXT:    mulsd %xmm5, %xmm0
; SSE41-NEXT:    mulsd %xmm6, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE41-NEXT:    mulsd %xmm6, %xmm0
; SSE41-NEXT:    mulsd %xmm7, %xmm0
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE41-NEXT:    mulsd %xmm7, %xmm0
; SSE41-NEXT:    mulsd {{[0-9]+}}(%rsp), %xmm0
; SSE41-NEXT:    mulsd {{[0-9]+}}(%rsp), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm5, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm4, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm4[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm4, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX512-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm3
; AVX512-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $2, %zmm2, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm2, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v16f64(double %a0, <16 x double> %a1)
  ret double %1
}

;
; vXf64 (one)
;

define double @test_v2f64_one(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_one:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f64_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f64_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v2f64(double 1.0, <2 x double> %a0)
  ret double %1
}

define double @test_v4f64_one(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_one:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm2
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f64_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f64_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v4f64(double 1.0, <4 x double> %a0)
  ret double %1
}

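; For the 512-bit cases below, AVX512 peels the upper 128-bit quarters of
; the zmm register with vextractf32x4 $2 and $3 before continuing the
; scalar multiply chain.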
define double @test_v8f64_one(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_one:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm4
; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8f64_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulsd %xmm0, %xmm2, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f64_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v8f64(double 1.0, <8 x double> %a0)
  ret double %1
}

define double @test_v16f64_one(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_one:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm8
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm0[1]
; SSE-NEXT:    mulsd %xmm8, %xmm0
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    mulsd %xmm5, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT:    mulsd %xmm5, %xmm0
; SSE-NEXT:    mulsd %xmm6, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT:    mulsd %xmm6, %xmm0
; SSE-NEXT:    mulsd %xmm7, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT:    mulsd %xmm7, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16f64_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm4, %xmm0, %xmm4
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulsd %xmm0, %xmm4, %xmm4
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm0, %xmm4, %xmm0
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm4, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f64_one:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm2
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm3
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm0, %xmm2, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm0, %xmm2, %xmm0
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v16f64(double 1.0, <16 x double> %a0)
  ret double %1
}

;
; vXf64 (undef)
;

define double @test_v2f64_undef(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_undef:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f64_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f64_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v2f64(double undef, <2 x double> %a0)
  ret double %1
}

define double @test_v4f64_undef(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_undef:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f64_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f64_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v4f64(double undef, <4 x double> %a0)
  ret double %1
}

define double @test_v8f64_undef(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_undef:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8f64_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulsd %xmm0, %xmm2, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f64_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v8f64(double undef, <8 x double> %a0)
  ret double %1
}

define double @test_v16f64_undef(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_undef:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT:    mulsd %xmm2, %xmm0
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT:    mulsd %xmm3, %xmm0
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT:    mulsd %xmm4, %xmm0
; SSE-NEXT:    mulsd %xmm5, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT:    mulsd %xmm5, %xmm0
; SSE-NEXT:    mulsd %xmm6, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT:    mulsd %xmm6, %xmm0
; SSE-NEXT:    mulsd %xmm7, %xmm0
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT:    mulsd %xmm7, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16f64_undef:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vmulsd %xmm0, %xmm4, %xmm4
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vmulsd %xmm0, %xmm4, %xmm0
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm4, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmulsd %xmm3, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f64_undef:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm3
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vmulsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vmulsd %xmm0, %xmm2, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vmulsd %xmm0, %xmm2, %xmm0
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT:    vmulsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call double @llvm.vector.reduce.fmul.f64.v16f64(double undef, <16 x double> %a0)
  ret double %1
}

declare float @llvm.vector.reduce.fmul.f32.v2f32(float, <2 x float>)
declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
declare float @llvm.vector.reduce.fmul.f32.v8f32(float, <8 x float>)
declare float @llvm.vector.reduce.fmul.f32.v16f32(float, <16 x float>)
declare double @llvm.vector.reduce.fmul.f64.v2f64(double, <2 x double>)
declare double @llvm.vector.reduce.fmul.f64.v4f64(double, <4 x double>)
declare double @llvm.vector.reduce.fmul.f64.v8f64(double, <8 x double>)
declare double @llvm.vector.reduce.fmul.f64.v16f64(double, <16 x double>)