; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle
; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=corei7-avx | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s --check-prefixes=CHECK,X64

define <4 x i32> @blendvb_fallback_v4i32(<4 x i1> %mask, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: blendvb_fallback_v4i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
; CHECK-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %y
  ret <4 x i32> %ret
}

define <8 x i32> @blendvb_fallback_v8i32(<8 x i1> %mask, <8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: blendvb_fallback_v8i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT:    vpslld $31, %xmm3, %xmm3
; CHECK-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
; CHECK-NEXT:    vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
  ret <8 x i32> %ret
}

define <8 x float> @blendvb_fallback_v8f32(<8 x i1> %mask, <8 x float> %x, <8 x float> %y) {
; CHECK-LABEL: blendvb_fallback_v8f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT:    vpslld $31, %xmm3, %xmm3
; CHECK-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm3, %ymm0
; CHECK-NEXT:    vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
  ret <8 x float> %ret
}

declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone

define <4 x float> @insertps_from_vector_load(<4 x float> %a, ptr nocapture readonly %pb) {
; X86-LABEL: insertps_from_vector_load:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vinsertps $48, (%eax), %xmm0, %xmm0 ## xmm0 = xmm0[0,1,2],mem[0]
; X86-NEXT:    retl
;
; X64-LABEL: insertps_from_vector_load:
; X64:       ## %bb.0:
; X64-NEXT:    vinsertps $48, (%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT:    retq
  %1 = load <4 x float>, ptr %pb, align 16
  %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
  ret <4 x float> %2
}

;; Use a non-zero CountS for insertps
define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, ptr nocapture readonly %pb) {
; X86-LABEL: insertps_from_vector_load_offset:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vinsertps $32, 4(%eax), %xmm0, %xmm0 ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X86-NEXT:    retl
;
; X64-LABEL: insertps_from_vector_load_offset:
; X64:       ## %bb.0:
; X64-NEXT:    vinsertps $32, 4(%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X64-NEXT:    retq
  %1 = load <4 x float>, ptr %pb, align 16
  %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
  ret <4 x float> %2
}

define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, ptr nocapture readonly %pb, i64 %index) {
; X86-LABEL: insertps_from_vector_load_offset_2:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $4, %ecx
; X86-NEXT:    vinsertps $0, 12(%eax,%ecx), %xmm0, %xmm0 ## xmm0 = mem[0],xmm0[1,2,3]
; X86-NEXT:    retl
;
; X64-LABEL: insertps_from_vector_load_offset_2:
; X64:       ## %bb.0:
; X64-NEXT:    shlq $4, %rsi
; X64-NEXT:    vinsertps $0, 12(%rdi,%rsi), %xmm0, %xmm0 ## xmm0 = mem[0],xmm0[1,2,3]
; X64-NEXT:    retq
  %1 = getelementptr inbounds <4 x float>, ptr %pb, i64 %index
  %2 = load <4 x float>, ptr %1, align 16
  %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
  ret <4 x float> %3
}

define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, ptr nocapture readonly %fb, i64 %index) {
; X86-LABEL: insertps_from_broadcast_loadf32:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vinsertps $48, (%ecx,%eax,4), %xmm0, %xmm0 ## xmm0 = xmm0[0,1,2],mem[0]
; X86-NEXT:    retl
;
; X64-LABEL: insertps_from_broadcast_loadf32:
; X64:       ## %bb.0:
; X64-NEXT:    vinsertps $48, (%rdi,%rsi,4), %xmm0, %xmm0 ## xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT:    retq
  %1 = getelementptr inbounds float, ptr %fb, i64 %index
  %2 = load float, ptr %1, align 4
  %3 = insertelement <4 x float> undef, float %2, i32 0
  %4 = insertelement <4 x float> %3, float %2, i32 1
  %5 = insertelement <4 x float> %4, float %2, i32 2
  %6 = insertelement <4 x float> %5, float %2, i32 3
  %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
  ret <4 x float> %7
}

define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, ptr nocapture readonly %b) {
; X86-LABEL: insertps_from_broadcast_loadv4f32:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vinsertps $48, (%eax), %xmm0, %xmm0 ## xmm0 = xmm0[0,1,2],mem[0]
; X86-NEXT:    retl
;
; X64-LABEL: insertps_from_broadcast_loadv4f32:
; X64:       ## %bb.0:
; X64-NEXT:    vinsertps $48, (%rdi), %xmm0, %xmm0 ## xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT:    retq
  %1 = load <4 x float>, ptr %b, align 4
  %2 = extractelement <4 x float> %1, i32 0
  %3 = insertelement <4 x float> undef, float %2, i32 0
  %4 = insertelement <4 x float> %3, float %2, i32 1
  %5 = insertelement <4 x float> %4, float %2, i32 2
  %6 = insertelement <4 x float> %5, float %2, i32 3
  %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
  ret <4 x float> %7
}

define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, ptr nocapture readonly %fb, i64 %index) {
; X86-LABEL: insertps_from_broadcast_multiple_use:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vbroadcastss (%ecx,%eax,4), %xmm4
; X86-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[3]
; X86-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; X86-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; X86-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm4[3]
; X86-NEXT:    vblendps {{.*#+}} xmm2 = xmm3[0,1,2],xmm4[3]
; X86-NEXT:    vaddps %xmm2, %xmm1, %xmm1
; X86-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: insertps_from_broadcast_multiple_use:
; X64:       ## %bb.0:
; X64-NEXT:    vbroadcastss (%rdi,%rsi,4), %xmm4
; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[3]
; X64-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; X64-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; X64-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm4[3]
; X64-NEXT:    vblendps {{.*#+}} xmm2 = xmm3[0,1,2],xmm4[3]
; X64-NEXT:    vaddps %xmm2, %xmm1, %xmm1
; X64-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = getelementptr inbounds float, ptr %fb, i64 %index
  %2 = load float, ptr %1, align 4
  %3 = insertelement <4 x float> undef, float %2, i32 0
  %4 = insertelement <4 x float> %3, float %2, i32 1
  %5 = insertelement <4 x float> %4, float %2, i32 2
  %6 = insertelement <4 x float> %5, float %2, i32 3
  %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
  %8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
  %9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
  %10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
  %11 = fadd <4 x float> %7, %8
  %12 = fadd <4 x float> %9, %10
  %13 = fadd <4 x float> %11, %12
  ret <4 x float> %13
}