; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2

; Tests that a floating-point build_vector doesn't try to generate a VID
; instruction.
define void @buildvec_no_vid_v4f32(<4 x float>* %x) {
; CHECK-LABEL: buildvec_no_vid_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI0_0)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x float> <float 0.0, float 4.0, float 0.0, float 2.0>, <4 x float>* %x
  ret void
}

; Not all BUILD_VECTORs are successfully lowered by the backend: some are
; expanded into scalarized stack stores. However, this may result in an
; infinite loop in the DAGCombiner, which tries to recombine those stores into
; a BUILD_VECTOR followed by a vector store. The BUILD_VECTOR is then expanded
; again and the loop begins.
; Until all BUILD_VECTORs are lowered, we disable store-combining after
; legalization for fixed-length vectors.
;
; This test uses a trick with a shufflevector which can't be lowered to a
; SHUFFLE_VECTOR node; the mask is shorter than the source vectors and the
; shuffle indices aren't located within the same 4-element subvector, so it is
; expanded to 4 EXTRACT_VECTOR_ELTs and a BUILD_VECTOR. That BUILD_VECTOR then
; triggers the loop described above when it is expanded.
define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x, <8 x float> %y) optsize {
; LMULMAX1-LABEL: hang_when_merging_stores_after_legalization:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    li a0, 2
; LMULMAX1-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; LMULMAX1-NEXT:    vmv.s.x v0, a0
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT:    vrgather.vi v12, v8, 0
; LMULMAX1-NEXT:    vrgather.vi v12, v9, 3, v0.t
; LMULMAX1-NEXT:    li a0, 8
; LMULMAX1-NEXT:    vmv.s.x v0, a0
; LMULMAX1-NEXT:    vrgather.vi v9, v10, 0
; LMULMAX1-NEXT:    li a0, 3
; LMULMAX1-NEXT:    vmv.s.x v8, a0
; LMULMAX1-NEXT:    vrgather.vi v9, v11, 3, v0.t
; LMULMAX1-NEXT:    vmv.v.v v0, v8
; LMULMAX1-NEXT:    vmerge.vvm v8, v9, v12, v0
; LMULMAX1-NEXT:    ret
;
; LMULMAX2-LABEL: hang_when_merging_stores_after_legalization:
; LMULMAX2:       # %bb.0:
; LMULMAX2-NEXT:    addi sp, sp, -16
; LMULMAX2-NEXT:    .cfi_def_cfa_offset 16
; LMULMAX2-NEXT:    addi a0, sp, 8
; LMULMAX2-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
; LMULMAX2-NEXT:    vse32.v v10, (a0)
; LMULMAX2-NEXT:    mv a0, sp
; LMULMAX2-NEXT:    vse32.v v8, (a0)
; LMULMAX2-NEXT:    vslidedown.vi v10, v10, 7
; LMULMAX2-NEXT:    addi a1, sp, 12
; LMULMAX2-NEXT:    vse32.v v10, (a1)
; LMULMAX2-NEXT:    vslidedown.vi v8, v8, 7
; LMULMAX2-NEXT:    addi a1, sp, 4
; LMULMAX2-NEXT:    vse32.v v8, (a1)
; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX2-NEXT:    vle32.v v8, (a0)
; LMULMAX2-NEXT:    addi sp, sp, 16
; LMULMAX2-NEXT:    ret
  %z = shufflevector <8 x float> %x, <8 x float> %y, <4 x i32> <i32 0, i32 7, i32 8, i32 15>
  ret <4 x float> %z
}

define void @buildvec_dominant0_v2f32(<2 x float>* %x) {
; CHECK-LABEL: buildvec_dominant0_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI2_0)
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a1), zero
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <2 x float> <float 0.0, float 1.0>, <2 x float>* %x
  ret void
}

; We don't want to lower this to the insertion of two scalar elements as above,
; as each would require its own load from the constant pool.
define void @buildvec_dominant1_v2f32(<2 x float>* %x) {
; CHECK-LABEL: buildvec_dominant1_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI3_0)
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <2 x float> <float 1.0, float 2.0>, <2 x float>* %x
  ret void
}

define void @buildvec_dominant0_v4f32(<4 x float>* %x) {
; CHECK-LABEL: buildvec_dominant0_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI4_0)
; CHECK-NEXT:    vlse32.v v8, (a1), zero
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v8, v9, 2
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x float> <float 2.0, float 2.0, float 0.0, float 2.0>, <4 x float>* %x
  ret void
}

define void @buildvec_dominant1_v4f32(<4 x float>* %x, float %f) {
; CHECK-LABEL: buildvec_dominant1_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vfmv.v.f v9, fa0
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  %v0 = insertelement <4 x float> poison, float %f, i32 0
  %v1 = insertelement <4 x float> %v0, float 0.0, i32 1
  %v2 = insertelement <4 x float> %v1, float %f, i32 2
  %v3 = insertelement <4 x float> %v2, float %f, i32 3
  store <4 x float> %v3, <4 x float>* %x
  ret void
}

define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) {
; CHECK-LABEL: buildvec_dominant2_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
; CHECK-NEXT:    flw ft0, %lo(.LCPI6_0)(a1)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vfmv.s.f v8, ft0
; CHECK-NEXT:    vfmv.v.f v9, fa0
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  %v0 = insertelement <4 x float> poison, float %f, i32 0
  %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
  %v2 = insertelement <4 x float> %v1, float %f, i32 2
  %v3 = insertelement <4 x float> %v2, float %f, i32 3
  store <4 x float> %v3, <4 x float>* %x
  ret void
}

define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) {
; RV32-LABEL: buildvec_merge0_v4f32:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 6
; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT:    lui a2, %hi(.LCPI7_0)
; RV32-NEXT:    flw ft0, %lo(.LCPI7_0)(a2)
; RV32-NEXT:    vmv.s.x v0, a1
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vfmv.v.f v8, fa0
; RV32-NEXT:    vfmerge.vfm v8, v8, ft0, v0
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_merge0_v4f32:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, %hi(.LCPI7_0)
; RV64-NEXT:    flw ft0, %lo(.LCPI7_0)(a1)
; RV64-NEXT:    li a1, 6
; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT:    vmv.s.x v0, a1
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vfmv.v.f v8, fa0
; RV64-NEXT:    vfmerge.vfm v8, v8, ft0, v0
; RV64-NEXT:    vse32.v v8, (a0)
; RV64-NEXT:    ret
  %v0 = insertelement <4 x float> poison, float %f, i32 0
  %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
  %v2 = insertelement <4 x float> %v1, float 2.0, i32 2
  %v3 = insertelement <4 x float> %v2, float %f, i32 3
  store <4 x float> %v3, <4 x float>* %x
  ret void
}
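
; The following tests splat a single extracted element across the vector.
; Where the element index and LMUL configuration allow (see the checks below),
; this is expected to lower to a single vrgather.vi/vrgather.vx rather than a
; scalar extract followed by a separate splat.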
define <4 x half> @splat_c3_v4f16(<4 x half> %v) {
; CHECK-LABEL: splat_c3_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v9, v8, 3
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x half> %v, i32 3
  %ins = insertelement <4 x half> poison, half %x, i32 0
  %splat = shufflevector <4 x half> %ins, <4 x half> poison, <4 x i32> zeroinitializer
  ret <4 x half> %splat
}

define <4 x half> @splat_idx_v4f16(<4 x half> %v, i64 %idx) {
; CHECK-LABEL: splat_idx_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x half> %v, i64 %idx
  %ins = insertelement <4 x half> poison, half %x, i32 0
  %splat = shufflevector <4 x half> %ins, <4 x half> poison, <4 x i32> zeroinitializer
  ret <4 x half> %splat
}

define <8 x float> @splat_c5_v8f32(<8 x float> %v) {
; LMULMAX1-LABEL: splat_c5_v8f32:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT:    vrgather.vi v8, v9, 1
; LMULMAX1-NEXT:    vmv.v.v v9, v8
; LMULMAX1-NEXT:    ret
;
; LMULMAX2-LABEL: splat_c5_v8f32:
; LMULMAX2:       # %bb.0:
; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT:    vrgather.vi v10, v8, 5
; LMULMAX2-NEXT:    vmv.v.v v8, v10
; LMULMAX2-NEXT:    ret
  %x = extractelement <8 x float> %v, i32 5
  %ins = insertelement <8 x float> poison, float %x, i32 0
  %splat = shufflevector <8 x float> %ins, <8 x float> poison, <8 x i32> zeroinitializer
  ret <8 x float> %splat
}

define <8 x float> @splat_idx_v8f32(<8 x float> %v, i64 %idx) {
; LMULMAX1-LABEL: splat_idx_v8f32:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    addi sp, sp, -32
; LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
; LMULMAX1-NEXT:    andi a0, a0, 7
; LMULMAX1-NEXT:    slli a0, a0, 2
; LMULMAX1-NEXT:    mv a1, sp
; LMULMAX1-NEXT:    add a0, a1, a0
; LMULMAX1-NEXT:    addi a2, sp, 16
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT:    vse32.v v9, (a2)
; LMULMAX1-NEXT:    vse32.v v8, (a1)
; LMULMAX1-NEXT:    vlse32.v v8, (a0), zero
; LMULMAX1-NEXT:    vmv.v.v v9, v8
; LMULMAX1-NEXT:    addi sp, sp, 32
; LMULMAX1-NEXT:    ret
;
; LMULMAX2-LABEL: splat_idx_v8f32:
; LMULMAX2:       # %bb.0:
; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT:    vrgather.vx v10, v8, a0
; LMULMAX2-NEXT:    vmv.v.v v8, v10
; LMULMAX2-NEXT:    ret
  %x = extractelement <8 x float> %v, i64 %idx
  %ins = insertelement <8 x float> poison, float %x, i32 0
  %splat = shufflevector <8 x float> %ins, <8 x float> poison, <8 x i32> zeroinitializer
  ret <8 x float> %splat
}

; Test that we pull the vlse of the constant pool out of the loop.
define dso_local void @splat_load_licm(float* %0) {
; RV32-LABEL: splat_load_licm:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI12_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI12_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vlse32.v v8, (a1), zero
; RV32-NEXT:    li a1, 1024
; RV32-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    addi a1, a1, -4
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    bnez a1, .LBB12_1
; RV32-NEXT:  # %bb.2:
; RV32-NEXT:    ret
;
; RV64-LABEL: splat_load_licm:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, %hi(.LCPI12_0)
; RV64-NEXT:    addi a1, a1, %lo(.LCPI12_0)
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vlse32.v v8, (a1), zero
; RV64-NEXT:    li a1, 0
; RV64-NEXT:    li a2, 1024
; RV64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; RV64-NEXT:    slli a3, a1, 2
; RV64-NEXT:    add a3, a0, a3
; RV64-NEXT:    addiw a1, a1, 4
; RV64-NEXT:    vse32.v v8, (a3)
; RV64-NEXT:    bne a1, a2, .LBB12_1
; RV64-NEXT:  # %bb.2:
; RV64-NEXT:    ret
  br label %2

2:                                                ; preds = %2, %1
  %3 = phi i32 [ 0, %1 ], [ %6, %2 ]
  %4 = getelementptr inbounds float, float* %0, i32 %3
  %5 = bitcast float* %4 to <4 x float>*
  store <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, <4 x float>* %5, align 4
  %6 = add nuw i32 %3, 4
  %7 = icmp eq i32 %6, 1024
  br i1 %7, label %8, label %2

8:                                                ; preds = %2
  ret void
}