; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test store instructions
;;;
;;; Note:
;;;   We test store instructions using a general stack, a stack with dynamic
;;;   allocation, a stack with dynamic allocation and alignment, and a stack
;;;   with dynamic allocation, alignment, and spills.
;;;
;;; First test using a stack for a leaf function.
;;;
;;;   |                                              | Higher address
;;;   |----------------------------------------------| <- old sp
;;;   | Local variables of fixed size                |
;;;   |----------------------------------------------| <- sp
;;;   |                                              | Lower address
;;;
;;; Access local variables using sp (%s11). In addition, please remember
;;; that the stack is aligned to 16 bytes.
;;;
;;; Second test using a general stack.
;;;
;;;   |                                              | Higher address
;;;   |----------------------------------------------|
;;;   | Parameter area for this function             |
;;;   |----------------------------------------------|
;;;   | Register save area (RSA) for this function   |
;;;   |----------------------------------------------|
;;;   | Return address for this function             |
;;;   |----------------------------------------------|
;;;   | Frame pointer for this function              |
;;;   |----------------------------------------------| <- fp(=old sp)
;;;   | Local variables of fixed size                |
;;;   |----------------------------------------------|
;;;   |.variable-sized.local.variables.(VLAs)........|
;;;   |..............................................|
;;;   |..............................................|
;;;   |----------------------------------------------| <- returned by alloca
;;;   | Parameter area for callee                    |
;;;   |----------------------------------------------|
;;;   | Register save area (RSA) for callee          |
;;;   |----------------------------------------------|
;;;   | Return address for callee                    |
;;;   |----------------------------------------------|
;;;   | Frame pointer for callee                     |
;;;   |----------------------------------------------| <- sp
;;;   |                                              | Lower address
;;;
;;; Access local variables using fp (%s9) since the size of the VLA is not
;;; known. At the beginning of the function, 240 + data bytes are allocated.
;;; 240 means RSA + RA + FP (= 176) + parameter area (= 64).
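;;;
;;; As a rough, hypothetical illustration of the first two cases (these
;;; snippets are not part of the checked tests below, and the function names
;;; are made up), a leaf function with only a fixed-size local
;;;
;;;   define void @leaf_example() {
;;;     %buf = alloca i64, align 8
;;;     store volatile i64 0, ptr %buf, align 8
;;;     ret void
;;;   }
;;;
;;; can address %buf as a fixed offset from sp (%s11), whereas adding a
;;; variable-sized alloca
;;;
;;;   define void @vla_example(i64 %n) {
;;;     %fixed = alloca i64, align 8        ; fixed-size local
;;;     %vla   = alloca i8, i64 %n, align 8 ; size unknown at compile time
;;;     store volatile i64 0, ptr %fixed, align 8
;;;     ret void
;;;   }
;;;
;;; forces %fixed to be addressed relative to fp (%s9), because sp then moves
;;; by an amount that is unknown at compile time.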
;;;
;;; Third test using a general stack with dynamic allocation and alignment.
;;;
;;;   |                                              | Higher address
;;;   |----------------------------------------------|
;;;   | Parameter area for this function             |
;;;   |----------------------------------------------|
;;;   | Register save area (RSA) for this function   |
;;;   |----------------------------------------------|
;;;   | Return address for this function             |
;;;   |----------------------------------------------|
;;;   | Frame pointer for this function              |
;;;   |----------------------------------------------| <- fp(=old sp)
;;;   |.empty.space.to.make.part.below.aligned.in....|
;;;   |.case.it.needs.more.than.the.standard.16-byte.| (size of this area is
;;;   |.alignment....................................|  unknown at compile time)
;;;   |----------------------------------------------|
;;;   | Local variables of fixed size including spill|
;;;   | slots                                        |
;;;   |----------------------------------------------| <- bp (not defined by ABI,
;;;   |.variable-sized.local.variables.(VLAs)........|       LLVM chooses SX17)
;;;   |..............................................| (size of this area is
;;;   |..............................................|  unknown at compile time)
;;;   |----------------------------------------------| <- stack top (returned by
;;;   | Parameter area for callee                    |    alloca)
;;;   |----------------------------------------------|
;;;   | Register save area (RSA) for callee          |
;;;   |----------------------------------------------|
;;;   | Return address for callee                    |
;;;   |----------------------------------------------|
;;;   | Frame pointer for callee                     |
;;;   |----------------------------------------------| <- sp
;;;   |                                              | Lower address
;;;
;;; Access local variables using bp (%s17) since the sizes of the alignment
;;; padding and the VLA are not known. At the beginning of the function,
;;; pad(240 + data + align) bytes are allocated. Data is then accessed through
;;; bp + pad(240), since this address doesn't change even when the VLA is
;;; dynamically allocated (a small IR sketch of this case follows the next
;;; note).
;;;
;;; Fourth test using a general stack with some spills.
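;;;
;;; A similarly hypothetical sketch of the bp-based (third) case above, again
;;; not one of the checked tests below: combining an over-aligned local with a
;;; variable-sized alloca
;;;
;;;   define void @align_vla_example(i64 %n) {
;;;     %aligned = alloca i64, align 64       ; more than 16-byte alignment
;;;     %vla     = alloca i8, i64 %n, align 8 ; size unknown at compile time
;;;     store volatile i64 0, ptr %aligned, align 64
;;;     ret void
;;;   }
;;;
;;; leaves %aligned at an offset that is known neither from sp nor from fp,
;;; so it is addressed through bp (%s17) instead.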
;;; ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk(<256 x i1> noundef %0) { ; CHECK-LABEL: store__vm256_stk: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -224(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB0_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB0_2: ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 192(, %s11) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 200(, %s11) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 208(, %s11) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 216(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %2 = alloca <256 x i1>, align 32 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %2) store volatile <256 x i1> %0, ptr %2, align 32, !tbaa !3 call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %2) ret void } ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) ; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk_big_fit(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_big_fit: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -2147483648(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: brge.l %s11, %s8, .LBB1_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB1_4: ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 2147483616(, %s11) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 2147483624(, %s11) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 2147483632(, %s11) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 2147483640(, %s11) ; CHECK-NEXT: or %s1, 0, (0)1 ; CHECK-NEXT: lea %s2, 2147483424 ; CHECK-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: st %s0, 192(%s1, %s11) ; CHECK-NEXT: lea %s1, 8(, %s1) ; CHECK-NEXT: brne.l %s1, %s2, .LBB1_1 ; CHECK-NEXT: # %bb.2: ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 32 %4 = alloca [268435428 x i64], align 8 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) call void @llvm.lifetime.start.p0(i64 2147483424, ptr nonnull %4) store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 br label %6 5: ; preds = %6 call void @llvm.lifetime.end.p0(i64 2147483424, ptr nonnull %4) call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void 6: ; preds = %2, %6 %7 = phi i64 [ 0, %2 ], [ %9, %6 ] %8 = getelementptr inbounds [268435428 x i64], ptr %4, i64 0, i64 %7 store volatile i64 %1, ptr %8, align 8, !tbaa !6 %9 = add nuw nsw i64 %7, 1 %10 = icmp eq 
i64 %9, 268435428 br i1 %10, label %5, label %6, !llvm.loop !8 } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk_big(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_big: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s13, 2147483616 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: brge.l %s11, %s8, .LBB2_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB2_4: ; CHECK-NEXT: lea %s13, -2147483648 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s13, (%s11, %s13) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, (, %s13) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 8(, %s13) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 16(, %s13) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 24(, %s13) ; CHECK-NEXT: or %s1, 0, (0)1 ; CHECK-NEXT: lea %s2, 2147483432 ; CHECK-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: st %s0, 216(%s1, %s11) ; CHECK-NEXT: lea %s1, 8(, %s1) ; CHECK-NEXT: brne.l %s1, %s2, .LBB2_1 ; CHECK-NEXT: # %bb.2: ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 32 %4 = alloca [268435429 x i64], align 8 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) call void @llvm.lifetime.start.p0(i64 2147483432, ptr nonnull %4) store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 br label %6 5: ; preds = %6 call void @llvm.lifetime.end.p0(i64 2147483432, ptr nonnull %4) call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void 6: ; preds = %2, %6 %7 = phi i64 [ 0, %2 ], [ %9, %6 ] %8 = getelementptr inbounds [268435429 x i64], ptr %4, i64 0, i64 %7 store volatile i64 %1, ptr %8, align 8, !tbaa !6 %9 = add nuw nsw i64 %7, 1 %10 = icmp eq i64 %9, 268435429 br i1 %10, label %5, label %6, !llvm.loop !8 } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk_big2(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_big2: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s13, 2147483424 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: brge.l %s11, %s8, .LBB3_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB3_4: ; CHECK-NEXT: lea %s13, -2147483456 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s13, (%s11, %s13) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, (, %s13) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 8(, %s13) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 16(, %s13) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 24(, %s13) ; CHECK-NEXT: or %s1, 0, (0)1 ; CHECK-NEXT: lea %s2, -2147483648 ; CHECK-NEXT: and %s2, %s2, (32)0 ; CHECK-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 ; 
CHECK-NEXT: st %s0, 192(%s1, %s11) ; CHECK-NEXT: lea %s1, 8(, %s1) ; CHECK-NEXT: brne.l %s1, %s2, .LBB3_1 ; CHECK-NEXT: # %bb.2: ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 32 %4 = alloca [268435456 x i64], align 8 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) call void @llvm.lifetime.start.p0(i64 2147483648, ptr nonnull %4) store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 br label %6 5: ; preds = %6 call void @llvm.lifetime.end.p0(i64 2147483648, ptr nonnull %4) call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void 6: ; preds = %2, %6 %7 = phi i64 [ 0, %2 ], [ %9, %6 ] %8 = getelementptr inbounds [268435456 x i64], ptr %4, i64 0, i64 %7 store volatile i64 %1, ptr %8, align 8, !tbaa !6 %9 = add nuw nsw i64 %7, 1 %10 = icmp eq i64 %9, 268435456 br i1 %10, label %5, label %6, !llvm.loop !10 } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk_dyn(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_dyn: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -272(, %s11) ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB4_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: sll %s0, %s0, 5 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, 240(, %s11) ; CHECK-NEXT: svm %s1, %vm1, 3 ; CHECK-NEXT: st %s1, 24(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 2 ; CHECK-NEXT: st %s1, 16(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 1 ; CHECK-NEXT: st %s1, 8(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 0 ; CHECK-NEXT: st %s1, (, %s0) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, -32(, %s9) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, -24(, %s9) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, -16(, %s9) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, -8(, %s9) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 8 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) %4 = alloca <256 x i1>, i64 %1, align 8 store volatile <256 x i1> %0, ptr %4, align 32, !tbaa !3 store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk_dyn_align(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_dyn_align: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -288(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB5_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB5_2: ; CHECK-NEXT: sll %s0, %s0, 5 ; 
CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, 240(, %s11) ; CHECK-NEXT: svm %s1, %vm1, 3 ; CHECK-NEXT: st %s1, 24(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 2 ; CHECK-NEXT: st %s1, 16(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 1 ; CHECK-NEXT: st %s1, 8(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 0 ; CHECK-NEXT: st %s1, (, %s0) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 280(, %s17) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 32 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) %4 = alloca <256 x i1>, i64 %1, align 8 store volatile <256 x i1> %0, ptr %4, align 32, !tbaa !3 store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm256_stk_dyn_align2(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_dyn_align2: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -320(, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB6_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB6_2: ; CHECK-NEXT: sll %s0, %s0, 5 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, 240(, %s11) ; CHECK-NEXT: svm %s1, %vm1, 3 ; CHECK-NEXT: st %s1, 24(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 2 ; CHECK-NEXT: st %s1, 16(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 1 ; CHECK-NEXT: st %s1, 8(, %s0) ; CHECK-NEXT: svm %s1, %vm1, 0 ; CHECK-NEXT: st %s1, (, %s0) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 288(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 296(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 304(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 312(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 280(, %s17) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 32 %4 = alloca <256 x i1>, align 64 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) %5 = alloca <256 x i1>, i64 %1, align 8 store volatile <256 x i1> %0, ptr %5, align 32, !tbaa !3 store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %4) store volatile <256 x i1> %0, ptr %4, align 64, !tbaa !3 call void @llvm.lifetime.end.p0(i64 32, ptr nonnull 
%4) call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void } ; Function Attrs: nounwind define fastcc void @store__vm256_stk_dyn_align_spill(<256 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm256_stk_dyn_align_spill: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -320(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB7_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill ; CHECK-NEXT: st %s19, 56(, %s9) # 8-byte Folded Spill ; CHECK-NEXT: or %s18, 0, %s0 ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 280(, %s17) # 32-byte Folded Spill ; CHECK-NEXT: sll %s0, %s0, 5 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s19, 240(, %s11) ; CHECK-NEXT: lea %s0, dummy@lo ; CHECK-NEXT: and %s0, %s0, (32)0 ; CHECK-NEXT: lea.sl %s12, dummy@hi(, %s0) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, pass@lo ; CHECK-NEXT: and %s0, %s0, (32)0 ; CHECK-NEXT: lea.sl %s12, pass@hi(, %s0) ; CHECK-NEXT: or %s0, 0, %s18 ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld %s16, 256(, %s17) ; CHECK-NEXT: lvm %vm1, 0, %s16 ; CHECK-NEXT: ld %s16, 264(, %s17) ; CHECK-NEXT: lvm %vm1, 1, %s16 ; CHECK-NEXT: ld %s16, 272(, %s17) ; CHECK-NEXT: lvm %vm1, 2, %s16 ; CHECK-NEXT: ld %s16, 280(, %s17) # 32-byte Folded Reload ; CHECK-NEXT: lvm %vm1, 3, %s16 ; CHECK-NEXT: svm %s0, %vm1, 3 ; CHECK-NEXT: st %s0, 24(, %s19) ; CHECK-NEXT: svm %s0, %vm1, 2 ; CHECK-NEXT: st %s0, 16(, %s19) ; CHECK-NEXT: svm %s0, %vm1, 1 ; CHECK-NEXT: st %s0, 8(, %s19) ; CHECK-NEXT: svm %s0, %vm1, 0 ; CHECK-NEXT: st %s0, (, %s19) ; CHECK-NEXT: svm %s16, %vm1, 0 ; CHECK-NEXT: st %s16, 288(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 1 ; CHECK-NEXT: st %s16, 296(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 2 ; CHECK-NEXT: st %s16, 304(, %s17) ; CHECK-NEXT: svm %s16, %vm1, 3 ; CHECK-NEXT: st %s16, 312(, %s17) ; CHECK-NEXT: ld %s19, 56(, %s9) # 8-byte Folded Reload ; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <256 x i1>, align 32 call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) %4 = alloca <256 x i1>, i64 %1, align 8 tail call fastcc void @dummy() tail call fastcc void @pass(i64 noundef %1) store volatile <256 x i1> %0, ptr %4, align 32, !tbaa !3 store volatile <256 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) ret void } declare fastcc void @dummy() declare fastcc void @pass(i64 noundef) ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk(<512 x i1> noundef %0) { ; CHECK-LABEL: store__vm512_stk: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) 
; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -256(, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB8_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 192(, %s11) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 200(, %s11) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 208(, %s11) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 216(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 224(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 232(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 240(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %2 = alloca <512 x i1>, align 64 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %2) store volatile <512 x i1> %0, ptr %2, align 64, !tbaa !3 call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %2) ret void } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk_bc(<512 x i1> noundef %0) { ; CHECK-LABEL: store__vm512_stk_bc: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -320(, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB9_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 192(, %s11) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 200(, %s11) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 208(, %s11) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 216(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 224(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 232(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 240(, %s11) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 248(, %s11) ; CHECK-NEXT: ld %s0, 192(, %s11) ; CHECK-NEXT: ld %s1, 200(, %s11) ; CHECK-NEXT: ld %s2, 208(, %s11) ; CHECK-NEXT: ld %s3, 216(, %s11) ; CHECK-NEXT: ld %s4, 248(, %s11) ; CHECK-NEXT: ld %s5, 240(, %s11) ; CHECK-NEXT: ld %s6, 232(, %s11) ; CHECK-NEXT: ld %s7, 224(, %s11) ; CHECK-NEXT: st %s4, 312(, %s11) ; CHECK-NEXT: st %s5, 304(, %s11) ; CHECK-NEXT: st %s6, 296(, %s11) ; CHECK-NEXT: st %s7, 288(, %s11) ; CHECK-NEXT: st %s3, 280(, %s11) ; CHECK-NEXT: st %s2, 272(, %s11) ; CHECK-NEXT: st %s1, 264(, %s11) ; CHECK-NEXT: st %s0, 256(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %2 = alloca i512, align 64 %3 = bitcast <512 x i1> %0 to i512 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %2) store volatile i512 %3, ptr %2, align 64, !tbaa !3 call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %2) ret void } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk_big(<512 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: 
store__vm512_stk_big: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s13, 2147483392 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: brge.l %s11, %s8, .LBB10_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB10_4: ; CHECK-NEXT: lea %s13, -2147483456 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s13, (%s11, %s13) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, (, %s13) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 8(, %s13) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 16(, %s13) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 24(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 32(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 40(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 48(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 56(, %s13) ; CHECK-NEXT: or %s1, 0, (0)1 ; CHECK-NEXT: lea %s2, 2147483640 ; CHECK-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: st %s0, 200(%s1, %s11) ; CHECK-NEXT: lea %s1, 8(, %s1) ; CHECK-NEXT: brne.l %s1, %s2, .LBB10_1 ; CHECK-NEXT: # %bb.2: ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <512 x i1>, align 64 %4 = alloca [268435455 x i64], align 8 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %3) call void @llvm.lifetime.start.p0(i64 2147483640, ptr nonnull %4) store volatile <512 x i1> %0, ptr %3, align 64, !tbaa !3 br label %6 5: ; preds = %6 call void @llvm.lifetime.end.p0(i64 2147483640, ptr nonnull %4) call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %3) ret void 6: ; preds = %2, %6 %7 = phi i64 [ 0, %2 ], [ %9, %6 ] %8 = getelementptr inbounds [268435455 x i64], ptr %4, i64 0, i64 %7 store volatile i64 %1, ptr %8, align 8, !tbaa !6 %9 = add nuw nsw i64 %7, 1 %10 = icmp eq i64 %9, 268435455 br i1 %10, label %5, label %6, !llvm.loop !11 } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk_big2(<512 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm512_stk_big2: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s13, 2147483392 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: brge.l %s11, %s8, .LBB11_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB11_4: ; CHECK-NEXT: lea %s13, -2147483456 ; CHECK-NEXT: and %s13, %s13, (32)0 ; CHECK-NEXT: lea.sl %s13, (%s11, %s13) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, (, %s13) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 8(, %s13) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 16(, %s13) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 24(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 32(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 1 ; 
CHECK-NEXT: st %s16, 40(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 48(, %s13) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 56(, %s13) ; CHECK-NEXT: or %s1, 0, (0)1 ; CHECK-NEXT: lea %s2, -2147483648 ; CHECK-NEXT: and %s2, %s2, (32)0 ; CHECK-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: st %s0, 192(%s1, %s11) ; CHECK-NEXT: lea %s1, 8(, %s1) ; CHECK-NEXT: brne.l %s1, %s2, .LBB11_1 ; CHECK-NEXT: # %bb.2: ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <512 x i1>, align 64 %4 = alloca [268435456 x i64], align 8 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %3) call void @llvm.lifetime.start.p0(i64 2147483648, ptr nonnull %4) store volatile <512 x i1> %0, ptr %3, align 64, !tbaa !3 br label %6 5: ; preds = %6 call void @llvm.lifetime.end.p0(i64 2147483648, ptr nonnull %4) call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %3) ret void 6: ; preds = %2, %6 %7 = phi i64 [ 0, %2 ], [ %9, %6 ] %8 = getelementptr inbounds [268435456 x i64], ptr %4, i64 0, i64 %7 store volatile i64 %1, ptr %8, align 8, !tbaa !6 %9 = add nuw nsw i64 %7, 1 %10 = icmp eq i64 %9, 268435456 br i1 %10, label %5, label %6, !llvm.loop !12 } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk_dyn(<512 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm512_stk_dyn: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -320(, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB12_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: sll %s0, %s0, 6 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, 240(, %s11) ; CHECK-NEXT: svm %s1, %vm2, 3 ; CHECK-NEXT: st %s1, 56(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 2 ; CHECK-NEXT: st %s1, 48(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 1 ; CHECK-NEXT: st %s1, 40(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 0 ; CHECK-NEXT: st %s1, 32(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 3 ; CHECK-NEXT: st %s1, 24(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 2 ; CHECK-NEXT: st %s1, 16(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 1 ; CHECK-NEXT: st %s1, 8(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 0 ; CHECK-NEXT: st %s1, (, %s0) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 280(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 288(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 296(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 304(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 312(, %s17) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <512 x i1>, align 64 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %3) %4 = alloca <512 x i1>, i64 
%1, align 8 store volatile <512 x i1> %0, ptr %4, align 64, !tbaa !3 store volatile <512 x i1> %0, ptr %3, align 64, !tbaa !3 call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %3) ret void } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk_dyn_align(<512 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm512_stk_dyn_align: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -320(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB13_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: sll %s0, %s0, 6 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, 240(, %s11) ; CHECK-NEXT: svm %s1, %vm2, 3 ; CHECK-NEXT: st %s1, 56(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 2 ; CHECK-NEXT: st %s1, 48(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 1 ; CHECK-NEXT: st %s1, 40(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 0 ; CHECK-NEXT: st %s1, 32(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 3 ; CHECK-NEXT: st %s1, 24(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 2 ; CHECK-NEXT: st %s1, 16(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 1 ; CHECK-NEXT: st %s1, 8(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 0 ; CHECK-NEXT: st %s1, (, %s0) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 280(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 288(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 296(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 304(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 312(, %s17) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <512 x i1>, align 32 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %3) %4 = alloca <512 x i1>, i64 %1, align 8 store volatile <512 x i1> %0, ptr %4, align 64, !tbaa !3 store volatile <512 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %3) ret void } ; Function Attrs: argmemonly nofree nounwind define fastcc void @store__vm512_stk_dyn_align2(<512 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm512_stk_dyn_align2: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -384(, %s11) ; CHECK-NEXT: and %s11, %s11, (58)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB14_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB14_2: ; CHECK-NEXT: lea %s0, 15(, %s0) ; CHECK-NEXT: and %s0, -16, %s0 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; 
CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, 240(, %s11) ; CHECK-NEXT: svm %s1, %vm2, 3 ; CHECK-NEXT: st %s1, 56(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 2 ; CHECK-NEXT: st %s1, 48(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 1 ; CHECK-NEXT: st %s1, 40(, %s0) ; CHECK-NEXT: svm %s1, %vm2, 0 ; CHECK-NEXT: st %s1, 32(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 3 ; CHECK-NEXT: st %s1, 24(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 2 ; CHECK-NEXT: st %s1, 16(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 1 ; CHECK-NEXT: st %s1, 8(, %s0) ; CHECK-NEXT: svm %s1, %vm3, 0 ; CHECK-NEXT: st %s1, (, %s0) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 320(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 328(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 336(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 344(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 352(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 360(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 368(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 376(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 280(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 288(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 296(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 304(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 312(, %s17) ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <512 x i1>, align 32 %4 = alloca <512 x i1>, align 64 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %3) %5 = alloca i8, i64 %1, align 8 store volatile <512 x i1> %0, ptr %5, align 64, !tbaa !3 store volatile <512 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %4) store volatile <512 x i1> %0, ptr %4, align 64, !tbaa !3 call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %4) call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %3) ret void } ; Function Attrs: nounwind define fastcc void @store__vm512_stk_dyn_align_spill(<512 x i1> noundef %0, i64 noundef %1) { ; CHECK-LABEL: store__vm512_stk_dyn_align_spill: ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) ; CHECK-NEXT: st %s17, 40(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -384(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 ; CHECK-NEXT: or %s17, 0, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB15_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) ; CHECK-NEXT: or %s62, 0, %s0 ; CHECK-NEXT: lea %s63, 315 ; CHECK-NEXT: shm.l %s63, (%s61) ; CHECK-NEXT: shm.l %s8, 8(%s61) ; CHECK-NEXT: shm.l %s11, 16(%s61) ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB15_2: ; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill ; CHECK-NEXT: st %s19, 56(, %s9) # 8-byte Folded Spill ; CHECK-NEXT: or %s18, 0, %s0 ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 256(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 264(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 272(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 280(, %s17) ; 
CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 288(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 296(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 304(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 312(, %s17) # 64-byte Folded Spill ; CHECK-NEXT: sll %s0, %s0, 6 ; CHECK-NEXT: lea %s1, __ve_grow_stack@lo ; CHECK-NEXT: and %s1, %s1, (32)0 ; CHECK-NEXT: lea.sl %s12, __ve_grow_stack@hi(, %s1) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s19, 240(, %s11) ; CHECK-NEXT: lea %s0, dummy@lo ; CHECK-NEXT: and %s0, %s0, (32)0 ; CHECK-NEXT: lea.sl %s12, dummy@hi(, %s0) ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: lea %s0, pass@lo ; CHECK-NEXT: and %s0, %s0, (32)0 ; CHECK-NEXT: lea.sl %s12, pass@hi(, %s0) ; CHECK-NEXT: or %s0, 0, %s18 ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: # implicit-def: $vmp1 ; CHECK-NEXT: ld %s16, 256(, %s17) ; CHECK-NEXT: lvm %vm3, 0, %s16 ; CHECK-NEXT: ld %s16, 264(, %s17) ; CHECK-NEXT: lvm %vm3, 1, %s16 ; CHECK-NEXT: ld %s16, 272(, %s17) ; CHECK-NEXT: lvm %vm3, 2, %s16 ; CHECK-NEXT: ld %s16, 280(, %s17) ; CHECK-NEXT: lvm %vm3, 3, %s16 ; CHECK-NEXT: ld %s16, 288(, %s17) ; CHECK-NEXT: lvm %vm2, 0, %s16 ; CHECK-NEXT: ld %s16, 296(, %s17) ; CHECK-NEXT: lvm %vm2, 1, %s16 ; CHECK-NEXT: ld %s16, 304(, %s17) ; CHECK-NEXT: lvm %vm2, 2, %s16 ; CHECK-NEXT: ld %s16, 312(, %s17) # 64-byte Folded Reload ; CHECK-NEXT: lvm %vm2, 3, %s16 ; CHECK-NEXT: svm %s0, %vm2, 3 ; CHECK-NEXT: st %s0, 56(, %s19) ; CHECK-NEXT: svm %s0, %vm2, 2 ; CHECK-NEXT: st %s0, 48(, %s19) ; CHECK-NEXT: svm %s0, %vm2, 1 ; CHECK-NEXT: st %s0, 40(, %s19) ; CHECK-NEXT: svm %s0, %vm2, 0 ; CHECK-NEXT: st %s0, 32(, %s19) ; CHECK-NEXT: svm %s0, %vm3, 3 ; CHECK-NEXT: st %s0, 24(, %s19) ; CHECK-NEXT: svm %s0, %vm3, 2 ; CHECK-NEXT: st %s0, 16(, %s19) ; CHECK-NEXT: svm %s0, %vm3, 1 ; CHECK-NEXT: st %s0, 8(, %s19) ; CHECK-NEXT: svm %s0, %vm3, 0 ; CHECK-NEXT: st %s0, (, %s19) ; CHECK-NEXT: svm %s16, %vm3, 0 ; CHECK-NEXT: st %s16, 320(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 1 ; CHECK-NEXT: st %s16, 328(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 2 ; CHECK-NEXT: st %s16, 336(, %s17) ; CHECK-NEXT: svm %s16, %vm3, 3 ; CHECK-NEXT: st %s16, 344(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 0 ; CHECK-NEXT: st %s16, 352(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 1 ; CHECK-NEXT: st %s16, 360(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 2 ; CHECK-NEXT: st %s16, 368(, %s17) ; CHECK-NEXT: svm %s16, %vm2, 3 ; CHECK-NEXT: st %s16, 376(, %s17) ; CHECK-NEXT: ld %s19, 56(, %s9) # 8-byte Folded Reload ; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload ; CHECK-NEXT: or %s11, 0, %s9 ; CHECK-NEXT: ld %s17, 40(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) %3 = alloca <512 x i1>, align 32 call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %3) %4 = alloca <512 x i1>, i64 %1, align 8 tail call fastcc void @dummy() tail call fastcc void @pass(i64 noundef %1) store volatile <512 x i1> %0, ptr %4, align 64, !tbaa !3 store volatile <512 x i1> %0, ptr %3, align 32, !tbaa !3 call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %3) ret void } !2 = !{!"clang version 15.0.0 (git@kaz7.github.com:sx-aurora-dev/llvm-project.git 6c510cbf7e17baa380bf8a181c3b43145fd50980)"} !3 = !{!4, !4, i64 0} !4 = !{!"omnipotent char", !5, i64 0} !5 = !{!"Simple C/C++ TBAA"} !6 = !{!7, !7, i64 0} !7 = !{!"long", !4, i64 0} !8 = distinct !{!8, !9} !9 = !{!"llvm.loop.mustprogress"} !10 = distinct !{!10, !9} !11 = distinct !{!11, !9} !12 = distinct !{!12, !9}