; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt --arm-mve-gather-scatter-lowering -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -S -o - -opaque-pointers | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"

define arm_aapcs_vfpcc void @push_out_add_sub_block(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
; CHECK-LABEL: @push_out_add_sub_block(
; CHECK-NEXT:  vector.ph:
; CHECK-NEXT:    [[PUSHEDOUTADD:%.*]] = add <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 6, i32 6, i32 6, i32 6>
; CHECK-NEXT:    [[SCALEDINDEX:%.*]] = shl <4 x i32> [[PUSHEDOUTADD]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[DATA:%.*]] to i32
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[STARTINDEX:%.*]] = add <4 x i32> [[SCALEDINDEX]], [[DOTSPLAT]]
; CHECK-NEXT:    [[PREINCREMENTSTARTINDEX:%.*]] = sub <4 x i32> [[STARTINDEX]], <i32 32, i32 32, i32 32, i32 32>
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ [[PREINCREMENTSTARTINDEX]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[INDEX]], 48
; CHECK-NEXT:    br i1 [[TMP1]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
; CHECK:       lower.block:
; CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> [[VEC_IND]], i32 32)
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 0
; CHECK-NEXT:    [[TMP4]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr [[TMP5]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    br label [[VECTOR_BODY_END]]
; CHECK:       vector.body.end:
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT:    br i1 [[TMP6]], label [[END]], label [[VECTOR_BODY]]
; CHECK:       end:
; CHECK-NEXT:    ret void
;
vector.ph:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body.end ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body.end ]
  %0 = icmp eq i32 %index, 48
  br i1 %0, label %lower.block, label %end

lower.block:                                      ; preds = %vector.body
  %1 = add <4 x i32> %vec.ind, <i32 6, i32 6, i32 6, i32 6>
  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %3 = getelementptr inbounds i32, i32* %dst, i32 %index
  %4 = bitcast i32* %3 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %4, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
  br label %vector.body.end

vector.body.end:                                  ; preds = %lower.block
  %5 = icmp eq i32 %index.next, %n.vec
  br i1 %5, label %end, label %vector.body

end:
  ret void;
}

define arm_aapcs_vfpcc void @push_out_mul_sub_block(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
; CHECK-LABEL: @push_out_mul_sub_block(
; CHECK-NEXT:  vector.ph:
; CHECK-NEXT:    [[PUSHEDOUTMUL:%.*]] = mul <4 x i32> <i32 0, i32 2, i32 4, i32 6>, <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT:    [[PRODUCT:%.*]] = mul <4 x i32> <i32 8, i32 8, i32 8, i32 8>, <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT:    [[PUSHEDOUTADD:%.*]] = add <4 x i32> [[PUSHEDOUTMUL]], <i32 6, i32 6, i32 6, i32 6>
; CHECK-NEXT:    [[SCALEDINDEX:%.*]] = shl <4 x i32> [[PUSHEDOUTADD]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[DATA:%.*]] to i32
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[STARTINDEX:%.*]] = add <4 x i32> [[SCALEDINDEX]], [[DOTSPLAT]]
; CHECK-NEXT:    [[PREINCREMENTSTARTINDEX:%.*]] = sub <4 x i32> [[STARTINDEX]], <i32 96, i32 96, i32 96, i32 96>
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ [[PREINCREMENTSTARTINDEX]], [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[INDEX]], 48
; CHECK-NEXT:    br i1 [[TMP1]], label [[LOWER_BLOCK:%.*]], label [[END:%.*]]
; CHECK:       lower.block:
; CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> [[VEC_IND]], i32 96)
; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 0
; CHECK-NEXT:    [[TMP4]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP2]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr [[TMP5]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    br label [[VECTOR_BODY_END]]
; CHECK:       vector.body.end:
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT:    br i1 [[TMP6]], label [[END]], label [[VECTOR_BODY]]
; CHECK:       end:
; CHECK-NEXT:    ret void
;
vector.ph:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body.end ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body.end ]
  %0 = icmp eq i32 %index, 48
  br i1 %0, label %lower.block, label %end

lower.block:                                      ; preds = %vector.body
  %1 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
  %2 = add <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
  %3 = getelementptr inbounds i32, i32* %data, <4 x i32> %2
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %4 = getelementptr inbounds i32, i32* %dst, i32 %index
  %5 = bitcast i32* %4 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %5, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
  br label %vector.body.end

vector.body.end:                                  ; preds = %lower.block
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %end, label %vector.body

end:
  ret void;
}

define arm_aapcs_vfpcc void @push_out_mul_sub_loop(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
; CHECK-LABEL: @push_out_mul_sub_loop(
; CHECK-NEXT:  vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY_END:%.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY_END]] ]
; CHECK-NEXT:    br label [[VECTOR_2_PH:%.*]]
; CHECK:       vector.2.ph:
; CHECK-NEXT:    br label [[VECTOR_2_BODY:%.*]]
; CHECK:       vector.2.body:
; CHECK-NEXT:    [[TMP0:%.*]] = mul <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT:    [[SCALEDINDEX:%.*]] = shl <4 x i32> [[TMP0]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[DATA:%.*]] to i32
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[STARTINDEX:%.*]] = add <4 x i32> [[SCALEDINDEX]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.base.v4i32.v4i32(<4 x i32> [[STARTINDEX]], i32 24)
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT:    br label [[VECTOR_2_BODY_END:%.*]]
; CHECK:       vector.2.body.end:
; CHECK-NEXT:    [[INDEX_2_NEXT:%.*]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[INDEX_2_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP4]], label [[VECTOR_BODY_END]], label [[VECTOR_2_BODY]]
; CHECK:       vector.body.end:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT:    br i1 [[TMP5]], label [[END:%.*]], label [[VECTOR_BODY]]
; CHECK:       end:
; CHECK-NEXT:    ret void
;
vector.ph:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body.end ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body.end ]
  br label %vector.2.ph

vector.2.ph:
  br label %vector.2.body

vector.2.body:                                    ; preds = %vector.body
  %0 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
  %1 = add <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
  %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %3 = getelementptr inbounds i32, i32* %dst, i32 %index
  %4 = bitcast i32* %3 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %4, align 4
  br label %vector.2.body.end

vector.2.body.end:                                ; preds = %lower.block
  %index.2.next = add i32 %index, 4
  %5 = icmp eq i32 %index.2.next, 16
  br i1 %5, label %vector.body.end, label %vector.2.body

vector.body.end:                                  ; preds = %lower.block
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %end, label %vector.body

end:
  ret void;
}

define arm_aapcs_vfpcc void @invariant_add(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
; CHECK-LABEL: @invariant_add(
; CHECK-NEXT:  vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[L0:%.*]] = mul <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
; CHECK-NEXT:    [[L1:%.*]] = add <4 x i32> [[L0]], [[VEC_IND]]
; CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr [[DATA:%.*]], <4 x i32> [[L1]], i32 32, i32 2, i32 1)
; CHECK-NEXT:    [[L3:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    store <4 x i32> [[TMP0]], ptr [[L3]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
; CHECK-NEXT:    [[L5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
; CHECK-NEXT:    br i1 [[L5]], label [[END:%.*]], label [[VECTOR_BODY]]
; CHECK:       end:
; CHECK-NEXT:    ret void
;
vector.ph:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %l0 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
  %l1 = add <4 x i32> %l0, %vec.ind
  %l2 = getelementptr inbounds i32, i32* %data, <4 x i32> %l1
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %l2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %l3 = getelementptr inbounds i32, i32* %dst, i32 %index
  %l4 = bitcast i32* %l3 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %l4, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
  %l5 = icmp eq i32 %index.next, %n.vec
  br i1 %l5, label %end, label %vector.body

end:
  ret void;
}

define void @gatherload(i32 %n, i32 %m, i32* nocapture %a, i32* nocapture readonly %b, i32 %call.us.us) {
; CHECK-LABEL: @gatherload(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP38:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP38]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END16:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CMP636:%.*]] = icmp sgt i32 [[M:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP636]], label [[FOR_BODY_US_US_PREHEADER:%.*]], label [[FOR_BODY:%.*]]
; CHECK:       for.body.us.us.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw i32 [[M]], 2
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, ptr [[A:%.*]], i32 [[M]]
; CHECK-NEXT:    [[SCEVGEP64:%.*]] = getelementptr i32, ptr [[B:%.*]], i32 [[M]]
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[M]], 4
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP64]], [[A]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    [[N_VEC:%.*]] = and i32 [[M]], -4
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[M]]
; CHECK-NEXT:    br label [[FOR_BODY_US_US:%.*]]
; CHECK:       for.body.us.us:
; CHECK-NEXT:    [[I_039_US_US:%.*]] = phi i32 [ [[INC15_US_US:%.*]], [[FOR_COND5_FOR_END13_CRIT_EDGE_US_US:%.*]] ], [ 0, [[FOR_BODY_US_US_PREHEADER]] ]
; CHECK-NEXT:    [[VLA_US_US:%.*]] = alloca i32, i32 [[CALL_US_US:%.*]], align 4
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr nonnull align 4 [[VLA_US_US]], ptr align 4 [[A]], i32 [[TMP0]], i1 false)
; CHECK-NEXT:    [[BRMERGE:%.*]] = select i1 [[MIN_ITERS_CHECK]], i1 true, i1 [[FOUND_CONFLICT]]
; CHECK-NEXT:    br i1 [[BRMERGE]], label [[FOR_BODY7_US_US_PREHEADER:%.*]], label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], [ 0, [[FOR_BODY_US_US]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[INDEX]]
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vldr.gather.offset.v4i32.p0.v4i32(ptr [[VLA_US_US]], <4 x i32> [[WIDE_LOAD]], i32 32, i32 2, i32 1)
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[INDEX]]
; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND5_FOR_END13_CRIT_EDGE_US_US]], label [[FOR_BODY7_US_US_PREHEADER]]
; CHECK:       for.body7.us.us.preheader:
; CHECK-NEXT:    [[J_137_US_US_PH:%.*]] = phi i32 [ 0, [[FOR_BODY_US_US]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    br label [[FOR_BODY7_US_US:%.*]]
; CHECK:       for.body7.us.us:
; CHECK-NEXT:    [[J_137_US_US:%.*]] = phi i32 [ [[INC12_US_US:%.*]], [[FOR_BODY7_US_US]] ], [ [[J_137_US_US_PH]], [[FOR_BODY7_US_US_PREHEADER]] ]
; CHECK-NEXT:    [[ARRAYIDX8_US_US:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_137_US_US]]
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX8_US_US]], align 4
; CHECK-NEXT:    [[ARRAYIDX9_US_US:%.*]] = getelementptr inbounds i32, ptr [[VLA_US_US]], i32 [[TMP5]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX9_US_US]], align 4
; CHECK-NEXT:    [[ARRAYIDX10_US_US:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[J_137_US_US]]
; CHECK-NEXT:    store i32 [[TMP6]], ptr [[ARRAYIDX10_US_US]], align 4
; CHECK-NEXT:    [[INC12_US_US]] = add nuw nsw i32 [[J_137_US_US]], 1
; CHECK-NEXT:    [[EXITCOND58_NOT:%.*]] = icmp eq i32 [[INC12_US_US]], [[M]]
; CHECK-NEXT:    br i1 [[EXITCOND58_NOT]], label [[FOR_COND5_FOR_END13_CRIT_EDGE_US_US]], label [[FOR_BODY7_US_US]]
; CHECK:       for.cond5.for.end13_crit_edge.us.us:
; CHECK-NEXT:    [[INC15_US_US]] = add nuw nsw i32 [[I_039_US_US]], 1
; CHECK-NEXT:    [[EXITCOND59_NOT:%.*]] = icmp eq i32 [[INC15_US_US]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND59_NOT]], label [[FOR_END16]], label [[FOR_BODY_US_US]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_039:%.*]] = phi i32 [ [[INC15:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[INC15]] = add nuw nsw i32 [[I_039]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC15]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END16]], label [[FOR_BODY]]
; CHECK:       for.end16:
; CHECK-NEXT:    ret void
;
entry:
  %a57 = bitcast i32* %a to i8*
  %cmp38 = icmp sgt i32 %n, 0
  br i1 %cmp38, label %for.body.lr.ph, label %for.end16

for.body.lr.ph:                                   ; preds = %entry
  %cmp636 = icmp sgt i32 %m, 0
  br i1 %cmp636, label %for.body.us.us.preheader, label %for.body

for.body.us.us.preheader:                         ; preds = %for.body.lr.ph
  %0 = shl nuw i32 %m, 2
  %scevgep = getelementptr i32, i32* %a, i32 %m
  %scevgep64 = getelementptr i32, i32* %b, i32 %m
  %min.iters.check = icmp ult i32 %m, 4
  %bound0 = icmp ugt i32* %scevgep64, %a
  %bound1 = icmp ugt i32* %scevgep, %b
  %found.conflict = and i1 %bound0, %bound1
  %n.vec = and i32 %m, -4
  %cmp.n = icmp eq i32 %n.vec, %m
  br label %for.body.us.us

for.body.us.us:                                   ; preds = %for.body.us.us.preheader, %for.cond5.for.end13_crit_edge.us.us
  %i.039.us.us = phi i32 [ %inc15.us.us, %for.cond5.for.end13_crit_edge.us.us ], [ 0, %for.body.us.us.preheader ]
  %1 = add i32 0, 0
  %vla.us.us = alloca i32, i32 %call.us.us, align 4
  %vla.us.us56 = bitcast i32* %vla.us.us to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* nonnull align 4 %vla.us.us56, i8* align 4 %a57, i32 %0, i1 false)
  %brmerge = select i1 %min.iters.check, i1 true, i1 %found.conflict
  br i1 %brmerge, label %for.body7.us.us.preheader, label %vector.body

vector.body:                                      ; preds = %for.body.us.us, %vector.body
  %index = phi i32 [ %index.next, %vector.body ], [ 0, %for.body.us.us ]
  %2 = getelementptr inbounds i32, i32* %b, i32 %index
  %3 = bitcast i32* %2 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %3, align 4
  %4 = getelementptr inbounds i32, i32* %vla.us.us, <4 x i32> %wide.load
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %4, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %5 = getelementptr inbounds i32, i32* %a, i32 %index
  %6 = bitcast i32* %5 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %6, align 4
  %index.next = add nuw i32 %index, 4
  %7 = icmp eq i32 %index.next, %n.vec
  br i1 %7, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  br i1 %cmp.n, label %for.cond5.for.end13_crit_edge.us.us, label %for.body7.us.us.preheader

for.body7.us.us.preheader:                        ; preds = %for.body.us.us, %middle.block
  %j.137.us.us.ph = phi i32 [ 0, %for.body.us.us ], [ %n.vec, %middle.block ]
  br label %for.body7.us.us

for.body7.us.us:                                  ; preds = %for.body7.us.us.preheader, %for.body7.us.us
  %j.137.us.us = phi i32 [ %inc12.us.us, %for.body7.us.us ], [ %j.137.us.us.ph, %for.body7.us.us.preheader ]
  %arrayidx8.us.us = getelementptr inbounds i32, i32* %b, i32 %j.137.us.us
  %8 = load i32, i32* %arrayidx8.us.us, align 4
  %arrayidx9.us.us = getelementptr inbounds i32, i32* %vla.us.us, i32 %8
  %9 = load i32, i32* %arrayidx9.us.us, align 4
  %arrayidx10.us.us = getelementptr inbounds i32, i32* %a, i32 %j.137.us.us
  store i32 %9, i32* %arrayidx10.us.us, align 4
  %inc12.us.us = add nuw nsw i32 %j.137.us.us, 1
  %exitcond58.not = icmp eq i32 %inc12.us.us, %m
  br i1 %exitcond58.not, label %for.cond5.for.end13_crit_edge.us.us, label %for.body7.us.us

for.cond5.for.end13_crit_edge.us.us:              ; preds = %for.body7.us.us, %middle.block
  %inc15.us.us = add nuw nsw i32 %i.039.us.us, 1
  %exitcond59.not = icmp eq i32 %inc15.us.us, %n
  br i1 %exitcond59.not, label %for.end16, label %for.body.us.us

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.039 = phi i32 [ %inc15, %for.body ], [ 0, %for.body.lr.ph ]
  %inc15 = add nuw nsw i32 %i.039, 1
  %exitcond.not = icmp eq i32 %inc15, %n
  br i1 %exitcond.not, label %for.end16, label %for.body

for.end16:                                        ; preds = %for.body, %for.cond5.for.end13_crit_edge.us.us, %entry
  ret void
}

declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)