; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled %s -o - | FileCheck %s

; Accumulate of (zext i8 load) * scalar: expect a tail-predicated loop
; (vctp.32 / dls / le) with predicated vldrbt.u32 feeding vmla.u32.
define arm_aapcs_vfpcc i32 @test_acc_scalar_char(i8 zeroext %a, i8* nocapture readonly %b, i32 %N) {
; CHECK-LABEL: test_acc_scalar_char:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB0_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    sub.w r12, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #2
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB0_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u32 q2, [r1], #4
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vmla.u32 q0, q2, r0
; CHECK-NEXT:    le lr, .LBB0_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp7 = icmp eq i32 %N, 0
  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %conv = zext i8 %a to i32
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
  %0 = getelementptr inbounds i8, i8* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i8* %0 to <4 x i8>*
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %2, i32 1, <4 x i1> %1, <4 x i8> undef)
  %3 = zext <4 x i8> %wide.masked.load to <4 x i32>
  %4 = mul nuw nsw <4 x i32> %broadcast.splat13, %3
  %5 = add nuw nsw <4 x i32> %4, %vec.phi
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
  %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
  ret i32 %res.0.lcssa
}

; Accumulate of (sext i16 load) * scalar: same tail-predicated shape, with the
; signed widening load vldrht.s32.
define arm_aapcs_vfpcc i32 @test_acc_scalar_short(i16 signext %a, i16* nocapture readonly %b, i32 %N) {
; CHECK-LABEL: test_acc_scalar_short:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB1_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    sub.w r12, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #2
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB1_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrht.s32 q2, [r1], #8
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vmla.u32 q0, q2, r0
; CHECK-NEXT:    le lr, .LBB1_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp7 = icmp eq i32 %N, 0
  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %conv = sext i16 %a to i32
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i16* %0 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %4 = mul nsw <4 x i32> %broadcast.splat13, %3
  %5 = add nsw <4 x i32> %4, %vec.phi
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
  %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
  ret i32 %res.0.lcssa
}

; Unsigned-char variant: IR is identical in shape to test_acc_scalar_char and
; should produce the same tail-predicated loop.
define arm_aapcs_vfpcc i32 @test_acc_scalar_uchar(i8 zeroext %a, i8* nocapture readonly %b, i32 %N) {
; CHECK-LABEL: test_acc_scalar_uchar:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB2_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    sub.w r12, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #2
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB2_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrbt.u32 q2, [r1], #4
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vmla.u32 q0, q2, r0
; CHECK-NEXT:    le lr, .LBB2_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp7 = icmp eq i32 %N, 0
  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %conv = zext i8 %a to i32
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
  %0 = getelementptr inbounds i8, i8* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i8* %0 to <4 x i8>*
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %2, i32 1, <4 x i1> %1, <4 x i8> undef)
  %3 = zext <4 x i8> %wide.masked.load to <4 x i32>
  %4 = mul nuw nsw <4 x i32> %broadcast.splat13, %3
  %5 = add nuw nsw <4 x i32> %4, %vec.phi
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
  %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
  ret i32 %res.0.lcssa
}

; Unsigned-short variant: the loads are zero-extended, so expect vldrht.u32.
define arm_aapcs_vfpcc i32 @test_acc_scalar_ushort(i16 signext %a, i16* nocapture readonly %b, i32 %N) {
; CHECK-LABEL: test_acc_scalar_ushort:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB3_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    sub.w r12, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #2
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB3_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrht.u32 q2, [r1], #8
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vmla.u32 q0, q2, r0
; CHECK-NEXT:    le lr, .LBB3_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp7 = icmp eq i32 %N, 0
  br i1 %cmp7, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %conv = sext i16 %a to i32
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert12 = insertelement <4 x i32> undef, i32 %conv, i32 0
  %broadcast.splat13 = shufflevector <4 x i32> %broadcast.splatinsert12, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %5, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i16* %0 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
  %4 = mul nsw <4 x i32> %broadcast.splat13, %3
  %5 = add nsw <4 x i32> %4, %vec.phi
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
  %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %8, %middle.block ]
  ret i32 %res.0.lcssa
}

; Full-width i32 accumulate: no widening, expect vldrwt.u32 with a #16 stride.
define arm_aapcs_vfpcc i32 @test_acc_scalar_int(i32 %a, i32* nocapture readonly %b, i32 %N) {
; CHECK-LABEL: test_acc_scalar_int:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    itt eq
; CHECK-NEXT:    moveq r0, #0
; CHECK-NEXT:    bxeq lr
; CHECK-NEXT:  .LBB4_1: @ %vector.ph
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    adds r3, r2, #3
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bic r3, r3, #3
; CHECK-NEXT:    sub.w r12, r3, #4
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    add.w r3, r3, r12, lsr #2
; CHECK-NEXT:    dls lr, r3
; CHECK-NEXT:  .LBB4_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.32 r2
; CHECK-NEXT:    subs r2, #4
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrwt.u32 q2, [r1], #16
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vmla.u32 q0, q2, r0
; CHECK-NEXT:    le lr, .LBB4_2
; CHECK-NEXT:  @ %bb.3: @ %middle.block
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vaddv.u32 r0, q0
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert11 = insertelement <4 x i32> undef, i32 %a, i32 0
  %broadcast.splat12 = shufflevector <4 x i32> %broadcast.splatinsert11, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %4, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
  %3 = mul nsw <4 x i32> %wide.masked.load, %broadcast.splat12
  %4 = add nsw <4 x i32> %3, %vec.phi
  %index.next = add i32 %index, 4
  %5 = icmp eq i32 %index.next, %n.vec
  br i1 %5, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %6 = select <4 x i1> %1, <4 x i32> %4, <4 x i32> %vec.phi
  %7 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %6)
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %7, %middle.block ]
  ret i32 %res.0.lcssa
}

; res[i] = a[i]*b[i] + c over i8 inputs, with a runtime alias check: the
; no-alias path must become a dlstp/letp tail-predicated loop using vmlas.u32,
; while the aliasing path keeps the scalar unrolled loop + epilogue.
define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_char(i8* nocapture readonly %a, i8* nocapture readonly %b, i8 zeroext %c, i32* nocapture %res, i32 %N) {
; CHECK-LABEL: test_vec_mul_scalar_add_char:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    ldr r4, [sp, #28]
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    beq.w .LBB5_11
; CHECK-NEXT:  @ %bb.1: @ %for.body.lr.ph
; CHECK-NEXT:    add.w r5, r3, r4, lsl #2
; CHECK-NEXT:    adds r6, r1, r4
; CHECK-NEXT:    cmp r5, r1
; CHECK-NEXT:    add.w r7, r0, r4
; CHECK-NEXT:    cset r12, hi
; CHECK-NEXT:    cmp r6, r3
; CHECK-NEXT:    cset r6, hi
; CHECK-NEXT:    cmp r5, r0
; CHECK-NEXT:    cset r5, hi
; CHECK-NEXT:    cmp r7, r3
; CHECK-NEXT:    cset r7, hi
; CHECK-NEXT:    tst r7, r5
; CHECK-NEXT:    it eq
; CHECK-NEXT:    andseq.w r7, r6, r12
; CHECK-NEXT:    beq .LBB5_4
; CHECK-NEXT:  @ %bb.2: @ %for.body.preheader
; CHECK-NEXT:    subs r7, r4, #1
; CHECK-NEXT:    and r12, r4, #3
; CHECK-NEXT:    cmp r7, #3
; CHECK-NEXT:    bhs .LBB5_6
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    b .LBB5_8
; CHECK-NEXT:  .LBB5_4: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r4
; CHECK-NEXT:  .LBB5_5: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u32 q0, [r0], #4
; CHECK-NEXT:    vldrb.u32 q1, [r1], #4
; CHECK-NEXT:    vmlas.u32 q1, q0, r2
; CHECK-NEXT:    vstrw.32 q1, [r3], #16
; CHECK-NEXT:    letp lr, .LBB5_5
; CHECK-NEXT:    b .LBB5_11
; CHECK-NEXT:  .LBB5_6: @ %for.body.preheader.new
; CHECK-NEXT:    bic r7, r4, #3
; CHECK-NEXT:    movs r6, #1
; CHECK-NEXT:    subs r7, #4
; CHECK-NEXT:    add.w r5, r3, #8
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    add.w lr, r6, r7, lsr #2
; CHECK-NEXT:    adds r6, r0, #3
; CHECK-NEXT:    adds r7, r1, #1
; CHECK-NEXT:  .LBB5_7: @ %for.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldrb r9, [r6, #-3]
; CHECK-NEXT:    add.w r8, r8, #4
; CHECK-NEXT:    ldrb r4, [r7, #-1]
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #-8]
; CHECK-NEXT:    ldrb r9, [r6, #-2]
; CHECK-NEXT:    ldrb r4, [r7], #4
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #-4]
; CHECK-NEXT:    ldrb r9, [r6, #-1]
; CHECK-NEXT:    ldrb r4, [r7, #-3]
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5]
; CHECK-NEXT:    ldrb r9, [r6], #4
; CHECK-NEXT:    ldrb r4, [r7, #-2]
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #4]
; CHECK-NEXT:    adds r5, #16
; CHECK-NEXT:    le lr, .LBB5_7
; CHECK-NEXT:  .LBB5_8: @ %for.cond.cleanup.loopexit.unr-lcssa
; CHECK-NEXT:    wls lr, r12, .LBB5_11
; CHECK-NEXT:  @ %bb.9: @ %for.body.epil.preheader
; CHECK-NEXT:    add r0, r8
; CHECK-NEXT:    add r1, r8
; CHECK-NEXT:    add.w r3, r3, r8, lsl #2
; CHECK-NEXT:  .LBB5_10: @ %for.body.epil
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldrb r7, [r0], #1
; CHECK-NEXT:    ldrb r6, [r1], #1
; CHECK-NEXT:    smlabb r7, r6, r7, r2
; CHECK-NEXT:    str r7, [r3], #4
; CHECK-NEXT:    le lr, .LBB5_10
; CHECK-NEXT:  .LBB5_11: @ %for.cond.cleanup
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
entry:
  %res12 = bitcast i32* %res to i8*
  %cmp10 = icmp eq i32 %N, 0
  br i1 %cmp10, label %for.cond.cleanup, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %conv3 = zext i8 %c to i32
  %scevgep = getelementptr i32, i32* %res, i32 %N
  %scevgep13 = bitcast i32* %scevgep to i8*
  %scevgep14 = getelementptr i8, i8* %a, i32 %N
  %scevgep15 = getelementptr i8, i8* %b, i32 %N
  %bound0 = icmp ugt i8* %scevgep14, %res12
  %bound1 = icmp ugt i8* %scevgep13, %a
  %found.conflict = and i1 %bound0, %bound1
  %bound016 = icmp ugt i8* %scevgep15, %res12
  %bound117 = icmp ugt i8* %scevgep13, %b
  %found.conflict18 = and i1 %bound016, %bound117
  %conflict.rdx = or i1 %found.conflict, %found.conflict18
  br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph

for.body.preheader:                               ; preds = %for.body.lr.ph
  %0 = add i32 %N, -1
  %xtraiter = and i32 %N, 3
  %1 = icmp ult i32 %0, 3
  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new:                           ; preds = %for.body.preheader
  %unroll_iter = sub i32 %N, %xtraiter
  br label %for.body

vector.ph:                                        ; preds = %for.body.lr.ph
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert22 = insertelement <4 x i32> undef, i32 %conv3, i32 0
  %broadcast.splat23 = shufflevector <4 x i32> %broadcast.splatinsert22, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %2 = getelementptr inbounds i8, i8* %a, i32 %index
  %3 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %4 = bitcast i8* %2 to <4 x i8>*
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %4, i32 1, <4 x i1> %3, <4 x i8> undef)
  %5 = zext <4 x i8> %wide.masked.load to <4 x i32>
  %6 = getelementptr inbounds i8, i8* %b, i32 %index
  %7 = bitcast i8* %6 to <4 x i8>*
  %wide.masked.load21 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %7, i32 1, <4 x i1> %3, <4 x i8> undef)
  %8 = zext <4 x i8> %wide.masked.load21 to <4 x i32>
  %9 = mul nuw nsw <4 x i32> %8, %5
  %10 = add nuw nsw <4 x i32> %9, %broadcast.splat23
  %11 = getelementptr inbounds i32, i32* %res, i32 %index
  %12 = bitcast i32* %11 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %12, i32 4, <4 x i1> %3)
  %index.next = add i32 %index, 4
  %13 = icmp eq i32 %index.next, %n.vec
  br i1 %13, label %for.cond.cleanup, label %vector.body

for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
  %lcmp.mod = icmp eq i32 %xtraiter, 0
  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil

for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil
  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
  %arrayidx.epil = getelementptr inbounds i8, i8* %a, i32 %i.011.epil
  %14 = load i8, i8* %arrayidx.epil, align 1
  %conv.epil = zext i8 %14 to i32
  %arrayidx1.epil = getelementptr inbounds i8, i8* %b, i32 %i.011.epil
  %15 = load i8, i8* %arrayidx1.epil, align 1
  %conv2.epil = zext i8 %15 to i32
  %mul.epil = mul nuw nsw i32 %conv2.epil, %conv.epil
  %add.epil = add nuw nsw i32 %mul.epil, %conv3
  %arrayidx4.epil = getelementptr inbounds i32, i32* %res, i32 %i.011.epil
  store i32 %add.epil, i32* %arrayidx4.epil, align 4
  %inc.epil = add nuw i32 %i.011.epil, 1
  %epil.iter.sub = add i32 %epil.iter, -1
  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil

for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry
  ret void

for.body:                                         ; preds = %for.body, %for.body.preheader.new
  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
  %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.011
  %16 = load i8, i8* %arrayidx, align 1
  %conv = zext i8 %16 to i32
  %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.011
  %17 = load i8, i8* %arrayidx1, align 1
  %conv2 = zext i8 %17 to i32
  %mul = mul nuw nsw i32 %conv2, %conv
  %add = add nuw nsw i32 %mul, %conv3
  %arrayidx4 = getelementptr inbounds i32, i32* %res, i32 %i.011
  store i32 %add, i32* %arrayidx4, align 4
  %inc = or i32 %i.011, 1
  %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc
  %18 = load i8, i8* %arrayidx.1, align 1
  %conv.1 = zext i8 %18 to i32
  %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc
  %19 = load i8, i8* %arrayidx1.1, align 1
  %conv2.1 = zext i8 %19 to i32
  %mul.1 = mul nuw nsw i32 %conv2.1, %conv.1
  %add.1 = add nuw nsw i32 %mul.1, %conv3
  %arrayidx4.1 = getelementptr inbounds i32, i32* %res, i32 %inc
  store i32 %add.1, i32* %arrayidx4.1, align 4
  %inc.1 = or i32 %i.011, 2
  %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.1
  %20 = load i8, i8* %arrayidx.2, align 1
  %conv.2 = zext i8 %20 to i32
  %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.1
  %21 = load i8, i8* %arrayidx1.2, align 1
  %conv2.2 = zext i8 %21 to i32
  %mul.2 = mul nuw nsw i32 %conv2.2, %conv.2
  %add.2 = add nuw nsw i32 %mul.2, %conv3
  %arrayidx4.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
  store i32 %add.2, i32* %arrayidx4.2, align 4
  %inc.2 = or i32 %i.011, 3
  %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.2
  %22 = load i8, i8* %arrayidx.3, align 1
  %conv.3 = zext i8 %22 to i32
  %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.2
  %23 = load i8, i8* %arrayidx1.3, align 1
  %conv2.3 = zext i8 %23 to i32
  %mul.3 = mul nuw nsw i32 %conv2.3, %conv.3
  %add.3 = add nuw nsw i32 %mul.3, %conv3
  %arrayidx4.3 = getelementptr inbounds i32, i32* %res, i32 %inc.2
  store i32 %add.3, i32* %arrayidx4.3, align 4
  %inc.3 = add nuw i32 %i.011, 4
  %niter.nsub.3 = add i32 %niter, -4
  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
}

; i16 variant without an alias check: expect a plain dlstp/letp loop with
; signed widening loads (vldrh.s32) and vmlas.u32.
define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_short(i16* nocapture readonly %a, i16* nocapture readonly %b, i16 signext %c, i32* nocapture %res, i32 %N) {
; CHECK-LABEL: test_vec_mul_scalar_add_short:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    ldr.w r12, [sp, #8]
; CHECK-NEXT:    cmp.w r12, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    popeq {r4, pc}
; CHECK-NEXT:  .LBB6_1: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r12
; CHECK-NEXT:  .LBB6_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.s32 q0, [r0], #8
; CHECK-NEXT:    vldrh.s32 q1, [r1], #8
; CHECK-NEXT:    vmlas.u32 q1, q0, r2
; CHECK-NEXT:    vstrw.32 q1, [r3], #16
; CHECK-NEXT:    letp lr, .LBB6_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r4, pc}
entry:
  %cmp10 = icmp eq i32 %N, 0
  br i1 %cmp10, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %conv3 = sext i16 %c to i32
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert15 = insertelement <4 x i32> undef, i32 %conv3, i32 0
  %broadcast.splat16 = shufflevector <4 x i32> %broadcast.splatinsert15, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %a, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i16* %0 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %4 = getelementptr inbounds i16, i16* %b, i32 %index
  %5 = bitcast i16* %4 to <4 x i16>*
  %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %5, i32 2, <4 x i1> %1, <4 x i16> undef)
  %6 = sext <4 x i16> %wide.masked.load14 to <4 x i32>
  %7 = mul nsw <4 x i32> %6, %3
  %8 = add nsw <4 x i32> %7, %broadcast.splat16
  %9 = getelementptr inbounds i32, i32* %res, i32 %index
  %10 = bitcast i32* %9 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %1)
  %index.next = add i32 %index, 4
  %11 = icmp eq i32 %index.next, %n.vec
  br i1 %11, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

; Unsigned-char variant of the aliased multiply-add: same structure and
; expectations as test_vec_mul_scalar_add_char.
define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_uchar(i8* nocapture readonly %a, i8* nocapture readonly %b, i8 zeroext %c, i32* nocapture %res, i32 %N) {
; CHECK-LABEL: test_vec_mul_scalar_add_uchar:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    ldr r4, [sp, #28]
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    beq.w .LBB7_11
; CHECK-NEXT:  @ %bb.1: @ %for.body.lr.ph
; CHECK-NEXT:    add.w r5, r3, r4, lsl #2
; CHECK-NEXT:    adds r6, r1, r4
; CHECK-NEXT:    cmp r5, r1
; CHECK-NEXT:    add.w r7, r0, r4
; CHECK-NEXT:    cset r12, hi
; CHECK-NEXT:    cmp r6, r3
; CHECK-NEXT:    cset r6, hi
; CHECK-NEXT:    cmp r5, r0
; CHECK-NEXT:    cset r5, hi
; CHECK-NEXT:    cmp r7, r3
; CHECK-NEXT:    cset r7, hi
; CHECK-NEXT:    tst r7, r5
; CHECK-NEXT:    it eq
; CHECK-NEXT:    andseq.w r7, r6, r12
; CHECK-NEXT:    beq .LBB7_4
; CHECK-NEXT:  @ %bb.2: @ %for.body.preheader
; CHECK-NEXT:    subs r7, r4, #1
; CHECK-NEXT:    and r12, r4, #3
; CHECK-NEXT:    cmp r7, #3
; CHECK-NEXT:    bhs .LBB7_6
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    b .LBB7_8
; CHECK-NEXT:  .LBB7_4: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r4
; CHECK-NEXT:  .LBB7_5: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u32 q0, [r0], #4
; CHECK-NEXT:    vldrb.u32 q1, [r1], #4
; CHECK-NEXT:    vmlas.u32 q1, q0, r2
; CHECK-NEXT:    vstrw.32 q1, [r3], #16
; CHECK-NEXT:    letp lr, .LBB7_5
; CHECK-NEXT:    b .LBB7_11
; CHECK-NEXT:  .LBB7_6: @ %for.body.preheader.new
; CHECK-NEXT:    bic r7, r4, #3
; CHECK-NEXT:    movs r6, #1
; CHECK-NEXT:    subs r7, #4
; CHECK-NEXT:    add.w r5, r3, #8
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    add.w lr, r6, r7, lsr #2
; CHECK-NEXT:    adds r6, r0, #3
; CHECK-NEXT:    adds r7, r1, #1
; CHECK-NEXT:  .LBB7_7: @ %for.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldrb r9, [r6, #-3]
; CHECK-NEXT:    add.w r8, r8, #4
; CHECK-NEXT:    ldrb r4, [r7, #-1]
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #-8]
; CHECK-NEXT:    ldrb r9, [r6, #-2]
; CHECK-NEXT:    ldrb r4, [r7], #4
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #-4]
; CHECK-NEXT:    ldrb r9, [r6, #-1]
; CHECK-NEXT:    ldrb r4, [r7, #-3]
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5]
; CHECK-NEXT:    ldrb r9, [r6], #4
; CHECK-NEXT:    ldrb r4, [r7, #-2]
; CHECK-NEXT:    smlabb r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #4]
; CHECK-NEXT:    adds r5, #16
; CHECK-NEXT:    le lr, .LBB7_7
; CHECK-NEXT:  .LBB7_8: @ %for.cond.cleanup.loopexit.unr-lcssa
; CHECK-NEXT:    wls lr, r12, .LBB7_11
; CHECK-NEXT:  @ %bb.9: @ %for.body.epil.preheader
; CHECK-NEXT:    add r0, r8
; CHECK-NEXT:    add r1, r8
; CHECK-NEXT:    add.w r3, r3, r8, lsl #2
; CHECK-NEXT:  .LBB7_10: @ %for.body.epil
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldrb r7, [r0], #1
; CHECK-NEXT:    ldrb r6, [r1], #1
; CHECK-NEXT:    smlabb r7, r6, r7, r2
; CHECK-NEXT:    str r7, [r3], #4
; CHECK-NEXT:    le lr, .LBB7_10
; CHECK-NEXT:  .LBB7_11: @ %for.cond.cleanup
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
entry:
  %res12 = bitcast i32* %res to i8*
  %cmp10 = icmp eq i32 %N, 0
  br i1 %cmp10, label %for.cond.cleanup, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %conv3 = zext i8 %c to i32
  %scevgep = getelementptr i32, i32* %res, i32 %N
  %scevgep13 = bitcast i32* %scevgep to i8*
  %scevgep14 = getelementptr i8, i8* %a, i32 %N
  %scevgep15 = getelementptr i8, i8* %b, i32 %N
  %bound0 = icmp ugt i8* %scevgep14, %res12
  %bound1 = icmp ugt i8* %scevgep13, %a
  %found.conflict = and i1 %bound0, %bound1
  %bound016 = icmp ugt i8* %scevgep15, %res12
  %bound117 = icmp ugt i8* %scevgep13, %b
  %found.conflict18 = and i1 %bound016, %bound117
  %conflict.rdx = or i1 %found.conflict, %found.conflict18
  br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph

for.body.preheader:                               ; preds = %for.body.lr.ph
  %0 = add i32 %N, -1
  %xtraiter = and i32 %N, 3
  %1 = icmp ult i32 %0, 3
  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new:                           ; preds = %for.body.preheader
  %unroll_iter = sub i32 %N, %xtraiter
  br label %for.body

vector.ph:                                        ; preds = %for.body.lr.ph
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert22 = insertelement <4 x i32> undef, i32 %conv3, i32 0
  %broadcast.splat23 = shufflevector <4 x i32> %broadcast.splatinsert22, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %2 = getelementptr inbounds i8, i8* %a, i32 %index
  %3 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %4 = bitcast i8* %2 to <4 x i8>*
  %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %4, i32 1, <4 x i1> %3, <4 x i8> undef)
  %5 = zext <4 x i8> %wide.masked.load to <4 x i32>
  %6 = getelementptr inbounds i8, i8* %b, i32 %index
  %7 = bitcast i8* %6 to <4 x i8>*
  %wide.masked.load21 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %7, i32 1, <4 x i1> %3, <4 x i8> undef)
  %8 = zext <4 x i8> %wide.masked.load21 to <4 x i32>
  %9 = mul nuw nsw <4 x i32> %8, %5
  %10 = add nuw nsw <4 x i32> %9, %broadcast.splat23
  %11 = getelementptr inbounds i32, i32* %res, i32 %index
  %12 = bitcast i32* %11 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %12, i32 4, <4 x i1> %3)
  %index.next = add i32 %index, 4
  %13 = icmp eq i32 %index.next, %n.vec
  br i1 %13, label %for.cond.cleanup, label %vector.body

for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
  %lcmp.mod = icmp eq i32 %xtraiter, 0
  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil

for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil
  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
  %arrayidx.epil = getelementptr inbounds i8, i8* %a, i32 %i.011.epil
  %14 = load i8, i8* %arrayidx.epil, align 1
  %conv.epil = zext i8 %14 to i32
  %arrayidx1.epil = getelementptr inbounds i8, i8* %b, i32 %i.011.epil
  %15 = load i8, i8* %arrayidx1.epil, align 1
  %conv2.epil = zext i8 %15 to i32
  %mul.epil = mul nuw nsw i32 %conv2.epil, %conv.epil
  %add.epil = add nuw nsw i32 %mul.epil, %conv3
  %arrayidx4.epil = getelementptr inbounds i32, i32* %res, i32 %i.011.epil
  store i32 %add.epil, i32* %arrayidx4.epil, align 4
  %inc.epil = add nuw i32 %i.011.epil, 1
  %epil.iter.sub = add i32 %epil.iter, -1
  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil

for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry
  ret void

for.body:                                         ; preds = %for.body, %for.body.preheader.new
  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
  %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.011
  %16 = load i8, i8* %arrayidx, align 1
  %conv = zext i8 %16 to i32
  %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.011
  %17 = load i8, i8* %arrayidx1, align 1
  %conv2 = zext i8 %17 to i32
  %mul = mul nuw nsw i32 %conv2, %conv
  %add = add nuw nsw i32 %mul, %conv3
  %arrayidx4 = getelementptr inbounds i32, i32* %res, i32 %i.011
  store i32 %add, i32* %arrayidx4, align 4
  %inc = or i32 %i.011, 1
  %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc
  %18 = load i8, i8* %arrayidx.1, align 1
  %conv.1 = zext i8 %18 to i32
  %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc
  %19 = load i8, i8* %arrayidx1.1, align 1
  %conv2.1 = zext i8 %19 to i32
  %mul.1 = mul nuw nsw i32 %conv2.1, %conv.1
  %add.1 = add nuw nsw i32 %mul.1, %conv3
  %arrayidx4.1 = getelementptr inbounds i32, i32* %res, i32 %inc
  store i32 %add.1, i32* %arrayidx4.1, align 4
  %inc.1 = or i32 %i.011, 2
  %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.1
  %20 = load i8, i8* %arrayidx.2, align 1
  %conv.2 = zext i8 %20 to i32
  %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.1
  %21 = load i8, i8* %arrayidx1.2, align 1
  %conv2.2 = zext i8 %21 to i32
  %mul.2 = mul nuw nsw i32 %conv2.2, %conv.2
  %add.2 = add nuw nsw i32 %mul.2, %conv3
  %arrayidx4.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
  store i32 %add.2, i32* %arrayidx4.2, align 4
  %inc.2 = or i32 %i.011, 3
  %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.2
  %22 = load i8, i8* %arrayidx.3, align 1
  %conv.3 = zext i8 %22 to i32
  %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.2
  %23 = load i8, i8* %arrayidx1.3, align 1
  %conv2.3 = zext i8 %23 to i32
  %mul.3 = mul nuw nsw i32 %conv2.3, %conv.3
  %add.3 = add nuw nsw i32 %mul.3, %conv3
  %arrayidx4.3 = getelementptr inbounds i32, i32* %res, i32 %inc.2
  store i32 %add.3, i32* %arrayidx4.3, align 4
  %inc.3 = add nuw i32 %i.011, 4
  %niter.nsub.3 = add i32 %niter, -4
  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
}

; Unsigned-short variant without an alias check: expect unsigned widening
; loads (vldrh.u32) in the dlstp/letp loop.
define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_ushort(i16* nocapture readonly %a, i16* nocapture readonly %b, i16 signext %c, i32* nocapture %res, i32 %N) {
; CHECK-LABEL: test_vec_mul_scalar_add_ushort:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    ldr.w r12, [sp, #8]
; CHECK-NEXT:    cmp.w r12, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    popeq {r4, pc}
; CHECK-NEXT:  .LBB8_1: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r12
; CHECK-NEXT:  .LBB8_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.u32 q0, [r0], #8
; CHECK-NEXT:    vldrh.u32 q1, [r1], #8
; CHECK-NEXT:    vmlas.u32 q1, q0, r2
; CHECK-NEXT:    vstrw.32 q1, [r3], #16
; CHECK-NEXT:    letp lr, .LBB8_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r4, pc}
entry:
  %cmp10 = icmp eq i32 %N, 0
  br i1 %cmp10, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %conv3 = sext i16 %c to i32
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %broadcast.splatinsert15 = insertelement <4 x i32> undef, i32 %conv3, i32 0
  %broadcast.splat16 = shufflevector <4 x i32> %broadcast.splatinsert15, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %a, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i16* %0 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
  %4 = getelementptr inbounds i16, i16* %b, i32 %index
  %5 = bitcast i16* %4 to <4 x i16>*
  %wide.masked.load14 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %5, i32 2, <4 x i1> %1, <4 x i16> undef)
  %6 = zext <4 x i16> %wide.masked.load14 to <4 x i32>
  %7 = mul nuw nsw <4 x i32> %6, %3
  %8 = add nsw <4 x i32> %7, %broadcast.splat16
  %9 = getelementptr inbounds i32, i32* %res, i32 %index
  %10 = bitcast i32* %9 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %1)
  %index.next = add i32 %index, 4
  %11 = icmp eq i32 %index.next, %n.vec
  br i1 %11, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

; i32 variant with a vector.memcheck alias test; the function's IR body lies
; beyond this chunk, so only the visible CHECK prefix is reproduced here.
define arm_aapcs_vfpcc void @test_vec_mul_scalar_add_int(i32* nocapture readonly %a, i32* nocapture readonly %b, i32 %c, i32* nocapture %res, i32 %N) {
; CHECK-LABEL: test_vec_mul_scalar_add_int:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    ldr r4, [sp, #28]
; CHECK-NEXT:    cmp r4, #0
; CHECK-NEXT:    beq.w .LBB9_11
; CHECK-NEXT:  @ %bb.1: @ %vector.memcheck
; CHECK-NEXT:    add.w r5, r3, r4, lsl #2
; CHECK-NEXT:    add.w r6, r1, r4, lsl #2
; CHECK-NEXT:    cmp r5, r1
; CHECK-NEXT:    add.w r7, r0, r4, lsl #2
; CHECK-NEXT:    cset r12, hi
; CHECK-NEXT:    cmp r6, r3
; CHECK-NEXT:    cset r6, hi
; CHECK-NEXT:    cmp r5, r0
; CHECK-NEXT:    cset r5, hi
; CHECK-NEXT:    cmp r7, r3
; CHECK-NEXT:    cset r7, hi
; CHECK-NEXT:    tst r7, r5
; CHECK-NEXT:    it eq
; CHECK-NEXT:    andseq.w r7, r6, r12
; CHECK-NEXT:    beq .LBB9_4
; CHECK-NEXT:  @ %bb.2: @ %for.body.preheader
; CHECK-NEXT:    subs r7, r4, #1
; CHECK-NEXT:    and r12, r4, #3
; CHECK-NEXT:    cmp r7, #3
; CHECK-NEXT:    bhs .LBB9_6
; CHECK-NEXT:  @ %bb.3:
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    b .LBB9_8
; CHECK-NEXT:  .LBB9_4: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r4
; CHECK-NEXT:  .LBB9_5: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
; CHECK-NEXT:    vmlas.u32 q1, q0, r2
; CHECK-NEXT:    vstrw.32 q1, [r3], #16
; CHECK-NEXT:    letp lr, .LBB9_5
; CHECK-NEXT:    b .LBB9_11
; CHECK-NEXT:  .LBB9_6: @ %for.body.preheader.new
; CHECK-NEXT:    bic r7, r4, #3
; CHECK-NEXT:    movs r6, #1
; CHECK-NEXT:    subs r7, #4
; CHECK-NEXT:    add.w r5, r3, #8
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    add.w lr, r6, r7, lsr #2
; CHECK-NEXT:    add.w r6, r0, #8
; CHECK-NEXT:    add.w r7, r1, #8
; CHECK-NEXT:  .LBB9_7: @ %for.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldr r9, [r6, #-8]
; CHECK-NEXT:    add.w r8, r8, #4
; CHECK-NEXT:    ldr r4, [r7, #-8]
; CHECK-NEXT:    mla r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #-8]
; CHECK-NEXT:    ldr r9, [r6, #-4]
; CHECK-NEXT:    ldr r4, [r7, #-4]
; CHECK-NEXT:    mla r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #-4]
; CHECK-NEXT:    ldr.w r9, [r6]
; CHECK-NEXT:    ldr r4, [r7]
; CHECK-NEXT:    mla r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5]
; CHECK-NEXT:    ldr.w r9, [r6, #4]
; CHECK-NEXT:    adds r6, #16
; CHECK-NEXT:    ldr r4, [r7, #4]
; CHECK-NEXT:    adds r7, #16
; CHECK-NEXT:    mla r4, r4, r9, r2
; CHECK-NEXT:    str r4, [r5, #4]
; CHECK-NEXT:    adds r5, #16
; CHECK-NEXT:    le lr, .LBB9_7
; CHECK-NEXT:  .LBB9_8: @ %for.cond.cleanup.loopexit.unr-lcssa
; CHECK-NEXT:    wls lr, r12, .LBB9_11
; CHECK-NEXT:  @ %bb.9: @ %for.body.epil.preheader
; CHECK-NEXT:    add.w r0, r0, r8, lsl #2
; CHECK-NEXT:    add.w r1, r1, r8, lsl #2
; CHECK-NEXT:    add.w r3, r3, r8, lsl #2
; CHECK-NEXT:  .LBB9_10: @ %for.body.epil
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldr r7, [r0], #4
; CHECK-NEXT:    ldr r6, [r1], #4
; CHECK-NEXT:    mla r7, r6, r7, r2
; CHECK-NEXT:    str r7, [r3], #4
; CHECK-NEXT:    le lr, .LBB9_10
; CHECK-NEXT:  .LBB9_11: @
%for.cond.cleanup ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} entry: %cmp8 = icmp eq i32 %N, 0 br i1 %cmp8, label %for.cond.cleanup, label %vector.memcheck vector.memcheck: ; preds = %entry %scevgep = getelementptr i32, i32* %res, i32 %N %scevgep13 = getelementptr i32, i32* %a, i32 %N %scevgep16 = getelementptr i32, i32* %b, i32 %N %bound0 = icmp ugt i32* %scevgep13, %res %bound1 = icmp ugt i32* %scevgep, %a %found.conflict = and i1 %bound0, %bound1 %bound018 = icmp ugt i32* %scevgep16, %res %bound119 = icmp ugt i32* %scevgep, %b %found.conflict20 = and i1 %bound018, %bound119 %conflict.rdx = or i1 %found.conflict, %found.conflict20 br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph for.body.preheader: ; preds = %vector.memcheck %0 = add i32 %N, -1 %xtraiter = and i32 %N, 3 %1 = icmp ult i32 %0, 3 br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new for.body.preheader.new: ; preds = %for.body.preheader %unroll_iter = sub i32 %N, %xtraiter br label %for.body vector.ph: ; preds = %vector.memcheck %n.rnd.up = add i32 %N, 3 %n.vec = and i32 %n.rnd.up, -4 %broadcast.splatinsert24 = insertelement <4 x i32> undef, i32 %c, i32 0 %broadcast.splat25 = shufflevector <4 x i32> %broadcast.splatinsert24, <4 x i32> undef, <4 x i32> zeroinitializer br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %2 = getelementptr inbounds i32, i32* %a, i32 %index %3 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N) %4 = bitcast i32* %2 to <4 x i32>* %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %3, <4 x i32> undef) %5 = getelementptr inbounds i32, i32* %b, i32 %index %6 = bitcast i32* %5 to <4 x i32>* %wide.masked.load23 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %6, i32 4, <4 x i1> %3, <4 x i32> undef) %7 = mul nsw <4 x i32> %wide.masked.load23, %wide.masked.load %8 = 
add nsw <4 x i32> %7, %broadcast.splat25 %9 = getelementptr inbounds i32, i32* %res, i32 %index %10 = bitcast i32* %9 to <4 x i32>* call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %8, <4 x i32>* %10, i32 4, <4 x i1> %3) %index.next = add i32 %index, 4 %11 = icmp eq i32 %index.next, %n.vec br i1 %11, label %for.cond.cleanup, label %vector.body for.cond.cleanup.loopexit.unr-lcssa: ; preds = %for.body, %for.body.preheader %i.09.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ] %lcmp.mod = icmp eq i32 %xtraiter, 0 br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil for.body.epil: ; preds = %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil %i.09.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.09.unr, %for.cond.cleanup.loopexit.unr-lcssa ] %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ] %arrayidx.epil = getelementptr inbounds i32, i32* %a, i32 %i.09.epil %12 = load i32, i32* %arrayidx.epil, align 4 %arrayidx1.epil = getelementptr inbounds i32, i32* %b, i32 %i.09.epil %13 = load i32, i32* %arrayidx1.epil, align 4 %mul.epil = mul nsw i32 %13, %12 %add.epil = add nsw i32 %mul.epil, %c %arrayidx2.epil = getelementptr inbounds i32, i32* %res, i32 %i.09.epil store i32 %add.epil, i32* %arrayidx2.epil, align 4 %inc.epil = add nuw i32 %i.09.epil, 1 %epil.iter.sub = add i32 %epil.iter, -1 %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0 br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil for.cond.cleanup: ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil, %entry ret void for.body: ; preds = %for.body, %for.body.preheader.new %i.09 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ] %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ] %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.09 %14 = load i32, i32* %arrayidx, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %b, i32 %i.09 %15 = 
load i32, i32* %arrayidx1, align 4 %mul = mul nsw i32 %15, %14 %add = add nsw i32 %mul, %c %arrayidx2 = getelementptr inbounds i32, i32* %res, i32 %i.09 store i32 %add, i32* %arrayidx2, align 4 %inc = or i32 %i.09, 1 %arrayidx.1 = getelementptr inbounds i32, i32* %a, i32 %inc %16 = load i32, i32* %arrayidx.1, align 4 %arrayidx1.1 = getelementptr inbounds i32, i32* %b, i32 %inc %17 = load i32, i32* %arrayidx1.1, align 4 %mul.1 = mul nsw i32 %17, %16 %add.1 = add nsw i32 %mul.1, %c %arrayidx2.1 = getelementptr inbounds i32, i32* %res, i32 %inc store i32 %add.1, i32* %arrayidx2.1, align 4 %inc.1 = or i32 %i.09, 2 %arrayidx.2 = getelementptr inbounds i32, i32* %a, i32 %inc.1 %18 = load i32, i32* %arrayidx.2, align 4 %arrayidx1.2 = getelementptr inbounds i32, i32* %b, i32 %inc.1 %19 = load i32, i32* %arrayidx1.2, align 4 %mul.2 = mul nsw i32 %19, %18 %add.2 = add nsw i32 %mul.2, %c %arrayidx2.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1 store i32 %add.2, i32* %arrayidx2.2, align 4 %inc.2 = or i32 %i.09, 3 %arrayidx.3 = getelementptr inbounds i32, i32* %a, i32 %inc.2 %20 = load i32, i32* %arrayidx.3, align 4 %arrayidx1.3 = getelementptr inbounds i32, i32* %b, i32 %inc.2 %21 = load i32, i32* %arrayidx1.3, align 4 %mul.3 = mul nsw i32 %21, %20 %add.3 = add nsw i32 %mul.3, %c %arrayidx2.3 = getelementptr inbounds i32, i32* %res, i32 %inc.2 store i32 %add.3, i32* %arrayidx2.3, align 4 %inc.3 = add nuw i32 %i.09, 4 %niter.nsub.3 = add i32 %niter, -4 %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0 br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body } define dso_local arm_aapcs_vfpcc void @test_v8i8_to_v8i16(i16* noalias nocapture %a, i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) { ; CHECK-LABEL: test_v8i8_to_v8i16: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: it eq ; CHECK-NEXT: popeq {r7, pc} ; CHECK-NEXT: .LBB10_1: @ %vector.ph ; CHECK-NEXT: dlstp.16 lr, r3 ; CHECK-NEXT: 
.LBB10_2: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vldrb.u16 q0, [r1], #8 ; CHECK-NEXT: vldrb.u16 q1, [r2], #8 ; CHECK-NEXT: vmul.i16 q0, q1, q0 ; CHECK-NEXT: vstrh.16 q0, [r0], #16 ; CHECK-NEXT: letp lr, .LBB10_2 ; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup ; CHECK-NEXT: pop {r7, pc} entry: %cmp10 = icmp eq i32 %N, 0 br i1 %cmp10, label %for.cond.cleanup, label %vector.ph vector.ph: ; preds = %entry %n.rnd.up = add i32 %N, 7 %n.vec = and i32 %n.rnd.up, -8 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %0 = getelementptr inbounds i8, i8* %b, i32 %index %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N) %2 = bitcast i8* %0 to <8 x i8>* %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef) %3 = zext <8 x i8> %wide.masked.load to <8 x i16> %4 = getelementptr inbounds i8, i8* %c, i32 %index %5 = bitcast i8* %4 to <8 x i8>* %wide.masked.load14 = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %5, i32 1, <8 x i1> %1, <8 x i8> undef) %6 = zext <8 x i8> %wide.masked.load14 to <8 x i16> %7 = mul nuw <8 x i16> %6, %3 %8 = getelementptr inbounds i16, i16* %a, i32 %index %9 = bitcast i16* %8 to <8 x i16>* call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %7, <8 x i16>* %9, i32 2, <8 x i1> %1) %index.next = add i32 %index, 8 %10 = icmp eq i32 %index.next, %n.vec br i1 %10, label %for.cond.cleanup, label %vector.body for.cond.cleanup: ; preds = %vector.body, %entry ret void } declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>) declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>) declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) declare void 
@llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>) declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)