; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
; RUN:     -riscv-v-vector-bits-min=128 | FileCheck %s
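
; Test that a scalar operand that is splatted across a vector is sunk into the
; loop so the vector-scalar instruction forms (.vx for integer ops, .vf for
; floating-point ops) are selected, instead of keeping the splat live in a
; vector register across the loop.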

define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_mul:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB0_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB0_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = mul <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_add:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB1_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB1_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = add <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_sub(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_sub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB2_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsub.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB2_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = sub <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_rsub(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_rsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB3_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB3_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = sub <4 x i32> %broadcast.splat, %wide.load
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_and(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_and:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB4_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB4_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = and <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_or(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_or:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB5_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vor.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB5_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = or <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_xor(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_xor:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB6_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vxor.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB6_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = xor <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}
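
; The same integer operations with scalable vectors (<vscale x 4 x i32>). Each
; test has a vectorized loop plus a scalar remainder loop; the splat should
; still be sunk so the .vx form is used inside the vector body.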

define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_mul_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB7_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB7_5
; CHECK-NEXT:  .LBB7_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB7_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB7_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB7_7
; CHECK-NEXT:  .LBB7_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB7_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    mulw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB7_6
; CHECK-NEXT:  .LBB7_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = mul <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %mul = mul i32 %11, %x
  store i32 %mul, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_add_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB8_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB8_5
; CHECK-NEXT:  .LBB8_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB8_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB8_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB8_7
; CHECK-NEXT:  .LBB8_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB8_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    addw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB8_6
; CHECK-NEXT:  .LBB8_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %add = add i32 %11, %x
  store i32 %add, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_sub_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_sub_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB9_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB9_5
; CHECK-NEXT:  .LBB9_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB9_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vsub.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB9_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB9_7
; CHECK-NEXT:  .LBB9_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB9_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    addw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB9_6
; CHECK-NEXT:  .LBB9_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = sub <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %add = add i32 %11, %x
  store i32 %add, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_rsub_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB10_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB10_5
; CHECK-NEXT:  .LBB10_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB10_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB10_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB10_7
; CHECK-NEXT:  .LBB10_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB10_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    subw a2, a1, a2
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB10_6
; CHECK-NEXT:  .LBB10_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = sub <vscale x 4 x i32> %broadcast.splat, %wide.load
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %sub = sub i32 %x, %11
  store i32 %sub, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_and_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_and_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB11_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB11_5
; CHECK-NEXT:  .LBB11_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB11_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB11_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB11_7
; CHECK-NEXT:  .LBB11_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB11_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    and a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB11_6
; CHECK-NEXT:  .LBB11_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = and <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %and = and i32 %11, %x
  store i32 %and, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_or_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_or_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB12_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB12_5
; CHECK-NEXT:  .LBB12_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB12_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vor.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB12_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB12_7
; CHECK-NEXT:  .LBB12_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB12_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    or a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB12_6
; CHECK-NEXT:  .LBB12_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = or <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %or = or i32 %11, %x
  store i32 %or, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_xor_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_xor_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB13_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB13_5
; CHECK-NEXT:  .LBB13_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB13_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vxor.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB13_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB13_7
; CHECK-NEXT:  .LBB13_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB13_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    xor a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB13_6
; CHECK-NEXT:  .LBB13_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = xor <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %xor = xor i32 %11, %x
  store i32 %xor, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}
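
; Shift operations with a splatted shift amount: the splat should be sunk so
; vsll.vx, vsrl.vx and vsra.vx are used (vsra.vi when the amount is a constant,
; as in sink_splat_ashr_scalable below).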

define void @sink_splat_shl(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_shl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB14_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsll.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB14_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = shl <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_lshr(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_lshr:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB15_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsrl.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB15_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = lshr <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_ashr(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_ashr:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB16_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsra.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB16_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = ashr <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_shl_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_shl_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB17_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB17_5
; CHECK-NEXT:  .LBB17_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB17_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vsll.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB17_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB17_7
; CHECK-NEXT:  .LBB17_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB17_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    sllw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB17_6
; CHECK-NEXT:  .LBB17_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = shl <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %shl = shl i32 %11, %x
  store i32 %shl, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_lshr_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_lshr_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB18_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB18_5
; CHECK-NEXT:  .LBB18_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB18_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vsrl.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB18_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB18_7
; CHECK-NEXT:  .LBB18_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB18_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    srlw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB18_6
; CHECK-NEXT:  .LBB18_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = lshr <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %lshr = lshr i32 %11, %x
  store i32 %lshr, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_ashr_scalable(i32* nocapture %a) {
; CHECK-LABEL: sink_splat_ashr_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a4, vlenb
; CHECK-NEXT:    srli a2, a4, 1
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a2, .LBB19_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB19_5
; CHECK-NEXT:  .LBB19_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a2, -1
; CHECK-NEXT:    andi a3, a1, 1024
; CHECK-NEXT:    xori a1, a3, 1024
; CHECK-NEXT:    slli a4, a4, 1
; CHECK-NEXT:    vsetvli a5, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB19_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a5)
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vs2r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a2
; CHECK-NEXT:    add a5, a5, a4
; CHECK-NEXT:    bnez a6, .LBB19_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a3, .LBB19_7
; CHECK-NEXT:  .LBB19_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB19_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a1, 0(a0)
; CHECK-NEXT:    srli a1, a1, 2
; CHECK-NEXT:    sw a1, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB19_6
; CHECK-NEXT:  .LBB19_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = ashr <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %ashr = ashr i32 %11, 2
  store i32 %ashr, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}
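
; Floating-point operations with a splatted scalar: the splat should be sunk so
; the .vf forms (vfmul.vf, vfdiv.vf, vfrdiv.vf, vfadd.vf, vfsub.vf, vfrsub.vf)
; are used.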

define void @sink_splat_fmul(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fmul:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB20_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB20_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fmul <4 x float> %wide.load, %broadcast.splat
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_fdiv(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB21_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB21_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fdiv <4 x float> %wide.load, %broadcast.splat
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_frdiv(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_frdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB22_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB22_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fdiv <4 x float> %broadcast.splat, %wide.load
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_fadd(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fadd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB23_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB23_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fadd <4 x float> %wide.load, %broadcast.splat
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_fsub(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB24_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB24_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fsub <4 x float> %wide.load, %broadcast.splat
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_frsub(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_frsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB25_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB25_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fsub <4 x float> %broadcast.splat, %wide.load
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

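; Scalable-vector version: the vectorized main loop uses whole-register
; vl1re32.v/vs1r.v accesses and still sinks the splat into vfmul.vf, with a
; scalar remainder loop handling any tail iterations left over from 1024.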
define void @sink_splat_fmul_scalable(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fmul_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a3, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB26_5
; CHECK-NEXT:  .LBB26_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a3, -1
; CHECK-NEXT:    andi a4, a1, 1024
; CHECK-NEXT:    xori a1, a4, 1024
; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB26_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl1re32.v v8, (a5)
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    vs1r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a3
; CHECK-NEXT:    add a5, a5, a2
; CHECK-NEXT:    bnez a6, .LBB26_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB26_7
; CHECK-NEXT:  .LBB26_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB26_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    fmul.s ft0, ft0, fa0
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB26_6
; CHECK-NEXT:  .LBB26_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = fmul <vscale x 2 x float> %wide.load, %broadcast.splat
  %9 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %11 = load float, float* %arrayidx, align 4
  %mul = fmul float %11, %x
  store float %mul, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

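; The scalable fdiv, frdiv, fadd, fsub and frsub tests below share that
; structure and should select the corresponding .vf instructions.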
define void @sink_splat_fdiv_scalable(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fdiv_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a3, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB27_5
; CHECK-NEXT:  .LBB27_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a3, -1
; CHECK-NEXT:    andi a4, a1, 1024
; CHECK-NEXT:    xori a1, a4, 1024
; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB27_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl1re32.v v8, (a5)
; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
; CHECK-NEXT:    vs1r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a3
; CHECK-NEXT:    add a5, a5, a2
; CHECK-NEXT:    bnez a6, .LBB27_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB27_7
; CHECK-NEXT:  .LBB27_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB27_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    fdiv.s ft0, ft0, fa0
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB27_6
; CHECK-NEXT:  .LBB27_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = fdiv <vscale x 2 x float> %wide.load, %broadcast.splat
  %9 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %11 = load float, float* %arrayidx, align 4
  %div = fdiv float %11, %x
  store float %div, float* %arrayidx, align 4

  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_frdiv_scalable(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_frdiv_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a3, .LBB28_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB28_5
; CHECK-NEXT:  .LBB28_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a3, -1
; CHECK-NEXT:    andi a4, a1, 1024
; CHECK-NEXT:    xori a1, a4, 1024
; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB28_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl1re32.v v8, (a5)
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    vs1r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a3
; CHECK-NEXT:    add a5, a5, a2
; CHECK-NEXT:    bnez a6, .LBB28_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB28_7
; CHECK-NEXT:  .LBB28_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB28_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    fdiv.s ft0, fa0, ft0
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB28_6
; CHECK-NEXT:  .LBB28_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = fdiv <vscale x 2 x float> %broadcast.splat, %wide.load
  %9 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %11 = load float, float* %arrayidx, align 4
  %div = fdiv float %x, %11
  store float %div, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fadd_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a3, .LBB29_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB29_5
; CHECK-NEXT:  .LBB29_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a3, -1
; CHECK-NEXT:    andi a4, a1, 1024
; CHECK-NEXT:    xori a1, a4, 1024
; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB29_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl1re32.v v8, (a5)
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    vs1r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a3
; CHECK-NEXT:    add a5, a5, a2
; CHECK-NEXT:    bnez a6, .LBB29_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB29_7
; CHECK-NEXT:  .LBB29_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB29_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    fadd.s ft0, ft0, fa0
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB29_6
; CHECK-NEXT:  .LBB29_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat
  %9 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %11 = load float, float* %arrayidx, align 4
  %add = fadd float %11, %x
  store float %add, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_fsub_scalable(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fsub_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a3, .LBB30_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB30_5
; CHECK-NEXT:  .LBB30_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a3, -1
; CHECK-NEXT:    andi a4, a1, 1024
; CHECK-NEXT:    xori a1, a4, 1024
; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB30_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl1re32.v v8, (a5)
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    vs1r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a3
; CHECK-NEXT:    add a5, a5, a2
; CHECK-NEXT:    bnez a6, .LBB30_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB30_7
; CHECK-NEXT:  .LBB30_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB30_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    fsub.s ft0, ft0, fa0
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB30_6
; CHECK-NEXT:  .LBB30_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = fsub <vscale x 2 x float> %wide.load, %broadcast.splat
  %9 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %11 = load float, float* %arrayidx, align 4
  %sub = fsub float %11, %x
  store float %sub, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_frsub_scalable(float* nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_frsub_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    bgeu a1, a3, .LBB31_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    j .LBB31_5
; CHECK-NEXT:  .LBB31_2: # %vector.ph
; CHECK-NEXT:    addiw a1, a3, -1
; CHECK-NEXT:    andi a4, a1, 1024
; CHECK-NEXT:    xori a1, a4, 1024
; CHECK-NEXT:    vsetvli a5, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a5, a0
; CHECK-NEXT:    mv a6, a1
; CHECK-NEXT:  .LBB31_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl1re32.v v8, (a5)
; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
; CHECK-NEXT:    vs1r.v v8, (a5)
; CHECK-NEXT:    sub a6, a6, a3
; CHECK-NEXT:    add a5, a5, a2
; CHECK-NEXT:    bnez a6, .LBB31_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB31_7
; CHECK-NEXT:  .LBB31_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a1, -1024
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:  .LBB31_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    fsub.s ft0, fa0, ft0
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB31_6
; CHECK-NEXT:  .LBB31_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = fsub <vscale x 2 x float> %broadcast.splat, %wide.load
  %9 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %11 = load float, float* %arrayidx, align 4
  %sub = fsub float %x, %11
  store float %sub, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

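; FMA with a splatted multiplicand: the splat should be sunk so the fused
; multiply-add is selected as vfmacc.vf with the scalar in fa0. The commuted
; form below is expected to produce the same code.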
define void @sink_splat_fma(float* noalias nocapture %a, float* nocapture readonly %b, float %x) {
; CHECK-LABEL: sink_splat_fma:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB32_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB32_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = getelementptr inbounds float, float* %b, i64 %index
  %3 = bitcast float* %2 to <4 x float>*
  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
  %4 = call <4 x float> @llvm.fma.v4f32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x float> %wide.load12)
  %5 = bitcast float* %0 to <4 x float>*
  store <4 x float> %4, <4 x float>* %5, align 4
  %index.next = add nuw i64 %index, 4
  %6 = icmp eq i64 %index.next, 1024
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_fma_commute(float* noalias nocapture %a, float* nocapture readonly %b, float %x) {
; CHECK-LABEL: sink_splat_fma_commute:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB33_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB33_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = getelementptr inbounds float, float* %b, i64 %index
  %3 = bitcast float* %2 to <4 x float>*
  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
  %4 = call <4 x float> @llvm.fma.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
  %5 = bitcast float* %0 to <4 x float>*
  store <4 x float> %4, <4 x float>* %5, align 4
  %index.next = add nuw i64 %index, 4
  %6 = icmp eq i64 %index.next, 1024
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

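; Scalable FMA versions: same vfmacc.vf selection in the vector loop, with a
; scalar fmadd.s remainder loop for the tail.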
define void @sink_splat_fma_scalable(float* noalias nocapture %a, float* noalias nocapture readonly %b, float %x) {
; CHECK-LABEL: sink_splat_fma_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a4, 1024
; CHECK-NEXT:    bgeu a4, a3, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a4, 0
; CHECK-NEXT:    j .LBB34_5
; CHECK-NEXT:  .LBB34_2: # %vector.ph
; CHECK-NEXT:    li a6, 0
; CHECK-NEXT:    addiw a4, a3, -1
; CHECK-NEXT:    andi a5, a4, 1024
; CHECK-NEXT:    xori a4, a5, 1024
; CHECK-NEXT:    vsetvli a7, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a7, a4
; CHECK-NEXT:  .LBB34_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    add t0, a0, a6
; CHECK-NEXT:    vl1re32.v v8, (t0)
; CHECK-NEXT:    add t1, a1, a6
; CHECK-NEXT:    vl1re32.v v9, (t1)
; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
; CHECK-NEXT:    vs1r.v v9, (t0)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a2
; CHECK-NEXT:    bnez a7, .LBB34_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a5, .LBB34_7
; CHECK-NEXT:  .LBB34_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a4, -1024
; CHECK-NEXT:    slli a3, a4, 2
; CHECK-NEXT:    add a1, a1, a3
; CHECK-NEXT:    add a0, a0, a3
; CHECK-NEXT:  .LBB34_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    flw ft1, 0(a1)
; CHECK-NEXT:    fmadd.s ft0, ft0, fa0, ft1
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a1, a1, 4
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB34_6
; CHECK-NEXT:  .LBB34_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = getelementptr inbounds float, float* %b, i64 %index
  %9 = bitcast float* %8 to <vscale x 2 x float>*
  %wide.load12 = load <vscale x 2 x float>, <vscale x 2 x float>* %9, align 4
  %10 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %wide.load, <vscale x 2 x float> %broadcast.splat, <vscale x 2 x float> %wide.load12)
  %11 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %10, <vscale x 2 x float>* %11, align 4
  %index.next = add nuw i64 %index, %5
  %12 = icmp eq i64 %index.next, %n.vec
  br i1 %12, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %13 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %b, i64 %indvars.iv
  %14 = load float, float* %arrayidx2, align 4
  %15 = tail call float @llvm.fma.f32(float %13, float %x, float %14)
  store float %15, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_fma_commute_scalable(float* noalias nocapture %a, float* noalias nocapture readonly %b, float %x) {
; CHECK-LABEL: sink_splat_fma_commute_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    li a4, 1024
; CHECK-NEXT:    bgeu a4, a3, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a4, 0
; CHECK-NEXT:    j .LBB35_5
; CHECK-NEXT:  .LBB35_2: # %vector.ph
; CHECK-NEXT:    li a6, 0
; CHECK-NEXT:    addiw a4, a3, -1
; CHECK-NEXT:    andi a5, a4, 1024
; CHECK-NEXT:    xori a4, a5, 1024
; CHECK-NEXT:    vsetvli a7, zero, e32, m1, ta, mu
; CHECK-NEXT:    mv a7, a4
; CHECK-NEXT:  .LBB35_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    add t0, a0, a6
; CHECK-NEXT:    vl1re32.v v8, (t0)
; CHECK-NEXT:    add t1, a1, a6
; CHECK-NEXT:    vl1re32.v v9, (t1)
; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
; CHECK-NEXT:    vs1r.v v9, (t0)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a2
; CHECK-NEXT:    bnez a7, .LBB35_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a5, .LBB35_7
; CHECK-NEXT:  .LBB35_5: # %for.body.preheader
; CHECK-NEXT:    addi a2, a4, -1024
; CHECK-NEXT:    slli a3, a4, 2
; CHECK-NEXT:    add a1, a1, a3
; CHECK-NEXT:    add a0, a0, a3
; CHECK-NEXT:  .LBB35_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    flw ft0, 0(a0)
; CHECK-NEXT:    flw ft1, 0(a1)
; CHECK-NEXT:    fmadd.s ft0, fa0, ft0, ft1
; CHECK-NEXT:    fsw ft0, 0(a0)
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a1, a1, 4
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a2, .LBB35_6
; CHECK-NEXT:  .LBB35_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 1
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 1
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 1
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds float, float* %a, i64 %index
  %7 = bitcast float* %6 to <vscale x 2 x float>*
  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
  %8 = getelementptr inbounds float, float* %b, i64 %index
  %9 = bitcast float* %8 to <vscale x 2 x float>*
  %wide.load12 = load <vscale x 2 x float>, <vscale x 2 x float>* %9, align 4
  %10 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %broadcast.splat, <vscale x 2 x float> %wide.load, <vscale x 2 x float> %wide.load12)
  %11 = bitcast float* %6 to <vscale x 2 x float>*
  store <vscale x 2 x float> %10, <vscale x 2 x float>* %11, align 4
  %index.next = add nuw i64 %index, %5
  %12 = icmp eq i64 %index.next, %n.vec
  br i1 %12, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %13 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %b, i64 %indvars.iv
  %14 = load float, float* %arrayidx2, align 4
  %15 = tail call float @llvm.fma.f32(float %x, float %13, float %14)
  store float %15, float* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

declare i64 @llvm.vscale.i64()
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
declare <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>)
declare float @llvm.fma.f32(float, float, float)

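; Check that the splatted compare operand is sunk into vmseq.vx, whose result
; feeds the mask of the masked store.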
define void @sink_splat_icmp(i32* nocapture %x, i32 signext %y) {
; CHECK-LABEL: sink_splat_icmp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:  .LBB36_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vmseq.vx v0, v9, a1
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB36_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %y, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %x, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = icmp eq <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> zeroinitializer, <4 x i32>* %3, i32 4, <4 x i1> %2)
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)

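; FP compare variant: the splat should be sunk into vmfeq.vf.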
define void @sink_splat_fcmp(float* nocapture %x, float %y) {
; CHECK-LABEL: sink_splat_fcmp:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:  .LBB37_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vmfeq.vf v0, v9, fa0
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB37_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %y, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %x, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = fcmp fast oeq <4 x float> %wide.load, %broadcast.splat
  %3 = bitcast float* %0 to <4 x float>*
  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> zeroinitializer, <4 x float>* %3, i32 4, <4 x i1> %2)
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}
declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)

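; Integer division and remainder splats should likewise be sunk, selecting
; vdivu.vx, vdiv.vx, vremu.vx and vrem.vx in the four tests below.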
define void @sink_splat_udiv(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_udiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB38_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vdivu.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB38_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = udiv <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_sdiv(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_sdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB39_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vdiv.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB39_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = sdiv <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_urem(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_urem:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB40_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vremu.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB40_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = urem <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_srem(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_srem:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB41_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrem.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB41_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = srem <4 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

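; Scalable division/remainder versions: the vector loop uses LMUL=2
; whole-register accesses (vl2re32.v/vs2r.v) and the scalar remainder loop uses
; the W-suffixed RV64 divide/remainder instructions.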
define void @sink_splat_udiv_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_udiv_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB42_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB42_5
; CHECK-NEXT:  .LBB42_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB42_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vdivu.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB42_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB42_7
; CHECK-NEXT:  .LBB42_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB42_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    divuw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB42_6
; CHECK-NEXT:  .LBB42_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = udiv <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %div = udiv i32 %11, %x
  store i32 %div, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_sdiv_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_sdiv_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB43_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB43_5
; CHECK-NEXT:  .LBB43_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB43_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vdiv.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB43_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB43_7
; CHECK-NEXT:  .LBB43_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB43_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    divw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB43_6
; CHECK-NEXT:  .LBB43_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = sdiv <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %div = sdiv i32 %11, %x
  store i32 %div, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_urem_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_urem_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB44_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB44_5
; CHECK-NEXT:  .LBB44_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB44_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vremu.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB44_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB44_7
; CHECK-NEXT:  .LBB44_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB44_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    remuw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB44_6
; CHECK-NEXT:  .LBB44_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = urem <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %rem = urem i32 %11, %x
  store i32 %rem, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

define void @sink_splat_srem_scalable(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_srem_scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    srli a3, a5, 1
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    bgeu a2, a3, .LBB45_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    j .LBB45_5
; CHECK-NEXT:  .LBB45_2: # %vector.ph
; CHECK-NEXT:    addiw a2, a3, -1
; CHECK-NEXT:    andi a4, a2, 1024
; CHECK-NEXT:    xori a2, a4, 1024
; CHECK-NEXT:    slli a5, a5, 1
; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, mu
; CHECK-NEXT:    mv a6, a0
; CHECK-NEXT:    mv a7, a2
; CHECK-NEXT:  .LBB45_3: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vl2re32.v v8, (a6)
; CHECK-NEXT:    vrem.vx v8, v8, a1
; CHECK-NEXT:    vs2r.v v8, (a6)
; CHECK-NEXT:    sub a7, a7, a3
; CHECK-NEXT:    add a6, a6, a5
; CHECK-NEXT:    bnez a7, .LBB45_3
; CHECK-NEXT:  # %bb.4: # %middle.block
; CHECK-NEXT:    beqz a4, .LBB45_7
; CHECK-NEXT:  .LBB45_5: # %for.body.preheader
; CHECK-NEXT:    addi a3, a2, -1024
; CHECK-NEXT:    slli a2, a2, 2
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:  .LBB45_6: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    remw a2, a2, a1
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a3, a3, 1
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bnez a3, .LBB45_6
; CHECK-NEXT:  .LBB45_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = shl i64 %0, 2
  %min.iters.check = icmp ugt i64 %1, 1024
  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph

vector.ph:                                        ; preds = %entry
  %2 = call i64 @llvm.vscale.i64()
  %3 = shl i64 %2, 2
  %n.mod.vf = urem i64 1024, %3
  %n.vec = sub nsw i64 1024, %n.mod.vf
  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %4 = call i64 @llvm.vscale.i64()
  %5 = shl i64 %4, 2
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %6 = getelementptr inbounds i32, i32* %a, i64 %index
  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
  %8 = srem <vscale x 4 x i32> %wide.load, %broadcast.splat
  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
  %index.next = add nuw i64 %index, %5
  %10 = icmp eq i64 %index.next, %n.vec
  br i1 %10, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.mod.vf, 0
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry, %middle.block
  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %middle.block
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %11 = load i32, i32* %arrayidx, align 4
  %rem = srem i32 %11, %x
  store i32 %rem, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
}

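; Test that splat operands of VP intrinsics are also sunk into the loop so they
; can be folded into the .vx instruction forms (as the CHECK lines below show).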
declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_mul(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_mul:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB46_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB46_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_add:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB47_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vadd.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB47_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

; FIXME: This doesn't match against vadd.vx because our patterns aren't
; commutative: the splat is the first operand of the VP add here, and only a
; splat in the second operand position is currently folded.

define void @sink_splat_vp_add_commute(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_add_commute:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:  .LBB48_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vadd.vv v9, v8, v9, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB48_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_sub(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_sub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB49_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsub.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB49_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_vp_rsub(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_rsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB50_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB50_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_shl(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_shl:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB51_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB51_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_lshr(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_lshr:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB52_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsrl.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB52_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_ashr(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_ashr:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB53_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vsra.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB53_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

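; The same applies to floating-point VP operations, where the splat should fold
; into the .vf forms (vfmul.vf, vfdiv.vf, vfrdiv.vf, vfadd.vf, vfsub.vf, vfrsub.vf).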
declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @sink_splat_vp_fmul(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fmul:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB54_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB54_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @sink_splat_vp_fdiv(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB55_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB55_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_vp_frdiv(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_frdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB56_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB56_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x i1> %m, i32 %vl)
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fadd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB57_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB57_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @sink_splat_vp_fsub(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB58_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB58_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x float> @llvm.vp.frsub.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @sink_splat_vp_frsub(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_frsub:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB59_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB59_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x i1> %m, i32 %vl)
  %3 = bitcast float* %0 to <4 x float>*
  store <4 x float> %2, <4 x float>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_udiv(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_udiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB60_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vdivu.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB60_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_sdiv(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_sdiv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB61_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vdiv.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB61_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_urem(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_urem:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB62_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vremu.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB62_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @sink_splat_vp_srem(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_srem:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB63_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB63_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

; Check that we don't sink a splat operand that has no chance of being folded:
; srem is not commutable and vrem.vx only takes a scalar divisor, so a splat
; dividend can never use the .vx form.

define void @sink_splat_vp_srem_commute(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_srem_commute:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    li a1, 1024
; CHECK-NEXT:  .LBB64_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vrem.vv v9, v8, v9, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    addi a1, a1, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a1, .LBB64_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
  %2 = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %broadcast.splat, <4 x i32> %wide.load, <4 x i1> %m, i32 %vl)
  %3 = bitcast i32* %0 to <4 x i32>*
  store <4 x i32> %2, <4 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

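; Splat multiplicands of llvm.vp.fma should be sunk and folded into vfmadd.vf.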
declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32)

define void @sink_splat_vp_fma(float* noalias nocapture %a, float* nocapture readonly %b, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fma:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB65_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB65_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = getelementptr inbounds float, float* %b, i64 %index
  %3 = bitcast float* %2 to <4 x float>*
  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
  %4 = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x float> %wide.load12, <4 x i1> %m, i32 %vl)
  %5 = bitcast float* %0 to <4 x float>*
  store <4 x float> %4, <4 x float>* %5, align 4
  %index.next = add nuw i64 %index, 4
  %6 = icmp eq i64 %index.next, 1024
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

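; Unlike the VP add case above, the commuted form still folds to vfmadd.vf,
; since the splat can be either multiplicand of the FMA.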
define void @sink_splat_vp_fma_commute(float* noalias nocapture %a, float* nocapture readonly %b, float %x, <4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: sink_splat_vp_fma_commute:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a3, 1024
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:  .LBB66_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a3, a3, -4
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a3, .LBB66_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %a, i64 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = getelementptr inbounds float, float* %b, i64 %index
  %3 = bitcast float* %2 to <4 x float>*
  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
  %4 = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12, <4 x i1> %m, i32 %vl)
  %5 = bitcast float* %0 to <4 x float>*
  store <4 x float> %4, <4 x float>* %5, align 4
  %index.next = add nuw i64 %index, 4
  %6 = icmp eq i64 %index.next, 1024
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}
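; Make sure sinking also works for fixed-length vectors that use LMUL=2
; register groups (e64, m2).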
define void @sink_splat_mul_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_mul_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB67_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB67_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = mul <4 x i64> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_add_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_add_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB68_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB68_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = add <4 x i64> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_sub_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_sub_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB69_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vsub.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB69_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = sub <4 x i64> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_rsub_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_rsub_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB70_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB70_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = sub <4 x i64> %broadcast.splat, %wide.load
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_and_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_and_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB71_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB71_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = and <4 x i64> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_or_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_or_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB72_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vor.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB72_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = or <4 x i64> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_xor_lmul2(i64* nocapture %a, i64 signext %x) {
; CHECK-LABEL: sink_splat_xor_lmul2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT:  .LBB73_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vxor.vx v8, v8, a1
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB73_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <4 x i64> poison, i64 %x, i64 0
  %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> poison, <4 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <4 x i64>*
  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
  %2 = xor <4 x i64> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <4 x i64>*
  store <4 x i64> %2, <4 x i64>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

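; And for larger fixed-length vectors that require LMUL=8 (e32, m8).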
define void @sink_splat_mul_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_mul_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB74_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB74_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = mul <32 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_add_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_add_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB75_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB75_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = add <32 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_sub_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_sub_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB76_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsub.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB76_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = sub <32 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_rsub_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_rsub_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB77_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB77_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = sub <32 x i32> %broadcast.splat, %wide.load
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_and_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_and_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB78_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB78_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = and <32 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_or_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_or_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB79_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vor.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB79_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = or <32 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_xor_lmul8(i32* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_xor_lmul8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:  .LBB80_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vxor.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    bnez a2, .LBB80_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <32 x i32> poison, i32 %x, i32 0
  %broadcast.splat = shufflevector <32 x i32> %broadcast.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %a, i64 %index
  %1 = bitcast i32* %0 to <32 x i32>*
  %wide.load = load <32 x i32>, <32 x i32>* %1, align 4
  %2 = xor <32 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i32* %0 to <32 x i32>*
  store <32 x i32> %2, <32 x i32>* %3, align 4
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

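; The tests below use <2 x i32> vectors over an i64-typed pointer, which
; select the fractional LMUL mf2. As above, the splatted scalar %x should be
; sunk into the loop and consumed by the .vx instruction forms.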
define void @sink_splat_mul_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_mul_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB81_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB81_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = mul <2 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_add_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_add_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB82_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB82_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = add <2 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_sub_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_sub_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB83_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsub.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB83_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = sub <2 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_rsub_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_rsub_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB84_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vrsub.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB84_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = sub <2 x i32> %broadcast.splat, %wide.load
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_and_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_and_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB85_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vand.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB85_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = and <2 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_or_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_or_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB86_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vor.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB86_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = or <2 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @sink_splat_xor_lmulmf2(i64* nocapture %a, i32 signext %x) {
; CHECK-LABEL: sink_splat_xor_lmulmf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 1024
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:  .LBB87_1: # %vector.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vxor.vx v8, v8, a1
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    addi a2, a2, -4
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    bnez a2, .LBB87_1
; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %broadcast.splatinsert = insertelement <2 x i32> poison, i32 %x, i64 0
  %broadcast.splat = shufflevector <2 x i32> %broadcast.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i64, i64* %a, i64 %index
  %1 = bitcast i64* %0 to <2 x i32>*
  %wide.load = load <2 x i32>, <2 x i32>* %1, align 8
  %2 = xor <2 x i32> %wide.load, %broadcast.splat
  %3 = bitcast i64* %0 to <2 x i32>*
  store <2 x i32> %2, <2 x i32>* %3, align 8
  %index.next = add nuw i64 %index, 4
  %4 = icmp eq i64 %index.next, 1024
  br i1 %4, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}