# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -o - -mtriple=riscv64 -mattr=v \
# RUN:     -run-pass=riscv-insert-vsetvli | FileCheck %s

--- |
  ; ModuleID = 'vsetvli-insert.ll'
  source_filename = "vsetvli-insert.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %a
  }

  define <vscale x 1 x i64> @load_add(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %b
  }

  define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
  entry:
    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %1)
    %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
    ret <vscale x 1 x i64> %b
  }

  ; Function Attrs: nounwind readnone
  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1

  define i64 @vmv_x_s(<vscale x 1 x i64> %0) #0 {
  entry:
    %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
    ret i64 %a
  }

  define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) #0 {
    %a = load <2 x i64>, <2 x i64>* %x, align 16
    %b = load <2 x i64>, <2 x i64>* %y, align 16
    %c = add <2 x i64> %a, %b
    store <2 x i64> %c, <2 x i64>* %x, align 16
    ret void
  }

  ; Function Attrs: nofree nosync nounwind readnone willreturn
  declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2

  define i64 @vreduce_add_v2i64(<2 x i64>* %x) #0 {
    %v = load <2 x i64>, <2 x i64>* %x, align 16
    %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
    ret i64 %red
  }

  ; Function Attrs: nounwind
  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3

  define <vscale x 1 x i64> @vsetvli_add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
  entry:
    %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
    ret <vscale x 1 x i64> %b
  }

  define <vscale x 1 x i64> @load_add_inlineasm(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
    call void asm sideeffect "", ""()
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %b
  }

  define void @vmv_v_i_different_lmuls() {
    ret void
  }

  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  ; Function Attrs: nounwind readonly
  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4

  ; Function Attrs: nounwind readonly
  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #4

  ; Function Attrs: nounwind readnone
  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  attributes #0 = { "target-features"="+v" }
  attributes #1 = { nounwind readnone }
  attributes #2 = { nofree nosync nounwind readnone willreturn }
  attributes #3 = { nounwind }
  attributes #4 = { nounwind readonly }

...
---
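# Basic case: a lone vadd.vv with a register AVL should get a single
# vsetvli (e64, m1, ta, mu) inserted in front of it.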
name:            add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: vr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
  - { reg: '$v9', virtual-reg: '%1' }
  - { reg: '$x10', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8, $v9, $x10

    ; CHECK-LABEL: name: add
    ; CHECK: liveins: $v8, $v9, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:vr = PseudoVADD_VV_M1 %0, %1, %2, 6
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
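# A vle64 feeding a vadd with the same AVL and SEW: one vsetvli in front of
# the load should be enough to cover both instructions.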
name:            load_add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v8', virtual-reg: '%1' }
  - { reg: '$x11', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: load_add
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 %0, %2, 6
    %4:vr = PseudoVADD_VV_M1 killed %3, %1, %2, 6
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
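# A vle32 (EMUL=mf2) feeding a vzext.vf2 to e64/m1. Both operations use the
# same AVL and SEW/LMUL ratio, so a single vsetvli appears to be sufficient.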
name:            load_zext
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gprnox0 }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $x11

    ; CHECK-LABEL: name: load_zext
    ; CHECK: liveins: $x10, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY %3
    ; CHECK-NEXT: PseudoRET implicit $v8
    %1:gprnox0 = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE32_V_MF2 %0, %1, 5
    early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed %2, %1, 6
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
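# vmv.x.s only depends on vtype, not vl, so the inserted vsetivli can use an
# AVL of 0.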
name:            vmv_x_s
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: gpr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8

    ; CHECK-LABEL: name: vmv_x_s
    ; CHECK: liveins: $v8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $x10
    %0:vr = COPY $v8
    %1:gpr = PseudoVMV_X_S_M1 %0, 6
    $x10 = COPY %1
    PseudoRET implicit $x10

...
---
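# Fixed-length v2i64 add: the load, add, and store all use an immediate AVL
# of 2, so a single PseudoVSETIVLI should cover the whole sequence.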
name:            add_v2i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10, $x11

    ; CHECK-LABEL: name: add_v2i64
    ; CHECK: liveins: $x10, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
    ; CHECK-NEXT: PseudoRET
    %1:gpr = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
    %3:vr = PseudoVLE64_V_M1 %1, 2, 6 :: (load (s128) from %ir.y)
    %4:vr = PseudoVADD_VV_M1 killed %2, killed %3, 2, 6
    PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
    PseudoRET

...
---
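# Reduction over a fixed v2i64 vector. The vmv.v.i splat of the accumulator
# uses AVL -1 (VLMAX), which forces a vl/vtype change around it before the
# state is restored to AVL 2 for the vredsum.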
name:            vreduce_add_v2i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
  - { id: 5, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10

    ; CHECK-LABEL: name: vreduce_add_v2i64
    ; CHECK: liveins: $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $x10
    %0:gpr = COPY $x10
    %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
    %2:vr = PseudoVMV_V_I_M1 0, -1, 6
    %4:vr = IMPLICIT_DEF
    %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
    %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
    $x10 = COPY %5
    PseudoRET implicit $x10

...
---
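# The AVL comes from an explicit llvm.riscv.vsetvli call. The existing
# PseudoVSETVLI should be reused, with no additional vsetvli inserted for
# the add.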
name:            vsetvli_add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: gprnox0 }
  - { id: 4, class: vr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
  - { reg: '$v9', virtual-reg: '%1' }
  - { reg: '$x10', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8, $v9, $x10

    ; CHECK-LABEL: name: vsetvli_add
    ; CHECK: liveins: $v8, $v9, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
    %4:vr = PseudoVADD_VV_M1 %0, %1, killed %3, 6
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
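# Inline asm may clobber vl/vtype, so the state is not assumed to survive
# across the INLINEASM and a second vsetvli is inserted before the add.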
name:            load_add_inlineasm
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v8', virtual-reg: '%1' }
  - { reg: '$x11', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: load_add_inlineasm
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 %0, %2, 6
    INLINEASM &"", 1 /* sideeffect attdialect */
    %4:vr = PseudoVADD_VV_M1 killed %3, %1, %2, 6
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
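# vid.v at m2 followed by a vmv.v.i at mf4 with the same AVL: the second
# vtype change should use the vl-preserving (x0, x0) form of vsetvli.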
name:            vmv_v_i_different_lmuls
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: vmv_v_i_different_lmuls
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 89 /* e64, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 4, 6 /* e64 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 70 /* e8, mf4, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
    ; CHECK-NEXT: [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 0, 4, 3 /* e8 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET
    %0:vrm2 = PseudoVID_V_M2 4, 6
    %4:vr = PseudoVMV_V_I_MF4 0, 4, 3
    PseudoRET
...