; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-UNKNOWN
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-256
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-512
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-UNKNOWN
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-256
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-512
;
; VECTOR_REVERSE - masks
;
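; Note on the expected lowering checked below: a reverse is built from vid.v
; (indices 0..vlmax-1), vrsub.vx (index i becomes vlmax-1-i), and a vrgather
; through those indices. Mask vectors cannot be gathered directly, so the i1
; source is first expanded to an i8 vector (vmv.v.i + vmerge.vim), reversed,
; and then converted back to a mask with vand.vi + vmsne.vi.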
define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i1:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV32-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 2
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v9
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v9
; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv2i1:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-BITS-256-NEXT:    vmv.v.i v8, 0
; RV32-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    srli a0, a0, 2
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v10, v8, v9
; RV32-BITS-256-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv2i1:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-BITS-512-NEXT:    vmv.v.i v8, 0
; RV32-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    srli a0, a0, 2
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v10, v8, v9
; RV32-BITS-512-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i1:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV64-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v9
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v9
; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv2i1:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-BITS-256-NEXT:    vmv.v.i v8, 0
; RV64-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    srli a0, a0, 2
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v9
; RV64-BITS-256-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv2i1:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-BITS-512-NEXT:    vmv.v.i v8, 0
; RV64-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    srli a0, a0, 2
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v9
; RV64-BITS-512-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1> %a)
  ret <vscale x 2 x i1> %res
}

define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv4i1:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV32-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 1
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v9
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v9
; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv4i1:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV32-BITS-256-NEXT:    vmv.v.i v8, 0
; RV32-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    srli a0, a0, 1
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v10, v8, v9
; RV32-BITS-256-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv4i1:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV32-BITS-512-NEXT:    vmv.v.i v8, 0
; RV32-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    srli a0, a0, 1
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v10, v8, v9
; RV32-BITS-512-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i1:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV64-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 1
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v9
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v9
; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv4i1:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV64-BITS-256-NEXT:    vmv.v.i v8, 0
; RV64-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    srli a0, a0, 1
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v9
; RV64-BITS-256-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv4i1:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV64-BITS-512-NEXT:    vmv.v.i v8, 0
; RV64-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    srli a0, a0, 1
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v9
; RV64-BITS-512-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  ret <vscale x 4 x i1> %res
}

define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i1:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV32-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v10
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v10, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v9, 1
; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv8i1:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV32-BITS-256-NEXT:    vmv.v.i v8, 0
; RV32-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v10, v8, v9
; RV32-BITS-256-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv8i1:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV32-BITS-512-NEXT:    vmv.v.i v8, 0
; RV32-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v9, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v10, v8, v9
; RV32-BITS-512-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i1:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV64-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v10
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v10, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v9, 1
; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv8i1:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV64-BITS-256-NEXT:    vmv.v.i v8, 0
; RV64-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v9
; RV64-BITS-256-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv8i1:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV64-BITS-512-NEXT:    vmv.v.i v8, 0
; RV64-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v9, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v9
; RV64-BITS-512-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1> %a)
  ret <vscale x 8 x i1> %res
}

define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i1:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV32-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 1
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v12
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v12, v12, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v10, 1
; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv16i1:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; RV32-BITS-256-NEXT:    vmv.v.i v8, 0
; RV32-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    slli a0, a0, 1
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vid.v v10
; RV32-BITS-256-NEXT:    vrsub.vx v10, v10, a0
; RV32-BITS-256-NEXT:    vrgather.vv v12, v8, v10
; RV32-BITS-256-NEXT:    vand.vi v8, v12, 1
; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv16i1:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; RV32-BITS-512-NEXT:    vmv.v.i v8, 0
; RV32-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    slli a0, a0, 1
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vid.v v10
; RV32-BITS-512-NEXT:    vrsub.vx v10, v10, a0
; RV32-BITS-512-NEXT:    vrgather.vv v12, v8, v10
; RV32-BITS-512-NEXT:    vand.vi v8, v12, 1
; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i1:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV64-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 1
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v12
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v12, v12, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v10, 1
; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv16i1:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; RV64-BITS-256-NEXT:    vmv.v.i v8, 0
; RV64-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    slli a0, a0, 1
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vid.v v10
; RV64-BITS-256-NEXT:    vrsub.vx v10, v10, a0
; RV64-BITS-256-NEXT:    vrgather.vv v12, v8, v10
; RV64-BITS-256-NEXT:    vand.vi v8, v12, 1
; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv16i1:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; RV64-BITS-512-NEXT:    vmv.v.i v8, 0
; RV64-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    slli a0, a0, 1
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vid.v v10
; RV64-BITS-512-NEXT:    vrsub.vx v10, v10, a0
; RV64-BITS-512-NEXT:    vrgather.vv v12, v8, v10
; RV64-BITS-512-NEXT:    vand.vi v8, v12, 1
; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> %a)
  ret <vscale x 16 x i1> %res
}

define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i1:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV32-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v16
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v16, v16, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v12, 1
; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv32i1:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV32-BITS-256-NEXT:    vmv.v.i v8, 0
; RV32-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    slli a0, a0, 2
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vid.v v12
; RV32-BITS-256-NEXT:    vrsub.vx v12, v12, a0
; RV32-BITS-256-NEXT:    vrgather.vv v16, v8, v12
; RV32-BITS-256-NEXT:    vand.vi v8, v16, 1
; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv32i1:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV32-BITS-512-NEXT:    vmv.v.i v8, 0
; RV32-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    slli a0, a0, 2
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vid.v v12
; RV32-BITS-512-NEXT:    vrsub.vx v12, v12, a0
; RV32-BITS-512-NEXT:    vrgather.vv v16, v8, v12
; RV32-BITS-512-NEXT:    vand.vi v8, v16, 1
; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.i v8, 0
; RV64-BITS-UNKNOWN-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v16
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v16, v16, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v12, 1
; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv32i1:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV64-BITS-256-NEXT:    vmv.v.i v8, 0
; RV64-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    slli a0, a0, 2
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vid.v v12
; RV64-BITS-256-NEXT:    vrsub.vx v12, v12, a0
; RV64-BITS-256-NEXT:    vrgather.vv v16, v8, v12
; RV64-BITS-256-NEXT:    vand.vi v8, v16, 1
; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv32i1:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV64-BITS-512-NEXT:    vmv.v.i v8, 0
; RV64-BITS-512-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    slli a0, a0, 2
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vid.v v12
; RV64-BITS-512-NEXT:    vrsub.vx v12, v12, a0
; RV64-BITS-512-NEXT:    vrgather.vv v16, v8, v12
; RV64-BITS-512-NEXT:    vand.vi v8, v16, 1
; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 32 x i1> @llvm.experimental.vector.reverse.nxv32i1(<vscale x 32 x i1> %a)
  ret <vscale x 32 x i1> %res
}
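; For nxv64i1 the expanded i8 vector occupies LMUL=8. With VLEN unknown, an
; e16 index vector cannot cover an LMUL=8 e8 gather in one go, and with a
; 512-bit VLEN bound the indices no longer fit in e8 at LMUL=8, so both of
; those configurations below split the gather into two LMUL=4 halves whose
; results land in swapped register groups. Only the 256-bit bound keeps the
; whole reverse as a single LMUL=8 vrgather.vv.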
define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i1:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v8
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v8, v8, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.i v16, 0
; RV32-BITS-UNKNOWN-NEXT:    vmerge.vim v16, v16, 1, v0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v28, v16, v8
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v24, v20, v8
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vand.vi v8, v24, 1
; RV32-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv64i1:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV32-BITS-256-NEXT:    vmv.v.i v8, 0
; RV32-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    slli a0, a0, 3
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vid.v v16
; RV32-BITS-256-NEXT:    vrsub.vx v16, v16, a0
; RV32-BITS-256-NEXT:    vrgather.vv v24, v8, v16
; RV32-BITS-256-NEXT:    vand.vi v8, v24, 1
; RV32-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv64i1:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    slli a0, a0, 2
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV32-BITS-512-NEXT:    vid.v v8
; RV32-BITS-512-NEXT:    vrsub.vx v8, v8, a0
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV32-BITS-512-NEXT:    vmv.v.i v16, 0
; RV32-BITS-512-NEXT:    vmerge.vim v16, v16, 1, v0
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV32-BITS-512-NEXT:    vrgather.vv v28, v16, v8
; RV32-BITS-512-NEXT:    vrgather.vv v24, v20, v8
; RV32-BITS-512-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV32-BITS-512-NEXT:    vand.vi v8, v24, 1
; RV32-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i1:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v8
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v8, v8, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.i v16, 0
; RV64-BITS-UNKNOWN-NEXT:    vmerge.vim v16, v16, 1, v0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v28, v16, v8
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v24, v20, v8
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vand.vi v8, v24, 1
; RV64-BITS-UNKNOWN-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv64i1:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV64-BITS-256-NEXT:    vmv.v.i v8, 0
; RV64-BITS-256-NEXT:    vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    slli a0, a0, 3
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vid.v v16
; RV64-BITS-256-NEXT:    vrsub.vx v16, v16, a0
; RV64-BITS-256-NEXT:    vrgather.vv v24, v8, v16
; RV64-BITS-256-NEXT:    vand.vi v8, v24, 1
; RV64-BITS-256-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv64i1:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    slli a0, a0, 2
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV64-BITS-512-NEXT:    vid.v v8
; RV64-BITS-512-NEXT:    vrsub.vx v8, v8, a0
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV64-BITS-512-NEXT:    vmv.v.i v16, 0
; RV64-BITS-512-NEXT:    vmerge.vim v16, v16, 1, v0
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; RV64-BITS-512-NEXT:    vrgather.vv v28, v16, v8
; RV64-BITS-512-NEXT:    vrgather.vv v24, v20, v8
; RV64-BITS-512-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; RV64-BITS-512-NEXT:    vand.vi v8, v24, 1
; RV64-BITS-512-NEXT:    vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 64 x i1> @llvm.experimental.vector.reverse.nxv64i1(<vscale x 64 x i1> %a)
  ret <vscale x 64 x i1> %res
}

;
; VECTOR_REVERSE - integer
;
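; The element count is derived from vlenb (VLEN/8): srli/slli by log2 of the
; size ratio yields vlmax for the type, and vlmax-1 feeds vrsub.vx. Under an
; unknown VLEN the i8 cases gather with e16 indices (vrgatherei16.vv), since
; e8 indices could overflow; with a known 256- or 512-bit bound, plain
; vrgather.vv with e8 indices suffices up to LMUL=4.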
define <vscale x 1 x i8> @reverse_nxv1i8(<vscale x 1 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv1i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 3
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v9
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-BITS-UNKNOWN-NEXT:    vmv1r.v v8, v9
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv1i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    srli a0, a0, 3
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-256-NEXT:    vmv1r.v v8, v9
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv1i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    srli a0, a0, 3
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-512-NEXT:    vmv1r.v v8, v9
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv1i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 3
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v9
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV64-BITS-UNKNOWN-NEXT:    vmv1r.v v8, v9
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv1i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    srli a0, a0, 3
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-256-NEXT:    vmv1r.v v8, v9
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv1i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    srli a0, a0, 3
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-512-NEXT:    vmv1r.v v8, v9
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 1 x i8> @llvm.experimental.vector.reverse.nxv1i8(<vscale x 1 x i8> %a)
  ret <vscale x 1 x i8> %res
}

define <vscale x 2 x i8> @reverse_nxv2i8(<vscale x 2 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 2
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v9
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-BITS-UNKNOWN-NEXT:    vmv1r.v v8, v9
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv2i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    srli a0, a0, 2
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-256-NEXT:    vmv1r.v v8, v9
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv2i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    srli a0, a0, 2
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-512-NEXT:    vmv1r.v v8, v9
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v9
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV64-BITS-UNKNOWN-NEXT:    vmv1r.v v8, v9
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv2i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    srli a0, a0, 2
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-256-NEXT:    vmv1r.v v8, v9
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv2i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    srli a0, a0, 2
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-512-NEXT:    vmv1r.v v8, v9
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 2 x i8> @llvm.experimental.vector.reverse.nxv2i8(<vscale x 2 x i8> %a)
  ret <vscale x 2 x i8> %res
}

define <vscale x 4 x i8> @reverse_nxv4i8(<vscale x 4 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv4i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    srli a0, a0, 1
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v9
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-BITS-UNKNOWN-NEXT:    vmv1r.v v8, v9
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv4i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    srli a0, a0, 1
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-256-NEXT:    vmv1r.v v8, v9
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv4i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    srli a0, a0, 1
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-512-NEXT:    vmv1r.v v8, v9
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    srli a0, a0, 1
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v9
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV64-BITS-UNKNOWN-NEXT:    vmv1r.v v8, v9
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv4i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    srli a0, a0, 1
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-256-NEXT:    vmv1r.v v8, v9
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv4i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    srli a0, a0, 1
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-512-NEXT:    vmv1r.v v8, v9
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 4 x i8> @llvm.experimental.vector.reverse.nxv4i8(<vscale x 4 x i8> %a)
  ret <vscale x 4 x i8> %res
}

define <vscale x 8 x i8> @reverse_nxv8i8(<vscale x 8 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v10
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v10, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v9
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv8i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; RV32-BITS-256-NEXT:    vid.v v9
; RV32-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-256-NEXT:    vmv.v.v v8, v9
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv8i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; RV32-BITS-512-NEXT:    vid.v v9
; RV32-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV32-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV32-BITS-512-NEXT:    vmv.v.v v8, v9
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v10
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v10, v10, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v9, v8, v10
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v9
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv8i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; RV64-BITS-256-NEXT:    vid.v v9
; RV64-BITS-256-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-256-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-256-NEXT:    vmv.v.v v8, v9
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv8i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; RV64-BITS-512-NEXT:    vid.v v9
; RV64-BITS-512-NEXT:    vrsub.vx v10, v9, a0
; RV64-BITS-512-NEXT:    vrgather.vv v9, v8, v10
; RV64-BITS-512-NEXT:    vmv.v.v v8, v9
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 8 x i8> @llvm.experimental.vector.reverse.nxv8i8(<vscale x 8 x i8> %a)
  ret <vscale x 8 x i8> %res
}

define <vscale x 16 x i8> @reverse_nxv16i8(<vscale x 16 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 1
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v12
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v12, v12, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv16i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    slli a0, a0, 1
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; RV32-BITS-256-NEXT:    vid.v v10
; RV32-BITS-256-NEXT:    vrsub.vx v12, v10, a0
; RV32-BITS-256-NEXT:    vrgather.vv v10, v8, v12
; RV32-BITS-256-NEXT:    vmv.v.v v8, v10
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv16i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    slli a0, a0, 1
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; RV32-BITS-512-NEXT:    vid.v v10
; RV32-BITS-512-NEXT:    vrsub.vx v12, v10, a0
; RV32-BITS-512-NEXT:    vrgather.vv v10, v8, v12
; RV32-BITS-512-NEXT:    vmv.v.v v8, v10
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 1
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v12
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v12, v12, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v10, v8, v12
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v10
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv16i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    slli a0, a0, 1
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; RV64-BITS-256-NEXT:    vid.v v10
; RV64-BITS-256-NEXT:    vrsub.vx v12, v10, a0
; RV64-BITS-256-NEXT:    vrgather.vv v10, v8, v12
; RV64-BITS-256-NEXT:    vmv.v.v v8, v10
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv16i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    slli a0, a0, 1
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; RV64-BITS-512-NEXT:    vid.v v10
; RV64-BITS-512-NEXT:    vrsub.vx v12, v10, a0
; RV64-BITS-512-NEXT:    vrgather.vv v10, v8, v12
; RV64-BITS-512-NEXT:    vmv.v.v v8, v10
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %res
}

define <vscale x 32 x i8> @reverse_nxv32i8(<vscale x 32 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v16
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v16, v16, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
; RV32-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv32i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    slli a0, a0, 2
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV32-BITS-256-NEXT:    vid.v v12
; RV32-BITS-256-NEXT:    vrsub.vx v16, v12, a0
; RV32-BITS-256-NEXT:    vrgather.vv v12, v8, v16
; RV32-BITS-256-NEXT:    vmv.v.v v8, v12
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv32i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    slli a0, a0, 2
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV32-BITS-512-NEXT:    vid.v v12
; RV32-BITS-512-NEXT:    vrsub.vx v16, v12, a0
; RV32-BITS-512-NEXT:    vrgather.vv v12, v8, v16
; RV32-BITS-512-NEXT:    vmv.v.v v8, v12
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v16
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v16, v16, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v12, v8, v16
; RV64-BITS-UNKNOWN-NEXT:    vmv.v.v v8, v12
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv32i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    slli a0, a0, 2
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV64-BITS-256-NEXT:    vid.v v12
; RV64-BITS-256-NEXT:    vrsub.vx v16, v12, a0
; RV64-BITS-256-NEXT:    vrgather.vv v12, v8, v16
; RV64-BITS-256-NEXT:    vmv.v.v v8, v12
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv32i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    slli a0, a0, 2
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV64-BITS-512-NEXT:    vid.v v12
; RV64-BITS-512-NEXT:    vrsub.vx v16, v12, a0
; RV64-BITS-512-NEXT:    vrgather.vv v12, v8, v16
; RV64-BITS-512-NEXT:    vmv.v.v v8, v12
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8> %a)
  ret <vscale x 32 x i8> %res
}
define <vscale x 64 x i8> @reverse_nxv64i8(<vscale x 64 x i8> %a) {
; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV32-BITS-UNKNOWN:       # %bb.0:
; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV32-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV32-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vid.v v16
; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v24, v16, a0
; RV32-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v20, v8, v24
; RV32-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v16, v12, v24
; RV32-BITS-UNKNOWN-NEXT:    vmv8r.v v8, v16
; RV32-BITS-UNKNOWN-NEXT:    ret
;
; RV32-BITS-256-LABEL: reverse_nxv64i8:
; RV32-BITS-256:       # %bb.0:
; RV32-BITS-256-NEXT:    csrr a0, vlenb
; RV32-BITS-256-NEXT:    slli a0, a0, 3
; RV32-BITS-256-NEXT:    addi a0, a0, -1
; RV32-BITS-256-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; RV32-BITS-256-NEXT:    vid.v v16
; RV32-BITS-256-NEXT:    vrsub.vx v24, v16, a0
; RV32-BITS-256-NEXT:    vrgather.vv v16, v8, v24
; RV32-BITS-256-NEXT:    vmv.v.v v8, v16
; RV32-BITS-256-NEXT:    ret
;
; RV32-BITS-512-LABEL: reverse_nxv64i8:
; RV32-BITS-512:       # %bb.0:
; RV32-BITS-512-NEXT:    csrr a0, vlenb
; RV32-BITS-512-NEXT:    slli a0, a0, 2
; RV32-BITS-512-NEXT:    addi a0, a0, -1
; RV32-BITS-512-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV32-BITS-512-NEXT:    vid.v v16
; RV32-BITS-512-NEXT:    vrsub.vx v24, v16, a0
; RV32-BITS-512-NEXT:    vrgather.vv v20, v8, v24
; RV32-BITS-512-NEXT:    vrgather.vv v16, v12, v24
; RV32-BITS-512-NEXT:    vmv8r.v v8, v16
; RV32-BITS-512-NEXT:    ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8:
; RV64-BITS-UNKNOWN:       # %bb.0:
; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT:    addi a0, a0, -1
; RV64-BITS-UNKNOWN-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vid.v v16
; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v24, v16, a0
; RV64-BITS-UNKNOWN-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v20, v8, v24
; RV64-BITS-UNKNOWN-NEXT:    vrgatherei16.vv v16, v12, v24
; RV64-BITS-UNKNOWN-NEXT:    vmv8r.v v8, v16
; RV64-BITS-UNKNOWN-NEXT:    ret
;
; RV64-BITS-256-LABEL: reverse_nxv64i8:
; RV64-BITS-256:       # %bb.0:
; RV64-BITS-256-NEXT:    csrr a0, vlenb
; RV64-BITS-256-NEXT:    slli a0, a0, 3
; RV64-BITS-256-NEXT:    addi a0, a0, -1
; RV64-BITS-256-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; RV64-BITS-256-NEXT:    vid.v v16
; RV64-BITS-256-NEXT:    vrsub.vx v24, v16, a0
; RV64-BITS-256-NEXT:    vrgather.vv v16, v8, v24
; RV64-BITS-256-NEXT:    vmv.v.v v8, v16
; RV64-BITS-256-NEXT:    ret
;
; RV64-BITS-512-LABEL: reverse_nxv64i8:
; RV64-BITS-512:       # %bb.0:
; RV64-BITS-512-NEXT:    csrr a0, vlenb
; RV64-BITS-512-NEXT:    slli a0, a0, 2
; RV64-BITS-512-NEXT:    addi a0, a0, -1
; RV64-BITS-512-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; RV64-BITS-512-NEXT:    vid.v v16
; RV64-BITS-512-NEXT:    vrsub.vx v24, v16, a0
; RV64-BITS-512-NEXT:    vrgather.vv v20, v8, v24
; RV64-BITS-512-NEXT:    vrgather.vv v16, v12, v24
; RV64-BITS-512-NEXT:    vmv8r.v v8, v16
; RV64-BITS-512-NEXT:    ret
  %res = call <vscale x 64 x i8> @llvm.experimental.vector.reverse.nxv64i8(<vscale x 64 x i8> %a)
  ret <vscale x 64 x i8> %res
}
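; From e16 upward every configuration agrees: an index element is at least as
; wide as the data element, so one vid.v / vrsub.vx / vrgather.vv sequence
; serves all six RUN lines and the assertions collapse to the common CHECK
; prefix.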
define <vscale x 1 x i16> @reverse_nxv1i16(<vscale x 1 x i16> %a) {
; CHECK-LABEL: reverse_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i16> @llvm.experimental.vector.reverse.nxv1i16(<vscale x 1 x i16> %a)
  ret <vscale x 1 x i16> %res
}

define <vscale x 2 x i16> @reverse_nxv2i16(<vscale x 2 x i16> %a) {
; CHECK-LABEL: reverse_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i16> @llvm.experimental.vector.reverse.nxv2i16(<vscale x 2 x i16> %a)
  ret <vscale x 2 x i16> %res
}

define <vscale x 4 x i16> @reverse_nxv4i16(<vscale x 4 x i16> %a) {
; CHECK-LABEL: reverse_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i16> @llvm.experimental.vector.reverse.nxv4i16(<vscale x 4 x i16> %a)
  ret <vscale x 4 x i16> %res
}

define <vscale x 8 x i16> @reverse_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: reverse_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i16> @reverse_nxv16i16(<vscale x 16 x i16> %a) {
; CHECK-LABEL: reverse_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16> %a)
  ret <vscale x 16 x i16> %res
}

define <vscale x 32 x i16> @reverse_nxv32i16(<vscale x 32 x i16> %a) {
; CHECK-LABEL: reverse_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a0
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x i16> @llvm.experimental.vector.reverse.nxv32i16(<vscale x 32 x i16> %a)
  ret <vscale x 32 x i16> %res
}

define <vscale x 1 x i32> @reverse_nxv1i32(<vscale x 1 x i32> %a) {
; CHECK-LABEL: reverse_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i32> @llvm.experimental.vector.reverse.nxv1i32(<vscale x 1 x i32> %a)
  ret <vscale x 1 x i32> %res
}

define <vscale x 2 x i32> @reverse_nxv2i32(<vscale x 2 x i32> %a) {
; CHECK-LABEL: reverse_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i32> @llvm.experimental.vector.reverse.nxv2i32(<vscale x 2 x i32> %a)
  ret <vscale x 2 x i32> %res
}

define <vscale x 4 x i32> @reverse_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: reverse_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i32> @reverse_nxv8i32(<vscale x 8 x i32> %a) {
; CHECK-LABEL: reverse_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> %a)
  ret <vscale x 8 x i32> %res
}

define <vscale x 16 x i32> @reverse_nxv16i32(<vscale x 16 x i32> %a) {
; CHECK-LABEL: reverse_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a0
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i32> @llvm.experimental.vector.reverse.nxv16i32(<vscale x 16 x i32> %a)
  ret <vscale x 16 x i32> %res
}

define <vscale x 1 x i64> @reverse_nxv1i64(<vscale x 1 x i64> %a) {
; CHECK-LABEL: reverse_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i64> @llvm.experimental.vector.reverse.nxv1i64(<vscale x 1 x i64> %a)
  ret <vscale x 1 x i64> %res
}

define <vscale x 2 x i64> @reverse_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: reverse_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i64> @reverse_nxv4i64(<vscale x 4 x i64> %a) {
; CHECK-LABEL: reverse_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64> %a)
  ret <vscale x 4 x i64> %res
}

define <vscale x 8 x i64> @reverse_nxv8i64(<vscale x 8 x i64> %a) {
; CHECK-LABEL: reverse_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a0
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64> %a)
  ret <vscale x 8 x i64> %res
}

;
; VECTOR_REVERSE - floating point
;
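; Floating-point reverses reuse the integer lowering unchanged: the gather
; only depends on SEW and LMUL, so f16/f32/f64 produce the same sequences as
; the equally sized integer types above.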
define <vscale x 1 x half> @reverse_nxv1f16(<vscale x 1 x half> %a) {
; CHECK-LABEL: reverse_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x half> @llvm.experimental.vector.reverse.nxv1f16(<vscale x 1 x half> %a)
  ret <vscale x 1 x half> %res
}

define <vscale x 2 x half> @reverse_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: reverse_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.reverse.nxv2f16(<vscale x 2 x half> %a)
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x half> @reverse_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: reverse_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.reverse.nxv4f16(<vscale x 4 x half> %a)
  ret <vscale x 4 x half> %res
}

define <vscale x 8 x half> @reverse_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: reverse_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.reverse.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %res
}

define <vscale x 16 x half> @reverse_nxv16f16(<vscale x 16 x half> %a) {
; CHECK-LABEL: reverse_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.experimental.vector.reverse.nxv16f16(<vscale x 16 x half> %a)
  ret <vscale x 16 x half> %res
}

define <vscale x 32 x half> @reverse_nxv32f16(<vscale x 32 x half> %a) {
; CHECK-LABEL: reverse_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a0
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x half> @llvm.experimental.vector.reverse.nxv32f16(<vscale x 32 x half> %a)
  ret <vscale x 32 x half> %res
}

define <vscale x 1 x float> @reverse_nxv1f32(<vscale x 1 x float> %a) {
; CHECK-LABEL: reverse_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x float> @llvm.experimental.vector.reverse.nxv1f32(<vscale x 1 x float> %a)
  ret <vscale x 1 x float> %res
}

define <vscale x 2 x float> @reverse_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: reverse_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.reverse.nxv2f32(<vscale x 2 x float> %a)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x float> @reverse_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: reverse_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %res
}

define <vscale x 8 x float> @reverse_nxv8f32(<vscale x 8 x float> %a) {
; CHECK-LABEL: reverse_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.experimental.vector.reverse.nxv8f32(<vscale x 8 x float> %a)
  ret <vscale x 8 x float> %res
}

define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) {
; CHECK-LABEL: reverse_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a0
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x float> @llvm.experimental.vector.reverse.nxv16f32(<vscale x 16 x float> %a)
  ret <vscale x 16 x float> %res
}

define <vscale x 1 x double> @reverse_nxv1f64(<vscale x 1 x double> %a) {
; CHECK-LABEL: reverse_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x double> @llvm.experimental.vector.reverse.nxv1f64(<vscale x 1 x double> %a)
  ret <vscale x 1 x double> %res
}

define <vscale x 2 x double> @reverse_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: reverse_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %res
}

define <vscale x 4 x double> @reverse_nxv4f64(<vscale x 4 x double> %a) {
; CHECK-LABEL: reverse_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.experimental.vector.reverse.nxv4f64(<vscale x 4 x double> %a)
  ret <vscale x 4 x double> %res
}
%a) ret <vscale x 4 x double> %res } define <vscale x 8 x double> @reverse_nxv8f64(<vscale x 8 x double> %a) { ; CHECK-LABEL: reverse_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call <vscale x 8 x double> @llvm.experimental.vector.reverse.nxv8f64(<vscale x 8 x double> %a) ret <vscale x 8 x double> %res } declare <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1>) declare <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1>) declare <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1>) declare <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1>) declare <vscale x 32 x i1> @llvm.experimental.vector.reverse.nxv32i1(<vscale x 32 x i1>) declare <vscale x 64 x i1> @llvm.experimental.vector.reverse.nxv64i1(<vscale x 64 x i1>) declare <vscale x 1 x i8> @llvm.experimental.vector.reverse.nxv1i8(<vscale x 1 x i8>) declare <vscale x 2 x i8> @llvm.experimental.vector.reverse.nxv2i8(<vscale x 2 x i8>) declare <vscale x 4 x i8> @llvm.experimental.vector.reverse.nxv4i8(<vscale x 4 x i8>) declare <vscale x 8 x i8> @llvm.experimental.vector.reverse.nxv8i8(<vscale x 8 x i8>) declare <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8>) declare <vscale x 32 x i8> @llvm.experimental.vector.reverse.nxv32i8(<vscale x 32 x i8>) declare <vscale x 64 x i8> @llvm.experimental.vector.reverse.nxv64i8(<vscale x 64 x i8>) declare <vscale x 1 x i16> @llvm.experimental.vector.reverse.nxv1i16(<vscale x 1 x i16>) declare <vscale x 2 x i16> @llvm.experimental.vector.reverse.nxv2i16(<vscale x 2 x i16>) declare <vscale x 4 x i16> @llvm.experimental.vector.reverse.nxv4i16(<vscale x 4 x i16>) declare <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16>) declare <vscale x 16 x i16> @llvm.experimental.vector.reverse.nxv16i16(<vscale x 16 x i16>) declare <vscale x 32 x i16> @llvm.experimental.vector.reverse.nxv32i16(<vscale x 32 x i16>) declare <vscale x 1 x i32> @llvm.experimental.vector.reverse.nxv1i32(<vscale x 1 x i32>) declare <vscale x 2 x i32> @llvm.experimental.vector.reverse.nxv2i32(<vscale x 2 x i32>) declare <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32>) declare <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32>) declare <vscale x 16 x i32> @llvm.experimental.vector.reverse.nxv16i32(<vscale x 16 x i32>) declare <vscale x 1 x i64> @llvm.experimental.vector.reverse.nxv1i64(<vscale x 1 x i64>) declare <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64>) declare <vscale x 4 x i64> @llvm.experimental.vector.reverse.nxv4i64(<vscale x 4 x i64>) declare <vscale x 8 x i64> @llvm.experimental.vector.reverse.nxv8i64(<vscale x 8 x i64>) declare <vscale x 1 x half> @llvm.experimental.vector.reverse.nxv1f16(<vscale x 1 x half>) declare <vscale x 2 x half> @llvm.experimental.vector.reverse.nxv2f16(<vscale x 2 x half>) declare <vscale x 4 x half> @llvm.experimental.vector.reverse.nxv4f16(<vscale x 4 x half>) declare <vscale x 8 x half> @llvm.experimental.vector.reverse.nxv8f16(<vscale x 8 x half>) declare <vscale x 16 x half> @llvm.experimental.vector.reverse.nxv16f16(<vscale x 16 x half>) declare <vscale x 32 x half> 
@llvm.experimental.vector.reverse.nxv32f16(<vscale x 32 x half>) declare <vscale x 1 x float> @llvm.experimental.vector.reverse.nxv1f32(<vscale x 1 x float>) declare <vscale x 2 x float> @llvm.experimental.vector.reverse.nxv2f32(<vscale x 2 x float>) declare <vscale x 4 x float> @llvm.experimental.vector.reverse.nxv4f32(<vscale x 4 x float>) declare <vscale x 8 x float> @llvm.experimental.vector.reverse.nxv8f32(<vscale x 8 x float>) declare <vscale x 16 x float> @llvm.experimental.vector.reverse.nxv16f32(<vscale x 16 x float>) declare <vscale x 1 x double> @llvm.experimental.vector.reverse.nxv1f64(<vscale x 1 x double>) declare <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double>) declare <vscale x 4 x double> @llvm.experimental.vector.reverse.nxv4f64(<vscale x 4 x double>) declare <vscale x 8 x double> @llvm.experimental.vector.reverse.nxv8f64(<vscale x 8 x double>)
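
;
; NOTE: the unmasked reversal tests above all share one lowering recipe; the
; sketch below summarizes it (assuming the standard RVV definitions, where
; vlenb = VLEN/8 and vscale = VLEN/64):
;
;   csrr  a0, vlenb          ; a0 = vector register size in bytes (VLEN/8)
;   srli/slli a0, a0, k      ; scale vlenb to the element count n of the type,
;                            ; e.g. nxv2f32 has 2*vscale = vlenb/4 elements
;                            ; (srli 2); nxv8f64 has exactly vlenb (no shift)
;   addi  a0, a0, -1         ; a0 = n-1, the last element index
;   vid.v      vX            ; vX = {0, 1, ..., n-1}
;   vrsub.vx   vY, vX, a0    ; vY[i] = (n-1) - i, the reversed index vector
;   vrgather.vv vZ, v8, vY   ; vZ[i] = v8[(n-1)-i]
;   vmv.v.v    v8, vZ        ; move the result into the return register
;
; For example, with VLEN=128 an nxv2f32 value holds 4 elements: vid.v yields
; {0,1,2,3} and vrsub.vx with a0=3 yields {3,2,1,0}, so vrgather.vv emits the
; source elements in reverse order.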