; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare i8 @llvm.vp.reduce.add.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.add.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.umax.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 255
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.umax.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.smax.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.smax.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.umin.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 255
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.umin.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.smin.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.smin.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.and.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.and.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.or.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.or.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.xor.v2i8(i8, <2 x i8>, <2 x i1>, i32)

define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.xor.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.umin.v3i8(i8, <3 x i8>, <3 x i1>, i32)

define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 255
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.umin.v3i8(i8 %s, <3 x i8> %v, <3 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.add.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.add.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.umax.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 255
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.umax.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.smax.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.smax.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.umin.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 255
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.umin.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.smin.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.smin.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.and.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.and.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.or.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.or.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i8 @llvm.vp.reduce.xor.v4i8(i8, <4 x i8>, <4 x i1>, i32)

define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i8 @llvm.vp.reduce.xor.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
  ret i8 %r
}

declare i16 @llvm.vp.reduce.add.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.add.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.umax.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a0, a0, 16
; RV32-NEXT:    srli a0, a0, 16
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umax_v2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 48
; RV64-NEXT:    srli a0, a0, 48
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.umax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.smax.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.smax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.umin.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a0, a0, 16
; RV32-NEXT:    srli a0, a0, 16
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umin_v2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 48
; RV64-NEXT:    srli a0, a0, 48
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.umin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.smin.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.smin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.and.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.and.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.or.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.or.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.xor.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.xor.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.add.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.add.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.umax.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a0, a0, 16
; RV32-NEXT:    srli a0, a0, 16
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umax_v4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 48
; RV64-NEXT:    srli a0, a0, 48
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.umax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.smax.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.smax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.umin.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a0, a0, 16
; RV32-NEXT:    srli a0, a0, 16
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umin_v4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 48
; RV64-NEXT:    srli a0, a0, 48
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.umin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.smin.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.smin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.and.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.and.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.or.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.or.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i16 @llvm.vp.reduce.xor.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i16 @llvm.vp.reduce.xor.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
  ret i16 %r
}

declare i32 @llvm.vp.reduce.add.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.add.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.umax.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umax_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.umax.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.smax.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.smax.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.umin.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umin_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.umin.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.smin.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.smin.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.and.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.and.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.or.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.or.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.xor.v2i32(i32, <2 x i32>, <2 x i1>, i32)

define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.xor.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.add.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umax_v4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.umax.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.smax.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umin_v4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.umin.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.smin.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.and.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.or.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.xor.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i32 @llvm.vp.reduce.xor.v64i32(i32, <64 x i32>, <64 x i1>, i32)

define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a3, a1, -32
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    bltu a1, a3, .LBB49_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a2, a3
; CHECK-NEXT:  .LBB49_2:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vslidedown.vi v24, v0, 4
; CHECK-NEXT:    bltu a1, a3, .LBB49_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:  .LBB49_4:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v25, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vredxor.vs v25, v8, v25, v0.t
; CHECK-NEXT:    vmv.x.s a0, v25
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vredxor.vs v8, v16, v8, v0.t
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %r = call i32 @llvm.vp.reduce.xor.v64i32(i32 %s, <64 x i32> %v, <64 x i1> %m, i32 %evl)
  ret i32 %r
}

declare i64 @llvm.vp.reduce.add.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_add_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredsum.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_add_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredsum.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.add.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.umax.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umax_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.umax.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.smax.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smax_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredmax.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_smax_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredmax.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.smax.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.umin.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umin_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.umin.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.smin.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smin_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredmin.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_smin_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredmin.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.smin.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.and.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_and_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredand.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_and_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredand.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.and.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.or.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_or_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredor.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_or_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredor.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.or.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.xor.v2i64(i64, <2 x i64>, <2 x i1>, i32)

define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_xor_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT:    vredxor.vs v9, v8, v9, v0.t
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v9, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_xor_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT:    vredxor.vs v9, v8, v9, v0.t
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.xor.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.add.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_add_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredsum.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_add_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredsum.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.add.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.umax.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umax_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.umax.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.smax.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smax_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredmax.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_smax_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredmax.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.smax.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.umin.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_umin_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredminu.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.umin.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.smin.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_smin_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredmin.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_smin_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredmin.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.smin.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.and.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_and_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredand.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_and_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredand.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.and.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.or.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_or_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredor.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_or_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredor.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.or.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}

declare i64 @llvm.vp.reduce.xor.v4i64(i64, <4 x i64>, <4 x i1>, i32)

define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_xor_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT:    vredxor.vs v10, v8, v10, v0.t
; RV32-NEXT:    vmv.x.s a0, v10
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v10, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vpreduce_xor_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT:    vredxor.vs v10, v8, v10, v0.t
; RV64-NEXT:    vmv.x.s a0, v10
; RV64-NEXT:    ret
  %r = call i64 @llvm.vp.reduce.xor.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl)
  ret i64 %r
}