; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

declare i8 @llvm.vector.reduce.add.v1i8(<1 x i8>)

define i8 @vreduce_add_v1i8(<1 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i8>, <1 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)

define i8 @vreduce_add_v2i8(<2 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i8>, <2 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)

define i8 @vreduce_add_v4i8(<4 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i8>, <4 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)

define i8 @vreduce_add_v8i8(<8 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i8>, <8 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)

define i8 @vreduce_add_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i8>, <16 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>)

define i8 @vreduce_add_v32i8(<32 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i8>, <32 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>)

define i8 @vreduce_add_v64i8(<64 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i8>, <64 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>)

define i8 @vreduce_add_v128i8(<128 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i8>, <128 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.add.v256i8(<256 x i8>)

define i8 @vreduce_add_v256i8(<256 x i8>* %x) {
; CHECK-LABEL: vreduce_add_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <256 x i8>, <256 x i8>* %x
  %red = call i8 @llvm.vector.reduce.add.v256i8(<256 x i8> %v)
  ret i8 %red
}

declare i16 @llvm.vector.reduce.add.v1i16(<1 x i16>)

define i16 @vreduce_add_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v1i16(<1 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %v = load <1 x i8>, <1 x i8>* %x
  %e = sext <1 x i8> %v to <1 x i16>
  %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v1i16(<1 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %v = load <1 x i8>, <1 x i8>* %x
  %e = zext <1 x i8> %v to <1 x i16>
  %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v2i16(<2 x i16>)

define i16 @vreduce_add_v2i16(<2 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v2i16(<2 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i8>, <2 x i8>* %x
  %e = sext <2 x i8> %v to <2 x i16>
  %red = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v2i16(<2 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i8>, <2 x i8>* %x
  %e = zext <2 x i8> %v to <2 x i16>
  %red = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)

define i16 @vreduce_add_v4i16(<4 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v4i16(<4 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i8>, <4 x i8>* %x
  %e = sext <4 x i8> %v to <4 x i16>
  %red = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v4i16(<4 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i8>, <4 x i8>* %x
  %e = zext <4 x i8> %v to <4 x i16>
  %red = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)

define i16 @vreduce_add_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v8i16(<8 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i8>, <8 x i8>* %x
  %e = sext <8 x i8> %v to <8 x i16>
  %red = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v8i16(<8 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i8>, <8 x i8>* %x
  %e = zext <8 x i8> %v to <8 x i16>
  %red = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)

define i16 @vreduce_add_v16i16(<16 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v16i16(<16 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i8>, <16 x i8>* %x
  %e = sext <16 x i8> %v to <16 x i16>
  %red = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v16i16(<16 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i8>, <16 x i8>* %x
  %e = zext <16 x i8> %v to <16 x i16>
  %red = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>)

define i16 @vreduce_add_v32i16(<32 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v32i16(<32 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i8>, <32 x i8>* %x
  %e = sext <32 x i8> %v to <32 x i16>
  %red = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v32i16(<32 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i8>, <32 x i8>* %x
  %e = zext <32 x i8> %v to <32 x i16>
  %red = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>)

define i16 @vreduce_add_v64i16(<64 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, zero
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v64i16(<64 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i8>, <64 x i8>* %x
  %e = sext <64 x i8> %v to <64 x i16>
  %red = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v64i16(<64 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i8>, <64 x i8>* %x
  %e = zext <64 x i8> %v to <64 x i16>
  %red = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %e)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.add.v128i16(<128 x i16>)

define i16 @vreduce_add_v128i16(<128 x i16>* %x) {
; CHECK-LABEL: vreduce_add_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, zero
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i16>, <128 x i16>* %x
  %red = call i16 @llvm.vector.reduce.add.v128i16(<128 x i16> %v)
  ret i16 %red
}

define i16 @vwreduce_add_v128i16(<128 x i8>* %x) {
; CHECK-LABEL: vwreduce_add_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, 64
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.vv v24, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v24, v8
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i8>, <128 x i8>* %x
  %e = sext <128 x i8> %v to <128 x i16>
  %red = call i16 @llvm.vector.reduce.add.v128i16(<128 x i16> %e)
  ret i16 %red
}

define i16 @vwreduce_uadd_v128i16(<128 x i8>* %x) {
; CHECK-LABEL: vwreduce_uadd_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, 64
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwaddu.vv v24, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v24, v8
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i8>, <128 x i8>* %x
  %e = zext <128 x i8> %v to <128 x i16>
  %red = call i16 @llvm.vector.reduce.add.v128i16(<128 x i16> %e)
  ret i16 %red
}

declare i32 @llvm.vector.reduce.add.v1i32(<1 x i32>)

define i32 @vreduce_add_v1i32(<1 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i32>, <1 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v1i32(<1 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsext.vf2 v9, v8
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %e = sext <1 x i16> %v to <1 x i32>
  %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v1i32(<1 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv.x.s a0, v9
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %e = zext <1 x i16> %v to <1 x i32>
  %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %e)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)

define i32 @vreduce_add_v2i32(<2 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i32>, <2 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v2i32(<2 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %e = sext <2 x i16> %v to <2 x i32>
  %red = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v2i32(<2 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %e = zext <2 x i16> %v to <2 x i32>
  %red = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %e)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

define i32 @vreduce_add_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i32>, <4 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v4i32(<4 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %e = sext <4 x i16> %v to <4 x i32>
  %red = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v4i32(<4 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %e = zext <4 x i16> %v to <4 x i32>
  %red = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %e)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)

define i32 @vreduce_add_v8i32(<8 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i32>, <8 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v8i32(<8 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %e = sext <8 x i16> %v to <8 x i32>
  %red = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v8i32(<8 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v9, zero
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %e = zext <8 x i16> %v to <8 x i32>
  %red = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %e)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)

define i32 @vreduce_add_v16i32(<16 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vredsum.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i32>, <16 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v16i32(<16 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %e = sext <16 x i16> %v to <16 x i32>
  %red = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v16i32(<16 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v10, zero
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %e = zext <16 x i16> %v to <16 x i32>
  %red = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %e)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>)

define i32 @vreduce_add_v32i32(<32 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, zero
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i32>, <32 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v32i32(<32 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwredsum.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %e = sext <32 x i16> %v to <32 x i32>
  %red = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v32i32(<32 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, zero
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwredsumu.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %e = zext <32 x i16> %v to <32 x i32>
  %red = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %e)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>)

define i32 @vreduce_add_v64i32(<64 x i32>* %x) {
; CHECK-LABEL: vreduce_add_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, zero
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i32>, <64 x i32>* %x
  %red = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %v)
  ret i32 %red
}

define i32 @vwreduce_add_v64i32(<64 x i16>* %x) {
; CHECK-LABEL: vwreduce_add_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.vv v24, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v24, v8
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %e = sext <64 x i16> %v to <64 x i32>
  %red = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %e)
  ret i32 %red
}

define i32 @vwreduce_uadd_v64i32(<64 x i16>* %x) {
; CHECK-LABEL: vwreduce_uadd_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwaddu.vv v24, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vredsum.vs v8, v24, v8
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %e = zext <64 x i16> %v to <64 x i32>
  %red = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %e)
  ret i32 %red
}

declare i64 @llvm.vector.reduce.add.v1i64(<1 x i64>)

define i64 @vreduce_add_v1i64(<1 x i64>* %x) {
; RV32-LABEL: vreduce_add_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <1 x i64>, <1 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v1i64(<1 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsext.vf2 v9, v8
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsrl.vx v8, v9, a0
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsext.vf2 v9, v8
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %v = load <1 x i32>, <1 x i32>* %x
  %e = sext <1 x i32> %v to <1 x i64>
  %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v1i64(<1 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vzext.vf2 v9, v8
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsrl.vx v8, v9, a0
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    vmv.x.s a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vzext.vf2 v9, v8
; RV64-NEXT:    vmv.x.s a0, v9
; RV64-NEXT:    ret
  %v = load <1 x i32>, <1 x i32>* %x
  %e = zext <1 x i32> %v to <1 x i64>
  %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %e)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)

define i64 @vreduce_add_v2i64(<2 x i64>* %x) {
; RV32-LABEL: vreduce_add_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    vmv.s.x v9, zero
; RV32-NEXT:    vredsum.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.s.x v9, zero
; RV64-NEXT:    vredsum.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i64>, <2 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v2i64(<2 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vmv.s.x v9, zero
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; RV32-NEXT:    vwredsum.vs v8, v8, v9
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vmv.s.x v9, zero
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; RV64-NEXT:    vwredsum.vs v8, v8, v9
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i32>, <2 x i32>* %x
  %e = sext <2 x i32> %v to <2 x i64>
  %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v2i64(<2 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vmv.s.x v9, zero
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; RV32-NEXT:    vwredsumu.vs v8, v8, v9
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vmv.s.x v9, zero
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
; RV64-NEXT:    vwredsumu.vs v8, v8, v9
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i32>, <2 x i32>* %x
  %e = zext <2 x i32> %v to <2 x i64>
  %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %e)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)

define i64 @vreduce_add_v4i64(<4 x i64>* %x) {
; RV32-LABEL: vreduce_add_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    vmv.s.x v10, zero
; RV32-NEXT:    vredsum.vs v8, v8, v10
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.s.x v10, zero
; RV64-NEXT:    vredsum.vs v8, v8, v10
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i64>, <4 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v4i64(<4 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, zero
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vwredsum.vs v8, v8, v9
; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, zero
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vwredsum.vs v8, v8, v9
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i32>, <4 x i32>* %x
  %e = sext <4 x i32> %v to <4 x i64>
  %red = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v4i64(<4 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v9, zero
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vwredsumu.vs v8, v8, v9
; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v9, zero
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vwredsumu.vs v8, v8, v9
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i32>, <4 x i32>* %x
  %e = zext <4 x i32> %v to <4 x i64>
  %red = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %e)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)

define i64 @vreduce_add_v8i64(<8 x i64>* %x) {
; RV32-LABEL: vreduce_add_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    vmv.s.x v12, zero
; RV32-NEXT:    vredsum.vs v8, v8, v12
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.s.x v12, zero
; RV64-NEXT:    vredsum.vs v8, v8, v12
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i64>, <8 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v8i64(<8 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v10, zero
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT:    vwredsum.vs v8, v8, v10
; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, zero
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT:    vwredsum.vs v8, v8, v10
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i32>, <8 x i32>* %x
  %e = sext <8 x i32> %v to <8 x i64>
  %red = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v8i64(<8 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v10, zero
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT:    vwredsumu.vs v8, v8, v10
; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v10, zero
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT:    vwredsumu.vs v8, v8, v10
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i32>, <8 x i32>* %x
  %e = zext <8 x i32> %v to <8 x i64>
  %red = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %e)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)

define i64 @vreduce_add_v16i64(<16 x i64>* %x) {
; RV32-LABEL: vreduce_add_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    vmv.s.x v16, zero
; RV32-NEXT:    vredsum.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.s.x v16, zero
; RV64-NEXT:    vredsum.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i64>, <16 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v16i64(<16 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v12, zero
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vwredsum.vs v8, v8, v12
; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v12, zero
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vwredsum.vs v8, v8, v12
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i32>, <16 x i32>* %x
  %e = sext <16 x i32> %v to <16 x i64>
  %red = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v16i64(<16 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v12, zero
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vwredsumu.vs v8, v8, v12
; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v12, zero
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vwredsumu.vs v8, v8, v12
; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i32>, <16 x i32>* %x
  %e = zext <16 x i32> %v to <16 x i64>
  %red = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %e)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.add.v32i64(<32 x i64>)

define i64 @vreduce_add_v32i64(<32 x i64>* %x) {
; RV32-LABEL: vreduce_add_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle64.v v16, (a0)
; RV32-NEXT:    vmv.s.x v24, zero
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    vredsum.vs v8, v8, v24
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle64.v v16, (a0)
; RV64-NEXT:    vmv.s.x v24, zero
; RV64-NEXT:    vadd.vv v8, v8, v16
; RV64-NEXT:    vredsum.vs v8, v8, v24
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i64>, <32 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v32i64(<32 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV32-NEXT:    vslidedown.vi v16, v8, 16
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vwadd.vv v24, v8, v16
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v8, zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredsum.vs v8, v24, v8
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 32
; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV64-NEXT:    vslidedown.vi v16, v8, 16
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vwadd.vv v24, v8, v16
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v8, zero
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vredsum.vs v8, v24, v8
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i32>, <32 x i32>* %x
  %e = sext <32 x i32> %v to <32 x i64>
  %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v32i64(<32 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV32-NEXT:    vslidedown.vi v16, v8, 16
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vwaddu.vv v24, v8, v16
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vmv.s.x v8, zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredsum.vs v8, v24, v8
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 32
; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV64-NEXT:    vslidedown.vi v16, v8, 16
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vwaddu.vv v24, v8, v16
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vmv.s.x v8, zero
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vredsum.vs v8, v24, v8
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i32>, <32 x i32>* %x
  %e = zext <32 x i32> %v to <32 x i64>
  %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> %e)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.add.v64i64(<64 x i64>)

define i64 @vreduce_add_v64i64(<64 x i64>* %x) nounwind {
; RV32-LABEL: vreduce_add_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi a1, a0, 384
; RV32-NEXT:    vle64.v v16, (a1)
; RV32-NEXT:    addi a1, a0, 256
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle64.v v24, (a0)
; RV32-NEXT:    vle64.v v0, (a1)
; RV32-NEXT:    vadd.vv v16, v24, v16
; RV32-NEXT:    vadd.vv v8, v8, v0
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    vmv.s.x v16, zero
; RV32-NEXT:    vredsum.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_add_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi a1, a0, 384
; RV64-NEXT:    vle64.v v16, (a1)
; RV64-NEXT:    addi a1, a0, 256
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle64.v v24, (a0)
; RV64-NEXT:    vle64.v v0, (a1)
; RV64-NEXT:    vadd.vv v16, v24, v16
; RV64-NEXT:    vadd.vv v8, v8, v0
; RV64-NEXT:    vadd.vv v8, v8, v16
; RV64-NEXT:    vmv.s.x v16, zero
; RV64-NEXT:    vredsum.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <64 x i64>, <64 x i64>* %x
  %red = call i64 @llvm.vector.reduce.add.v64i64(<64 x i64> %v)
  ret i64 %red
}

define i64 @vwreduce_add_v64i64(<64 x i32>* %x) {
; RV32-LABEL: vwreduce_add_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 5
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    addi a1, a0, 128
; RV32-NEXT:    li a2, 32
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vle32.v v16, (a1)
; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV32-NEXT:    vslidedown.vi v24, v8, 16
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 24
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vslidedown.vi v24, v16, 16
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 24
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vwadd.vv v0, v24, v8
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vwadd.vv v0, v8, v16
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vadd.vv v8, v0, v8
; RV32-NEXT:    vmv.s.x v16, zero
; RV32-NEXT:    vredsum.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a2
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 5
; RV32-NEXT:    add sp, sp, a2
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_add_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 5
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    addi a1, a0, 128
; RV64-NEXT:    li a2, 32
; RV64-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vle32.v v16, (a1)
; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV64-NEXT:    vslidedown.vi v24, v8, 16
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a1, 24
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vslidedown.vi v24, v16, 16
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a1, 24
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vwadd.vv v0, v24, v8
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vwadd.vv v0, v8, v16
; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vadd.vv v8, v0, v8
; RV64-NEXT:    vmv.s.x v16, zero
; RV64-NEXT:    vredsum.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 5
; RV64-NEXT:    add sp, sp, a1
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %v = load <64 x i32>, <64 x i32>* %x
  %e = sext <64 x i32> %v to <64 x i64>
  %red = call i64 @llvm.vector.reduce.add.v64i64(<64 x i64> %e)
  ret i64 %red
}

define i64 @vwreduce_uadd_v64i64(<64 x i32>* %x) {
; RV32-LABEL: vwreduce_uadd_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 5
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    addi a1, a0, 128
; RV32-NEXT:    li a2, 32
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vle32.v v16, (a1)
; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV32-NEXT:    vslidedown.vi v24, v8, 16
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 24
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vslidedown.vi v24, v16, 16
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 24
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vwaddu.vv v0, v24, v8
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vwaddu.vv v0, v8, v16
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 16
; RV32-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vadd.vv v8, v0, v8
; RV32-NEXT:    vmv.s.x v16, zero
; RV32-NEXT:    vredsum.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a2
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 5
; RV32-NEXT:    add sp, sp, a2
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vwreduce_uadd_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 5
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    addi a1, a0, 128
; RV64-NEXT:    li a2, 32
; RV64-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vle32.v v16, (a1)
; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; RV64-NEXT:    vslidedown.vi v24, v8, 16
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a1, 24
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vslidedown.vi v24, v16, 16
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a1, 24
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vwaddu.vv v0, v24, v8
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vwaddu.vv v0, v8, v16
; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vadd.vv v8, v0, v8
; RV64-NEXT:    vmv.s.x v16, zero
; RV64-NEXT:    vredsum.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 5
; RV64-NEXT:    add sp, sp, a1
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %v = load <64 x i32>, <64 x i32>* %x
  %e = zext <64 x i32> %v to <64 x i64>
  %red = call i64 @llvm.vector.reduce.add.v64i64(<64 x i64> %e)
  ret i64 %red
}

declare i8 @llvm.vector.reduce.and.v1i8(<1 x i8>)

define i8 @vreduce_and_v1i8(<1 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i8>, <1 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v1i8(<1 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>)

define i8 @vreduce_and_v2i8(<2 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i8>, <2 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)

define i8 @vreduce_and_v4i8(<4 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i8>, <4 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)

define i8 @vreduce_and_v8i8(<8 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i8>, <8 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)

define i8 @vreduce_and_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i8>, <16 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)

define i8 @vreduce_and_v32i8(<32 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i8>, <32 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>)

define i8 @vreduce_and_v64i8(<64 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v12, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i8>, <64 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v64i8(<64 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>)

define i8 @vreduce_and_v128i8(<128 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i8>, <128 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v128i8(<128 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.and.v256i8(<256 x i8>)

define i8 @vreduce_and_v256i8(<256 x i8>* %x) {
; CHECK-LABEL: vreduce_and_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <256 x i8>, <256 x i8>* %x
  %red = call i8 @llvm.vector.reduce.and.v256i8(<256 x i8> %v)
  ret i8 %red
}

declare i16 @llvm.vector.reduce.and.v1i16(<1 x i16>)

define i16 @vreduce_and_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v1i16(<1 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)

define i16 @vreduce_and_v2i16(<2 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)

define i16 @vreduce_and_v4i16(<4 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)

define i16 @vreduce_and_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)

define i16 @vreduce_and_v16i16(<16 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v32i16(<32 x i16>)

define i16 @vreduce_and_v32i16(<32 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v12, -1
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v32i16(<32 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v64i16(<64 x i16>)

define i16 @vreduce_and_v64i16(<64 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredand.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.and.v64i16(<64 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.and.v128i16(<128 x i16>)

define i16 @vreduce_and_v128i16(<128 x i16>* %x) {
; CHECK-LABEL: vreduce_and_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    vsetivli
zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v128i16(<128 x i16> %v) ret i16 %red } declare i32 @llvm.vector.reduce.and.v1i32(<1 x i32>) define i32 @vreduce_and_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>) define i32 @vreduce_and_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>) define i32 @vreduce_and_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>) define i32 @vreduce_and_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, -1 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>) define i32 @vreduce_and_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, -1 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.and.v32i32(<32 x i32>) define i32 @vreduce_and_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v32i32(<32 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.and.v64i32(<64 x i32>) define 
i32 @vreduce_and_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v64i32(<64 x i32> %v) ret i32 %red } declare i64 @llvm.vector.reduce.and.v1i64(<1 x i64>) define i64 @vreduce_and_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_and_v1i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v1i64(<1 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>) define i64 @vreduce_and_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_and_v2i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v9, -1 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v9, -1 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>) define i64 @vreduce_and_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_and_v4i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v10, -1 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v10, -1 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>) define i64 @vreduce_and_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_and_v8i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: 
vmv.v.i v12, -1 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v12, -1 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>) define i64 @vreduce_and_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_and_v16i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, -1 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.and.v32i64(<32 x i64>) define i64 @vreduce_and_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_and_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vand.vv v8, v8, v16 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, -1 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v32i64(<32 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.and.v64i64(<64 x i64>) define i64 @vreduce_and_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_and_v64i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) ; RV32-NEXT: addi a1, a0, 256 ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v24, (a0) ; RV32-NEXT: vle64.v v0, (a1) ; RV32-NEXT: vand.vv v16, v24, v16 ; RV32-NEXT: vand.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: 
vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v64i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) ; RV64-NEXT: addi a1, a0, 256 ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v24, (a0) ; RV64-NEXT: vle64.v v0, (a1) ; RV64-NEXT: vand.vv v16, v24, v16 ; RV64-NEXT: vand.vv v8, v8, v0 ; RV64-NEXT: vand.vv v8, v8, v16 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, -1 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <64 x i64>, <64 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v64i64(<64 x i64> %v) ret i64 %red } declare i8 @llvm.vector.reduce.or.v1i8(<1 x i8>) define i8 @vreduce_or_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v1i8(<1 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>) define i8 @vreduce_or_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>) define i8 @vreduce_or_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>) define i8 @vreduce_or_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>) define i8 @vreduce_or_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>) define i8 @vreduce_or_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: 
ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v64i8(<64 x i8>) define i8 @vreduce_or_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v64i8(<64 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v128i8(<128 x i8>) define i8 @vreduce_or_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v128i8(<128 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.or.v256i8(<256 x i8>) define i8 @vreduce_or_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v256i8(<256 x i8> %v) ret i8 %red } declare i16 @llvm.vector.reduce.or.v1i16(<1 x i16>) define i16 @vreduce_or_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v1i16(<1 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>) define i16 @vreduce_or_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>) define i16 @vreduce_or_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>) define i16 @vreduce_or_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = 
load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>) define i16 @vreduce_or_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v32i16(<32 x i16>) define i16 @vreduce_or_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v32i16(<32 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v64i16(<64 x i16>) define i16 @vreduce_or_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v64i16(<64 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.or.v128i16(<128 x i16>) define i16 @vreduce_or_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v128i16(<128 x i16> %v) ret i16 %red } declare i32 @llvm.vector.reduce.or.v1i32(<1 x i32>) define i32 @vreduce_or_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>) define i32 @vreduce_or_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>) define i32 @vreduce_or_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 ; 
CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>) define i32 @vreduce_or_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>) define i32 @vreduce_or_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.or.v32i32(<32 x i32>) define i32 @vreduce_or_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v32i32(<32 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.or.v64i32(<64 x i32>) define i32 @vreduce_or_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v64i32(<64 x i32> %v) ret i32 %red } declare i64 @llvm.vector.reduce.or.v1i64(<1 x i64>) define i64 @vreduce_or_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_or_v1i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v1i64(<1 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>) define i64 @vreduce_or_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_or_v2i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero ; RV32-NEXT: vredor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; 
RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vredor.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>) define i64 @vreduce_or_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_or_v4i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v10, zero ; RV32-NEXT: vredor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vredor.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>) define i64 @vreduce_or_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_or_v8i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, zero ; RV32-NEXT: vredor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vredor.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>) define i64 @vreduce_or_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_or_v16i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.or.v32i64(<32 x i64>) define i64 @vreduce_or_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_or_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vmv.s.x v24, zero ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vredor.vs v8, v8, v24 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vredor.vs v8, v8, v24 ; RV64-NEXT: vmv.x.s a0, v8 ; 
RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v32i64(<32 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.or.v64i64(<64 x i64>) define i64 @vreduce_or_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_or_v64i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) ; RV32-NEXT: addi a1, a0, 256 ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v24, (a0) ; RV32-NEXT: vle64.v v0, (a1) ; RV32-NEXT: vor.vv v16, v24, v16 ; RV32-NEXT: vor.vv v8, v8, v0 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v64i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) ; RV64-NEXT: addi a1, a0, 256 ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v24, (a0) ; RV64-NEXT: vle64.v v0, (a1) ; RV64-NEXT: vor.vv v16, v24, v16 ; RV64-NEXT: vor.vv v8, v8, v0 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <64 x i64>, <64 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v64i64(<64 x i64> %v) ret i64 %red } declare i8 @llvm.vector.reduce.xor.v1i8(<1 x i8>) define i8 @vreduce_xor_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v2i8(<2 x i8>) define i8 @vreduce_xor_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v2i8(<2 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v4i8(<4 x i8>) define i8 @vreduce_xor_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>) define i8 @vreduce_xor_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>) define i8 @vreduce_xor_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: 
ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>) define i8 @vreduce_xor_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v64i8(<64 x i8>) define i8 @vreduce_xor_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v64i8(<64 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v128i8(<128 x i8>) define i8 @vreduce_xor_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v128i8(<128 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.xor.v256i8(<256 x i8>) define i8 @vreduce_xor_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v256i8(<256 x i8> %v) ret i8 %red } declare i16 @llvm.vector.reduce.xor.v1i16(<1 x i16>) define i16 @vreduce_xor_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v1i16(<1 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v2i16(<2 x i16>) define i16 @vreduce_xor_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>) define i16 @vreduce_xor_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; 
CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>) define i16 @vreduce_xor_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>) define i16 @vreduce_xor_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v32i16(<32 x i16>) define i16 @vreduce_xor_v32i16(<32 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v32i16(<32 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v64i16(<64 x i16>) define i16 @vreduce_xor_v64i16(<64 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v64i16(<64 x i16> %v) ret i16 %red } declare i16 @llvm.vector.reduce.xor.v128i16(<128 x i16>) define i16 @vreduce_xor_v128i16(<128 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v128i16(<128 x i16> %v) ret i16 %red } declare i32 @llvm.vector.reduce.xor.v1i32(<1 x i32>) define i32 @vreduce_xor_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>) define i32 @vreduce_xor_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: 
vreduce_xor_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>) define i32 @vreduce_xor_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>) define i32 @vreduce_xor_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.xor.v16i32(<16 x i32>) define i32 @vreduce_xor_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.xor.v32i32(<32 x i32>) define i32 @vreduce_xor_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v32i32(<32 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.xor.v64i32(<64 x i32>) define i32 @vreduce_xor_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v64i32(<64 x i32> %v) ret i32 %red } declare i64 @llvm.vector.reduce.xor.v1i64(<1 x i64>) define i64 @vreduce_xor_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v1i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x 
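; Note: for the i64 reductions the RV32 and RV64 checks differ because an i64
; scalar result occupies the a0/a1 register pair on RV32 (ilp32 ABI): the low
; word is read with vmv.x.s and the high word is recovered by shifting the
; result element right by 32 with vsrl.vx, while RV64 extracts the whole
; result with a single vmv.x.s.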
%red = call i64 @llvm.vector.reduce.xor.v1i64(<1 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>) define i64 @vreduce_xor_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v2i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero ; RV32-NEXT: vredxor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vredxor.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>) define i64 @vreduce_xor_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v4i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v10, zero ; RV32-NEXT: vredxor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vredxor.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.xor.v8i64(<8 x i64>) define i64 @vreduce_xor_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v8i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, zero ; RV32-NEXT: vredxor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vredxor.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v8i64(<8 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>) define i64 @vreduce_xor_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v16i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredxor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.xor.v32i64(<32 x i64>) define i64 @vreduce_xor_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, 
m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vmv.s.x v24, zero ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: vredxor.vs v8, v8, v24 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vxor.vv v8, v8, v16 ; RV64-NEXT: vredxor.vs v8, v8, v24 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v32i64(<32 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.xor.v64i64(<64 x i64>) define i64 @vreduce_xor_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_xor_v64i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) ; RV32-NEXT: addi a1, a0, 256 ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v24, (a0) ; RV32-NEXT: vle64.v v0, (a1) ; RV32-NEXT: vxor.vv v16, v24, v16 ; RV32-NEXT: vxor.vv v8, v8, v0 ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v64i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) ; RV64-NEXT: addi a1, a0, 256 ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v24, (a0) ; RV64-NEXT: vle64.v v0, (a1) ; RV64-NEXT: vxor.vv v16, v24, v16 ; RV64-NEXT: vxor.vv v8, v8, v0 ; RV64-NEXT: vxor.vv v8, v8, v16 ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredxor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <64 x i64>, <64 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v64i64(<64 x i64> %v) ret i64 %red } declare i8 @llvm.vector.reduce.smin.v1i8(<1 x i8>) define i8 @vreduce_smin_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v1i8(<1 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>) define i8 @vreduce_smin_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>) define i8 @vreduce_smin_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> %v) ret i8 %red } declare 
i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>) define i8 @vreduce_smin_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v8i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>) define i8 @vreduce_smin_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>) define i8 @vreduce_smin_v32i8(<32 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v64i8(<64 x i8>) define i8 @vreduce_smin_v64i8(<64 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v64i8(<64 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v128i8(<128 x i8>) define i8 @vreduce_smin_v128i8(<128 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v128i8(<128 x i8> %v) ret i8 %red } declare i8 @llvm.vector.reduce.smin.v256i8(<256 x i8>) define i8 @vreduce_smin_v256i8(<256 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v256i8(<256 x i8> %v) ret i8 %red } declare i16 @llvm.vector.reduce.smin.v1i16(<1 x i16>) define i16 @vreduce_smin_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: 
declare i16 @llvm.vector.reduce.smin.v1i16(<1 x i16>)

define i16 @vreduce_smin_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_smin_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v1i16(<1 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>)

define i16 @vreduce_smin_v2i16(<2 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vredmin.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmin.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>)

define i16 @vreduce_smin_v4i16(<4 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vredmin.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmin.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)

define i16 @vreduce_smin_v8i16(<8 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v8i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vredmin.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v8i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmin.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)

define i16 @vreduce_smin_v16i16(<16 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v16i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v10, a0
; RV32-NEXT:    vredmin.vs v8, v8, v10
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v16i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vredmin.vs v8, v8, v10
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v32i16(<32 x i16>)

define i16 @vreduce_smin_v32i16(<32 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v32i16:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v12, a0
; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v12
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v32i16:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 32
; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v12, a0
; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; RV64-NEXT:    vredmin.vs v8, v8, v12
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v32i16(<32 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v64i16(<64 x i16>)

define i16 @vreduce_smin_v64i16(<64 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v64i16:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 64
; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v16, a0
; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v64i16:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 64
; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV64-NEXT:    vredmin.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v64i16(<64 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smin.v128i16(<128 x i16>)

define i16 @vreduce_smin_v128i16(<128 x i16>* %x) {
; RV32-LABEL: vreduce_smin_v128i16:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 64
; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle16.v v16, (a0)
; RV32-NEXT:    vmin.vv v8, v8, v16
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT:    vmv.s.x v16, a0
; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v128i16:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 64
; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV64-NEXT:    vle16.v v8, (a0)
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle16.v v16, (a0)
; RV64-NEXT:    vmin.vv v8, v8, v16
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; RV64-NEXT:    vredmin.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <128 x i16>, <128 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smin.v128i16(<128 x i16> %v)
  ret i16 %red
}
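; NOTE (editorial): for i32 the smin start value INT32_MAX is likewise built
; as `lui a0, 524288` (0x80000000) minus one; only the `addi`/`addiw` choice
; differs between RV32 and RV64.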
declare i32 @llvm.vector.reduce.smin.v1i32(<1 x i32>)

define i32 @vreduce_smin_v1i32(<1 x i32>* %x) {
; CHECK-LABEL: vreduce_smin_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i32>, <1 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>)

define i32 @vreduce_smin_v2i32(<2 x i32>* %x) {
; RV32-LABEL: vreduce_smin_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vredmin.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmin.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i32>, <2 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)

define i32 @vreduce_smin_v4i32(<4 x i32>* %x) {
; RV32-LABEL: vreduce_smin_v4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vredmin.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmin.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i32>, <4 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>)

define i32 @vreduce_smin_v8i32(<8 x i32>* %x) {
; RV32-LABEL: vreduce_smin_v8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v10, a0
; RV32-NEXT:    vredmin.vs v8, v8, v10
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vredmin.vs v8, v8, v10
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i32>, <8 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)

define i32 @vreduce_smin_v16i32(<16 x i32>* %x) {
; RV32-LABEL: vreduce_smin_v16i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vmv.s.x v12, a0
; RV32-NEXT:    vredmin.vs v8, v8, v12
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v16i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vmv.s.x v12, a0
; RV64-NEXT:    vredmin.vs v8, v8, v12
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i32>, <16 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smin.v32i32(<32 x i32>)

define i32 @vreduce_smin_v32i32(<32 x i32>* %x) {
; RV32-LABEL: vreduce_smin_v32i32:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT:    vmv.s.x v16, a0
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v32i32:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 32
; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV64-NEXT:    vredmin.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i32>, <32 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v32i32(<32 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smin.v64i32(<64 x i32>)

define i32 @vreduce_smin_v64i32(<64 x i32>* %x) {
; RV32-LABEL: vreduce_smin_v64i32:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle32.v v16, (a0)
; RV32-NEXT:    vmin.vv v8, v8, v16
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT:    vmv.s.x v16, a0
; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v64i32:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 32
; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV64-NEXT:    vle32.v v8, (a0)
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle32.v v16, (a0)
; RV64-NEXT:    vmin.vv v8, v8, v16
; RV64-NEXT:    lui a0, 524288
; RV64-NEXT:    addiw a0, a0, -1
; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; RV64-NEXT:    vredmin.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <64 x i32>, <64 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smin.v64i32(<64 x i32> %v)
  ret i32 %red
}
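; NOTE (editorial): i64 is where the two targets diverge most. RV64 builds
; INT64_MAX with `li a0, -1; srli a0, a0, 1` and inserts it with vmv.s.x.
; RV32 has no 64-bit GPRs, so it stores the two halves (-1 low, 0x7fffffff
; high) to a stack slot and splats the 64-bit value with a zero-stride
; vlse64.v; the i64 result is then returned as an a0/a1 pair, the high half
; extracted with a 32-bit vsrl.vx before the second vmv.x.s.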
declare i64 @llvm.vector.reduce.smin.v1i64(<1 x i64>)

define i64 @vreduce_smin_v1i64(<1 x i64>* %x) {
; RV32-LABEL: vreduce_smin_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <1 x i64>, <1 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)

define i64 @vreduce_smin_v2i64(<2 x i64>* %x) {
; RV32-LABEL: vreduce_smin_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, -1
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmin.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i64>, <2 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>)

define i64 @vreduce_smin_v4i64(<4 x i64>* %x) {
; RV32-LABEL: vreduce_smin_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, -1
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v10
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vredmin.vs v8, v8, v10
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i64>, <4 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smin.v8i64(<8 x i64>)

define i64 @vreduce_smin_v8i64(<8 x i64>* %x) {
; RV32-LABEL: vreduce_smin_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, -1
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v12
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    vmv.s.x v12, a0
; RV64-NEXT:    vredmin.vs v8, v8, v12
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i64>, <8 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)

define i64 @vreduce_smin_v16i64(<16 x i64>* %x) {
; RV32-LABEL: vreduce_smin_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, -1
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vredmin.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i64>, <16 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smin.v32i64(<32 x i64>)

define i64 @vreduce_smin_v32i64(<32 x i64>* %x) {
; RV32-LABEL: vreduce_smin_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle64.v v16, (a0)
; RV32-NEXT:    li a0, -1
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    vmin.vv v8, v8, v16
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle64.v v16, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    vmv.s.x v24, a0
; RV64-NEXT:    vmin.vv v8, v8, v16
; RV64-NEXT:    vredmin.vs v8, v8, v24
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i64>, <32 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v32i64(<32 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smin.v64i64(<64 x i64>)

define i64 @vreduce_smin_v64i64(<64 x i64>* %x) nounwind {
; RV32-LABEL: vreduce_smin_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi a1, a0, 256
; RV32-NEXT:    vle64.v v16, (a1)
; RV32-NEXT:    addi a1, a0, 384
; RV32-NEXT:    vle64.v v24, (a1)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle64.v v0, (a0)
; RV32-NEXT:    li a0, -1
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    vmin.vv v24, v0, v24
; RV32-NEXT:    vmin.vv v8, v8, v16
; RV32-NEXT:    vmin.vv v8, v8, v24
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredmin.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smin_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi a1, a0, 384
; RV64-NEXT:    vle64.v v16, (a1)
; RV64-NEXT:    addi a1, a0, 256
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle64.v v24, (a0)
; RV64-NEXT:    vle64.v v0, (a1)
; RV64-NEXT:    vmin.vv v16, v24, v16
; RV64-NEXT:    vmin.vv v8, v8, v0
; RV64-NEXT:    vmin.vv v8, v8, v16
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vredmin.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <64 x i64>, <64 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smin.v64i64(<64 x i64> %v)
  ret i64 %red
}
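; NOTE (editorial): the smax tests mirror the smin ones with the start value
; flipped to the smallest signed value. For i8 that is -128, which still fits
; a single `li`, so the i8 group stays on the shared CHECK prefix.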
declare i8 @llvm.vector.reduce.smax.v1i8(<1 x i8>)

define i8 @vreduce_smax_v1i8(<1 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i8>, <1 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v1i8(<1 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>)

define i8 @vreduce_smax_v2i8(<2 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i8>, <2 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>)

define i8 @vreduce_smax_v4i8(<4 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i8>, <4 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>)

define i8 @vreduce_smax_v8i8(<8 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i8>, <8 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)

define i8 @vreduce_smax_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i8>, <16 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>)

define i8 @vreduce_smax_v32i8(<32 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i8>, <32 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v64i8(<64 x i8>)

define i8 @vreduce_smax_v64i8(<64 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i8>, <64 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v64i8(<64 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v128i8(<128 x i8>)

define i8 @vreduce_smax_v128i8(<128 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i8>, <128 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v128i8(<128 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.smax.v256i8(<256 x i8>)

define i8 @vreduce_smax_v256i8(<256 x i8>* %x) {
; CHECK-LABEL: vreduce_smax_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    li a0, -128
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <256 x i8>, <256 x i8>* %x
  %red = call i8 @llvm.vector.reduce.smax.v256i8(<256 x i8> %v)
  ret i8 %red
}
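; NOTE (editorial): the smax i16 start value -32768 is a single
; `lui a0, 1048568` (0xffff8000 sign-extended), so no addi/addiw is needed
; and RV32/RV64 keep sharing one set of CHECK lines.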
declare i16 @llvm.vector.reduce.smax.v1i16(<1 x i16>)

define i16 @vreduce_smax_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v1i16(<1 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>)

define i16 @vreduce_smax_v2i16(<2 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>)

define i16 @vreduce_smax_v4i16(<4 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)

define i16 @vreduce_smax_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)

define i16 @vreduce_smax_v16i16(<16 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v32i16(<32 x i16>)

define i16 @vreduce_smax_v32i16(<32 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v32i16(<32 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v64i16(<64 x i16>)

define i16 @vreduce_smax_v64i16(<64 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v64i16(<64 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.smax.v128i16(<128 x i16>)

define i16 @vreduce_smax_v128i16(<128 x i16>* %x) {
; CHECK-LABEL: vreduce_smax_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    lui a0, 1048568
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i16>, <128 x i16>* %x
  %red = call i16 @llvm.vector.reduce.smax.v128i16(<128 x i16> %v)
  ret i16 %red
}
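; NOTE (editorial): likewise the smax i32 start value INT32_MIN is exactly
; `lui a0, 524288` on its own, so these tests also stay on the shared CHECK
; prefix.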
declare i32 @llvm.vector.reduce.smax.v1i32(<1 x i32>)

define i32 @vreduce_smax_v1i32(<1 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i32>, <1 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>)

define i32 @vreduce_smax_v2i32(<2 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a0, 524288
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i32>, <2 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)

define i32 @vreduce_smax_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a0, 524288
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i32>, <4 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %v)
  ret i32 %red
}
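; NOTE (editorial): the smax i64 start value INT64_MIN is `li a0, -1; slli
; a0, a0, 63` on RV64; RV32 stores `zero` as the low word and `lui a0,
; 524288` as the high word and splats the pair with the same zero-stride
; vlse64.v trick used for smin above.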
declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>)

define i32 @vreduce_smax_v8i32(<8 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a0, 524288
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i32>, <8 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)

define i32 @vreduce_smax_v16i32(<16 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a0, 524288
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vredmax.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i32>, <16 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smax.v32i32(<32 x i32>)

define i32 @vreduce_smax_v32i32(<32 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    lui a0, 524288
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i32>, <32 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v32i32(<32 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.smax.v64i32(<64 x i32>)

define i32 @vreduce_smax_v64i32(<64 x i32>* %x) {
; CHECK-LABEL: vreduce_smax_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    lui a0, 524288
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vredmax.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i32>, <64 x i32>* %x
  %red = call i32 @llvm.vector.reduce.smax.v64i32(<64 x i32> %v)
  ret i32 %red
}

declare i64 @llvm.vector.reduce.smax.v1i64(<1 x i64>)

define i64 @vreduce_smax_v1i64(<1 x i64>* %x) {
; RV32-LABEL: vreduce_smax_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <1 x i64>, <1 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)

define i64 @vreduce_smax_v2i64(<2 x i64>* %x) {
; RV32-LABEL: vreduce_smax_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    sw zero, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT:    vredmax.vs v8, v8, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    slli a0, a0, 63
; RV64-NEXT:    vmv.s.x v9, a0
; RV64-NEXT:    vredmax.vs v8, v8, v9
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <2 x i64>, <2 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>)

define i64 @vreduce_smax_v4i64(<4 x i64>* %x) {
; RV32-LABEL: vreduce_smax_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    sw zero, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT:    vredmax.vs v8, v8, v10
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    slli a0, a0, 63
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vredmax.vs v8, v8, v10
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <4 x i64>, <4 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smax.v8i64(<8 x i64>)

define i64 @vreduce_smax_v8i64(<8 x i64>* %x) {
; RV32-LABEL: vreduce_smax_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    sw zero, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT:    vredmax.vs v8, v8, v12
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    slli a0, a0, 63
; RV64-NEXT:    vmv.s.x v12, a0
; RV64-NEXT:    vredmax.vs v8, v8, v12
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <8 x i64>, <8 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)

define i64 @vreduce_smax_v16i64(<16 x i64>* %x) {
; RV32-LABEL: vreduce_smax_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    sw zero, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredmax.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    slli a0, a0, 63
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vredmax.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <16 x i64>, <16 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smax.v32i64(<32 x i64>)

define i64 @vreduce_smax_v32i64(<32 x i64>* %x) {
; RV32-LABEL: vreduce_smax_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle64.v v16, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    sw zero, 8(sp)
; RV32-NEXT:    vmax.vv v8, v8, v16
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredmax.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle64.v v16, (a0)
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    slli a0, a0, 63
; RV64-NEXT:    vmv.s.x v24, a0
; RV64-NEXT:    vmax.vv v8, v8, v16
; RV64-NEXT:    vredmax.vs v8, v8, v24
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <32 x i64>, <32 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v32i64(<32 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.smax.v64i64(<64 x i64>)

define i64 @vreduce_smax_v64i64(<64 x i64>* %x) nounwind {
; RV32-LABEL: vreduce_smax_v64i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vle64.v v8, (a0)
; RV32-NEXT:    addi a1, a0, 256
; RV32-NEXT:    vle64.v v16, (a1)
; RV32-NEXT:    addi a1, a0, 384
; RV32-NEXT:    vle64.v v24, (a1)
; RV32-NEXT:    addi a0, a0, 128
; RV32-NEXT:    vle64.v v0, (a0)
; RV32-NEXT:    lui a0, 524288
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    sw zero, 8(sp)
; RV32-NEXT:    vmax.vv v24, v0, v24
; RV32-NEXT:    vmax.vv v8, v8, v16
; RV32-NEXT:    vmax.vv v8, v8, v24
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT:    vredmax.vs v8, v8, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    li a1, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vsrl.vx v8, v8, a1
; RV32-NEXT:    vmv.x.s a1, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vreduce_smax_v64i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT:    vle64.v v8, (a0)
; RV64-NEXT:    addi a1, a0, 384
; RV64-NEXT:    vle64.v v16, (a1)
; RV64-NEXT:    addi a1, a0, 256
; RV64-NEXT:    addi a0, a0, 128
; RV64-NEXT:    vle64.v v24, (a0)
; RV64-NEXT:    vle64.v v0, (a1)
; RV64-NEXT:    vmax.vv v16, v24, v16
; RV64-NEXT:    vmax.vv v8, v8, v0
; RV64-NEXT:    vmax.vv v8, v8, v16
; RV64-NEXT:    li a0, -1
; RV64-NEXT:    slli a0, a0, 63
; RV64-NEXT:    vmv.s.x v16, a0
; RV64-NEXT:    vredmax.vs v8, v8, v16
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
  %v = load <64 x i64>, <64 x i64>* %x
  %red = call i64 @llvm.vector.reduce.smax.v64i64(<64 x i64> %v)
  ret i64 %red
}
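; NOTE (editorial): for unsigned min the start value is UINT_MAX at every
; element width, so all umin tests can use `vmv.v.i v<n>, -1` (an immediate
; all-ones splat) instead of materializing a per-type constant in a scalar
; register; RV32 and RV64 differ again only in splitting the i64 result into
; the a0/a1 pair.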
declare i8 @llvm.vector.reduce.umin.v1i8(<1 x i8>)

define i8 @vreduce_umin_v1i8(<1 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i8>, <1 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v1i8(<1 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>)

define i8 @vreduce_umin_v2i8(<2 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i8>, <2 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>)

define i8 @vreduce_umin_v4i8(<4 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i8>, <4 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>)

define i8 @vreduce_umin_v8i8(<8 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i8>, <8 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)

define i8 @vreduce_umin_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i8>, <16 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>)

define i8 @vreduce_umin_v32i8(<32 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i8>, <32 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v64i8(<64 x i8>)

define i8 @vreduce_umin_v64i8(<64 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v12, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i8>, <64 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v64i8(<64 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v128i8(<128 x i8>)

define i8 @vreduce_umin_v128i8(<128 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i8>, <128 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umin.v256i8(<256 x i8>)

define i8 @vreduce_umin_v256i8(<256 x i8>* %x) {
; CHECK-LABEL: vreduce_umin_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vminu.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <256 x i8>, <256 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umin.v256i8(<256 x i8> %v)
  ret i8 %red
}
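; NOTE (editorial): the vsetivli to VL=1 before each vmv.v.i keeps the splat
; to a single element; that is sufficient because vredminu.vs only reads
; element 0 of its start-value operand.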
declare i16 @llvm.vector.reduce.umin.v1i16(<1 x i16>)

define i16 @vreduce_umin_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v1i16(<1 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>)

define i16 @vreduce_umin_v2i16(<2 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>)

define i16 @vreduce_umin_v4i16(<4 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)

define i16 @vreduce_umin_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)

define i16 @vreduce_umin_v16i16(<16 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v32i16(<32 x i16>)

define i16 @vreduce_umin_v32i16(<32 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v12, -1
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v32i16(<32 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v64i16(<64 x i16>)

define i16 @vreduce_umin_v64i16(<64 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v64i16(<64 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umin.v128i16(<128 x i16>)

define i16 @vreduce_umin_v128i16(<128 x i16>* %x) {
; CHECK-LABEL: vreduce_umin_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vminu.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <128 x i16>, <128 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umin.v128i16(<128 x i16> %v)
  ret i16 %red
}
declare i32 @llvm.vector.reduce.umin.v1i32(<1 x i32>)

define i32 @vreduce_umin_v1i32(<1 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <1 x i32>, <1 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>)

define i32 @vreduce_umin_v2i32(<2 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <2 x i32>, <2 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)

define i32 @vreduce_umin_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v9, -1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <4 x i32>, <4 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>)

define i32 @vreduce_umin_v8i32(<8 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v10, -1
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v10
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <8 x i32>, <8 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)

define i32 @vreduce_umin_v16i32(<16 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v12, -1
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v12
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <16 x i32>, <16 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>)

define i32 @vreduce_umin_v32i32(<32 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <32 x i32>, <32 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>)

define i32 @vreduce_umin_v64i32(<64 x i32>* %x) {
; CHECK-LABEL: vreduce_umin_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vminu.vv v8, v8, v16
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT:    vmv.v.i v16, -1
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vredminu.vs v8, v8, v16
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %v = load <64 x i32>, <64 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> %v)
  ret i32 %red
}
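; NOTE (editorial): unlike smin/smax, the umin i64 tests need no stack-built
; constant on RV32, since `vmv.v.i v<n>, -1` already yields all-ones at e64;
; the only RV32-specific code left is the final vsrl.vx/vmv.x.s pair that
; returns the 64-bit result in a0/a1.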
CHECK-LABEL: vreduce_umin_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>) define i32 @vreduce_umin_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, -1 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>) define i32 @vreduce_umin_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, -1 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>) define i32 @vreduce_umin_v32i32(<32 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> %v) ret i32 %red } declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>) define i32 @vreduce_umin_v64i32(<64 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> %v) ret i32 %red } declare i64 @llvm.vector.reduce.umin.v1i64(<1 x i64>) define i64 @vreduce_umin_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v1i64: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> %v) ret i64 %red } declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>) define 
declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)

define i64 @vreduce_umin_v2i64(<2 x i64>* %x) {
; RV32-LABEL: vreduce_umin_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v9, -1
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vredminu.vs v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umin_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v9, -1
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vredminu.vs v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <2 x i64>, <2 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>)

define i64 @vreduce_umin_v4i64(<4 x i64>* %x) {
; RV32-LABEL: vreduce_umin_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v10, -1
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vredminu.vs v8, v8, v10
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umin_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v10, -1
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vredminu.vs v8, v8, v10
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <4 x i64>, <4 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umin.v8i64(<8 x i64>)

define i64 @vreduce_umin_v8i64(<8 x i64>* %x) {
; RV32-LABEL: vreduce_umin_v8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v12, -1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vredminu.vs v8, v8, v12
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umin_v8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v12, -1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vredminu.vs v8, v8, v12
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <8 x i64>, <8 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)

define i64 @vreduce_umin_v16i64(<16 x i64>* %x) {
; RV32-LABEL: vreduce_umin_v16i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredminu.vs v8, v8, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umin_v16i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredminu.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <16 x i64>, <16 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umin.v32i64(<32 x i64>)

define i64 @vreduce_umin_v32i64(<32 x i64>* %x) {
; RV32-LABEL: vreduce_umin_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: vminu.vv v8, v8, v16
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredminu.vs v8, v8, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umin_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v16, (a0)
; RV64-NEXT: vminu.vv v8, v8, v16
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredminu.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <32 x i64>, <32 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umin.v32i64(<32 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umin.v64i64(<64 x i64>)

define i64 @vreduce_umin_v64i64(<64 x i64>* %x) nounwind {
; RV32-LABEL: vreduce_umin_v64i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a1, a0, 384
; RV32-NEXT: vle64.v v16, (a1)
; RV32-NEXT: addi a1, a0, 256
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v24, (a0)
; RV32-NEXT: vle64.v v0, (a1)
; RV32-NEXT: vminu.vv v16, v24, v16
; RV32-NEXT: vminu.vv v8, v8, v0
; RV32-NEXT: vminu.vv v8, v8, v16
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredminu.vs v8, v8, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umin_v64i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a1, a0, 384
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
; RV64-NEXT: vminu.vv v16, v24, v16
; RV64-NEXT: vminu.vv v8, v8, v0
; RV64-NEXT: vminu.vv v8, v8, v16
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredminu.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <64 x i64>, <64 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umin.v64i64(<64 x i64> %v)
  ret i64 %red
}
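; Unsigned max reductions. Here the scalar accumulator is seeded with zero
; (the identity for umax) via vmv.s.x, rather than the all-ones splat the
; umin tests above use.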
declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8>)

define i8 @vreduce_umax_v1i8(<1 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <1 x i8>, <1 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>)

define i8 @vreduce_umax_v2i8(<2 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <2 x i8>, <2 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>)

define i8 @vreduce_umax_v4i8(<4 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <4 x i8>, <4 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>)

define i8 @vreduce_umax_v8i8(<8 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <8 x i8>, <8 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)

define i8 @vreduce_umax_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <16 x i8>, <16 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>)

define i8 @vreduce_umax_v32i8(<32 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <32 x i8>, <32 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v64i8(<64 x i8>)

define i8 @vreduce_umax_v64i8(<64 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <64 x i8>, <64 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v64i8(<64 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v128i8(<128 x i8>)

define i8 @vreduce_umax_v128i8(<128 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v128i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v16, zero
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <128 x i8>, <128 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v128i8(<128 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.umax.v256i8(<256 x i8>)

define i8 @vreduce_umax_v256i8(<256 x i8>* %x) {
; CHECK-LABEL: vreduce_umax_v256i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v16, zero
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <256 x i8>, <256 x i8>* %x
  %red = call i8 @llvm.vector.reduce.umax.v256i8(<256 x i8> %v)
  ret i8 %red
}

declare i16 @llvm.vector.reduce.umax.v1i16(<1 x i16>)

define i16 @vreduce_umax_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>)

define i16 @vreduce_umax_v2i16(<2 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>)

define i16 @vreduce_umax_v4i16(<4 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)

define i16 @vreduce_umax_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)

define i16 @vreduce_umax_v16i16(<16 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v32i16(<32 x i16>)

define i16 @vreduce_umax_v32i16(<32 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v32i16(<32 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v64i16(<64 x i16>)

define i16 @vreduce_umax_v64i16(<64 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v64i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v16, zero
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v64i16(<64 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.umax.v128i16(<128 x i16>)

define i16 @vreduce_umax_v128i16(<128 x i16>* %x) {
; CHECK-LABEL: vreduce_umax_v128i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v16, zero
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <128 x i16>, <128 x i16>* %x
  %red = call i16 @llvm.vector.reduce.umax.v128i16(<128 x i16> %v)
  ret i16 %red
}

declare i32 @llvm.vector.reduce.umax.v1i32(<1 x i32>)

define i32 @vreduce_umax_v1i32(<1 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <1 x i32>, <1 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>)

define i32 @vreduce_umax_v2i32(<2 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <2 x i32>, <2 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)

define i32 @vreduce_umax_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <4 x i32>, <4 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>)

define i32 @vreduce_umax_v8i32(<8 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <8 x i32>, <8 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)

define i32 @vreduce_umax_v16i32(<16 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmv.s.x v12, zero
; CHECK-NEXT: vredmaxu.vs v8, v8, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <16 x i32>, <16 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umax.v32i32(<32 x i32>)

define i32 @vreduce_umax_v32i32(<32 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v16, zero
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <32 x i32>, <32 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v32i32(<32 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.umax.v64i32(<64 x i32>)

define i32 @vreduce_umax_v64i32(<64 x i32>* %x) {
; CHECK-LABEL: vreduce_umax_v64i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v16, zero
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredmaxu.vs v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <64 x i32>, <64 x i32>* %x
  %red = call i32 @llvm.vector.reduce.umax.v64i32(<64 x i32> %v)
  ret i32 %red
}

declare i64 @llvm.vector.reduce.umax.v1i64(<1 x i64>)

define i64 @vreduce_umax_v1i64(<1 x i64>* %x) {
; RV32-LABEL: vreduce_umax_v1i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsrl.vx v9, v8, a0
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <1 x i64>, <1 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)

define i64 @vreduce_umax_v2i64(<2 x i64>* %x) {
; RV32-LABEL: vreduce_umax_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vmv.s.x v9, zero
; RV32-NEXT: vredmaxu.vs v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v9, zero
; RV64-NEXT: vredmaxu.vs v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <2 x i64>, <2 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>)

define i64 @vreduce_umax_v4i64(<4 x i64>* %x) {
; RV32-LABEL: vreduce_umax_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vmv.s.x v10, zero
; RV32-NEXT: vredmaxu.vs v8, v8, v10
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v10, zero
; RV64-NEXT: vredmaxu.vs v8, v8, v10
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <4 x i64>, <4 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umax.v8i64(<8 x i64>)

define i64 @vreduce_umax_v8i64(<8 x i64>* %x) {
; RV32-LABEL: vreduce_umax_v8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vmv.s.x v12, zero
; RV32-NEXT: vredmaxu.vs v8, v8, v12
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v12, zero
; RV64-NEXT: vredmaxu.vs v8, v8, v12
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <8 x i64>, <8 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)

define i64 @vreduce_umax_v16i64(<16 x i64>* %x) {
; RV32-LABEL: vreduce_umax_v16i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vmv.s.x v16, zero
; RV32-NEXT: vredmaxu.vs v8, v8, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v16i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v16, zero
; RV64-NEXT: vredmaxu.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <16 x i64>, <16 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umax.v32i64(<32 x i64>)

define i64 @vreduce_umax_v32i64(<32 x i64>* %x) {
; RV32-LABEL: vreduce_umax_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: vmv.s.x v24, zero
; RV32-NEXT: vmaxu.vv v8, v8, v16
; RV32-NEXT: vredmaxu.vs v8, v8, v24
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v16, (a0)
; RV64-NEXT: vmv.s.x v24, zero
; RV64-NEXT: vmaxu.vv v8, v8, v16
; RV64-NEXT: vredmaxu.vs v8, v8, v24
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <32 x i64>, <32 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v32i64(<32 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.umax.v64i64(<64 x i64>)

define i64 @vreduce_umax_v64i64(<64 x i64>* %x) nounwind {
; RV32-LABEL: vreduce_umax_v64i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a1, a0, 384
; RV32-NEXT: vle64.v v16, (a1)
; RV32-NEXT: addi a1, a0, 256
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v24, (a0)
; RV32-NEXT: vle64.v v0, (a1)
; RV32-NEXT: vmaxu.vv v16, v24, v16
; RV32-NEXT: vmaxu.vv v8, v8, v0
; RV32-NEXT: vmaxu.vv v8, v8, v16
; RV32-NEXT: vmv.s.x v16, zero
; RV32-NEXT: vredmaxu.vs v8, v8, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_umax_v64i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a1, a0, 384
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
; RV64-NEXT: vmaxu.vv v16, v24, v16
; RV64-NEXT: vmaxu.vv v8, v8, v0
; RV64-NEXT: vmaxu.vv v8, v8, v16
; RV64-NEXT: vmv.s.x v16, zero
; RV64-NEXT: vredmaxu.vs v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <64 x i64>, <64 x i64>* %x
  %red = call i64 @llvm.vector.reduce.umax.v64i64(<64 x i64> %v)
  ret i64 %red
}
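; Multiply reductions. RVV has no vector multiply reduction instruction, so
; these are expanded into a log2(N) tree of vslidedown/vrgather shuffles and
; vmul operations; the 2-element cases instead fold in the second element
; with a single multiply.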
declare i8 @llvm.vector.reduce.mul.v1i8(<1 x i8>)

define i8 @vreduce_mul_v1i8(<1 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <1 x i8>, <1 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v1i8(<1 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)

define i8 @vreduce_mul_v2i8(<2 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: lb a0, 1(a0)
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <2 x i8>, <2 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v2i8(<2 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)

define i8 @vreduce_mul_v4i8(<4 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <4 x i8>, <4 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)

define i8 @vreduce_mul_v8i8(<8 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v9, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <8 x i8>, <8 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)

define i8 @vreduce_mul_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v9, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vslidedown.vi v9, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <16 x i8>, <16 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>)

define i8 @vreduce_mul_v32i8(<32 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v10, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <32 x i8>, <32 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v64i8(<64 x i8>)

define i8 @vreduce_mul_v64i8(<64 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vslidedown.vx v12, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vrgather.vi v12, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <64 x i8>, <64 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v64i8(<64 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v128i8(<128 x i8>)

define i8 @vreduce_mul_v128i8(<128 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v128i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: li a0, 64
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vrgather.vi v16, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <128 x i8>, <128 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v128i8(<128 x i8> %v)
  ret i8 %red
}

declare i8 @llvm.vector.reduce.mul.v256i8(<256 x i8>)

define i8 @vreduce_mul_v256i8(<256 x i8>* %x) {
; CHECK-LABEL: vreduce_mul_v256i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: li a0, 64
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vrgather.vi v16, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <256 x i8>, <256 x i8>* %x
  %red = call i8 @llvm.vector.reduce.mul.v256i8(<256 x i8> %v)
  ret i8 %red
}

declare i16 @llvm.vector.reduce.mul.v1i16(<1 x i16>)

define i16 @vreduce_mul_v1i16(<1 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <1 x i16>, <1 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v1i16(<1 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)

define i16 @vreduce_mul_v2i16(<2 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lh a0, 2(a0)
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <2 x i16>, <2 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v2i16(<2 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)

define i16 @vreduce_mul_v4i16(<4 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <4 x i16>, <4 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)

define i16 @vreduce_mul_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v9, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <8 x i16>, <8 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)

define i16 @vreduce_mul_v16i16(<16 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v10, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <16 x i16>, <16 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v32i16(<32 x i16>)

define i16 @vreduce_mul_v32i16(<32 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v12, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vrgather.vi v12, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <32 x i16>, <32 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v32i16(<32 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v64i16(<64 x i16>)

define i16 @vreduce_mul_v64i16(<64 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v64i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vrgather.vi v16, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <64 x i16>, <64 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v64i16(<64 x i16> %v)
  ret i16 %red
}

declare i16 @llvm.vector.reduce.mul.v128i16(<128 x i16>)

define i16 @vreduce_mul_v128i16(<128 x i16>* %x) {
; CHECK-LABEL: vreduce_mul_v128i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vrgather.vi v16, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <128 x i16>, <128 x i16>* %x
  %red = call i16 @llvm.vector.reduce.mul.v128i16(<128 x i16> %v)
  ret i16 %red
}

declare i32 @llvm.vector.reduce.mul.v1i32(<1 x i32>)

define i32 @vreduce_mul_v1i32(<1 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <1 x i32>, <1 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v1i32(<1 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)

define i32 @vreduce_mul_v2i32(<2 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: lw a0, 4(a0)
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <2 x i32>, <2 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)

define i32 @vreduce_mul_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <4 x i32>, <4 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)

define i32 @vreduce_mul_v8i32(<8 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <8 x i32>, <8 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>)

define i32 @vreduce_mul_v16i32(<16 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vslidedown.vi v12, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vrgather.vi v12, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <16 x i32>, <16 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.mul.v32i32(<32 x i32>)

define i32 @vreduce_mul_v32i32(<32 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vrgather.vi v16, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <32 x i32>, <32 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v32i32(<32 x i32> %v)
  ret i32 %red
}

declare i32 @llvm.vector.reduce.mul.v64i32(<64 x i32>)

define i32 @vreduce_mul_v64i32(<64 x i32>* %x) {
; CHECK-LABEL: vreduce_mul_v64i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 4
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vslidedown.vi v16, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vrgather.vi v16, v8, 1
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
  %v = load <64 x i32>, <64 x i32>* %x
  %red = call i32 @llvm.vector.reduce.mul.v64i32(<64 x i32> %v)
  ret i32 %red
}

declare i64 @llvm.vector.reduce.mul.v1i64(<1 x i64>)

define i64 @vreduce_mul_v1i64(<1 x i64>* %x) {
; RV32-LABEL: vreduce_mul_v1i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsrl.vx v9, v8, a0
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <1 x i64>, <1 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)

define i64 @vreduce_mul_v2i64(<2 x i64>* %x) {
; RV32-LABEL: vreduce_mul_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: ld a0, 8(a0)
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <2 x i64>, <2 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)

define i64 @vreduce_mul_v4i64(<4 x i64>* %x) {
; RV32-LABEL: vreduce_mul_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v10
; RV32-NEXT: vrgather.vi v10, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v10
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v10
; RV64-NEXT: vrgather.vi v10, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v10
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <4 x i64>, <4 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)

define i64 @vreduce_mul_v8i64(<8 x i64>* %x) {
; RV32-LABEL: vreduce_mul_v8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vmul.vv v8, v8, v12
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v12
; RV32-NEXT: vrgather.vi v12, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v12
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vslidedown.vi v12, v8, 4
; RV64-NEXT: vmul.vv v8, v8, v12
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v12
; RV64-NEXT: vrgather.vi v12, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v12
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <8 x i64>, <8 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)

define i64 @vreduce_mul_v16i64(<16 x i64>* %x) {
; RV32-LABEL: vreduce_mul_v16i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vslidedown.vi v16, v8, 8
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 4
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vrgather.vi v16, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v16i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vslidedown.vi v16, v8, 8
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 4
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vrgather.vi v16, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <16 x i64>, <16 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.mul.v32i64(<32 x i64>)

define i64 @vreduce_mul_v32i64(<32 x i64>* %x) {
; RV32-LABEL: vreduce_mul_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 8
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 4
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vrgather.vi v16, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, mu
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v16, (a0)
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 8
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 4
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vrgather.vi v16, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <32 x i64>, <32 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v32i64(<32 x i64> %v)
  ret i64 %red
}

declare i64 @llvm.vector.reduce.mul.v64i64(<64 x i64>)

define i64 @vreduce_mul_v64i64(<64 x i64>* %x) nounwind {
; RV32-LABEL: vreduce_mul_v64i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a1, a0, 384
; RV32-NEXT: vle64.v v16, (a1)
; RV32-NEXT: addi a1, a0, 256
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v24, (a0)
; RV32-NEXT: vle64.v v0, (a1)
; RV32-NEXT: vmul.vv v16, v24, v16
; RV32-NEXT: vmul.vv v8, v8, v0
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 8
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 4
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vrgather.vi v16, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, mu
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vreduce_mul_v64i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a1, a0, 384
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
; RV64-NEXT: vmul.vv v16, v24, v16
; RV64-NEXT: vmul.vv v8, v8, v0
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 8
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 4
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vslidedown.vi v16, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vrgather.vi v16, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v16
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
  %v = load <64 x i64>, <64 x i64>* %x
  %red = call i64 @llvm.vector.reduce.mul.v64i64(<64 x i64> %v)
  ret i64 %red
}