; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv32 -mattr=+m -verify-machineinstrs | FileCheck %s --check-prefix=RV32 ; RUN: llc < %s -mtriple=riscv64 -mattr=+m -verify-machineinstrs | FileCheck %s --check-prefix=RV64 ; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zba -verify-machineinstrs | FileCheck %s --check-prefix=RV32ZBA ; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zba -verify-machineinstrs | FileCheck %s --check-prefix=RV64ZBA ; ; Get the actual value of the overflow bit. ; define zeroext i1 @saddo1.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: saddo1.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a3, a0, a1 ; RV32-NEXT: slt a0, a3, a0 ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo1.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: add a3, a0, a1 ; RV64-NEXT: addw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo1.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a3, a0, a1 ; RV32ZBA-NEXT: slt a0, a3, a0 ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo1.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: add a3, a0, a1 ; RV64ZBA-NEXT: addw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } ; Test the immediate version. define zeroext i1 @saddo2.i32(i32 %v1, i32* %res) { ; RV32-LABEL: saddo2.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, 4 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo2.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: addi a2, a0, 4 ; RV64-NEXT: addiw a0, a0, 4 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo2.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, 4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo2.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: addi a2, a0, 4 ; RV64ZBA-NEXT: addiw a0, a0, 4 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } ; Test negative immediates. define zeroext i1 @saddo3.i32(i32 %v1, i32* %res) { ; RV32-LABEL: saddo3.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, -4 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo3.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: addi a2, a0, -4 ; RV64-NEXT: addiw a0, a0, -4 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo3.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, -4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo3.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: addi a2, a0, -4 ; RV64ZBA-NEXT: addiw a0, a0, -4 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } ; Test immediates that are too large to be encoded. define zeroext i1 @saddo4.i32(i32 %v1, i32* %res) { ; RV32-LABEL: saddo4.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: lui a2, 4096 ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo4.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: lui a2, 4096 ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: addw a0, a0, a2 ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo4.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: lui a2, 4096 ; RV32ZBA-NEXT: addi a2, a2, -1 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo4.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: lui a2, 4096 ; RV64ZBA-NEXT: addiw a2, a2, -1 ; RV64ZBA-NEXT: add a3, a0, a2 ; RV64ZBA-NEXT: addw a0, a0, a2 ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, i64* %res) { ; RV32-LABEL: saddo1.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a5, a1, a3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a5, a5, a0 ; RV32-NEXT: xor a0, a1, a5 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a5, 4(a4) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo1.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a3, a0, a1 ; RV64-NEXT: slt a0, a3, a0 ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: xor a0, a1, a0 ; RV64-NEXT: sd a3, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo1.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a5, a1, a3 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a5, a5, a0 ; RV32ZBA-NEXT: xor a0, a1, a5 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo1.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a3, a0, a1 ; RV64ZBA-NEXT: slt a0, a3, a0 ; RV64ZBA-NEXT: slti a1, a1, 0 ; RV64ZBA-NEXT: xor a0, a1, a0 ; RV64ZBA-NEXT: sd a3, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @saddo2.i64(i64 %v1, i64* %res) { ; RV32-LABEL: saddo2.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a3, a0, 4 ; RV32-NEXT: sltu a0, a3, a0 ; RV32-NEXT: add a4, a1, a0 ; RV32-NEXT: xor a0, a1, a4 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: sw a4, 4(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo2.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addi a2, a0, 4 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo2.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a3, a0, 4 ; RV32ZBA-NEXT: sltu a0, a3, a0 ; RV32ZBA-NEXT: add a4, a1, a0 ; RV32ZBA-NEXT: xor a0, a1, a4 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo2.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addi a2, a0, 4 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @saddo3.i64(i64 %v1, i64* %res) { ; RV32-LABEL: saddo3.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a3, a0, -4 ; RV32-NEXT: sltu a0, a3, a0 ; RV32-NEXT: add a0, a1, a0 ; RV32-NEXT: addi a4, a0, -1 ; RV32-NEXT: xor a0, a1, a4 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: sw a4, 4(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: saddo3.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addi a2, a0, -4 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo3.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a3, a0, -4 ; RV32ZBA-NEXT: sltu a0, a3, a0 ; RV32ZBA-NEXT: add a0, a1, a0 ; RV32ZBA-NEXT: addi a4, a0, -1 ; RV32ZBA-NEXT: xor a0, a1, a4 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo3.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addi a2, a0, -4 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: uaddo.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: sw a1, 0(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: sw a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a1, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: sw a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @uaddo.i32.constant(i32 %v1, i32* %res) { ; RV32-LABEL: uaddo.i32.constant: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, -2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32.constant: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a2, a0 ; RV64-NEXT: addiw a3, a0, -2 ; RV64-NEXT: sltu a0, a3, a2 ; RV64-NEXT: sw a3, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, -2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32.constant: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a2, a0 ; RV64ZBA-NEXT: addiw a3, a0, -2 ; RV64ZBA-NEXT: sltu a0, a3, a2 ; RV64ZBA-NEXT: sw a3, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @uaddo.i32.constant_one(i32 %v1, i32* %res) { ; RV32-LABEL: uaddo.i32.constant_one: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, 1 ; RV32-NEXT: seqz a0, a2 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32.constant_one: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addiw a2, a0, 1 ; RV64-NEXT: seqz a0, a2 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant_one: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, 1 ; RV32ZBA-NEXT: seqz a0, a2 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32.constant_one: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addiw a2, a0, 1 ; RV64ZBA-NEXT: seqz a0, a2 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) { ; RV32-LABEL: uaddo.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a3, a1, a3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a3, a3, a0 ; RV32-NEXT: beq a3, a1, .LBB10_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a3, a1 ; RV32-NEXT: .LBB10_2: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: sd a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a3, a1, a3 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a3, a3, a0 ; RV32ZBA-NEXT: beq a3, a1, .LBB10_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a3, a1 ; RV32ZBA-NEXT: .LBB10_2: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: sd a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @uaddo.i64.constant_one(i64 %v1, i64* %res) { ; RV32-LABEL: uaddo.i64.constant_one: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mv a3, a0 ; RV32-NEXT: addi a4, a0, 1 ; RV32-NEXT: sltu a0, a4, a0 ; RV32-NEXT: add a5, a1, a0 ; RV32-NEXT: bgeu a4, a3, .LBB11_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a5, a1 ; RV32-NEXT: .LBB11_2: # %entry ; RV32-NEXT: sw a4, 0(a2) ; RV32-NEXT: sw a5, 4(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64.constant_one: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addi a2, a0, 1 ; RV64-NEXT: seqz a0, a2 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64.constant_one: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mv a3, a0 ; RV32ZBA-NEXT: addi a4, a0, 1 ; RV32ZBA-NEXT: sltu a0, a4, a0 ; RV32ZBA-NEXT: add a5, a1, a0 ; RV32ZBA-NEXT: bgeu a4, a3, .LBB11_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a5, a1 ; RV32ZBA-NEXT: .LBB11_2: # %entry ; RV32ZBA-NEXT: sw a4, 0(a2) ; RV32ZBA-NEXT: sw a5, 4(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64.constant_one: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addi a2, a0, 1 ; RV64ZBA-NEXT: seqz a0, a2 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 1) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: ssubo1.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sgtz a3, a1 ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: sw a1, 0(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo1.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sub a3, a0, a1 ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo1.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sgtz a3, a1 ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo1.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sub a3, a0, a1 ; RV64ZBA-NEXT: subw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @ssubo2.i32(i32 %v1, i32* %res) { ; RV32-LABEL: ssubo2.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, 4 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo2.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: addi a2, a0, 4 ; RV64-NEXT: addiw a0, a0, 4 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo2.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, 4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo2.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: addi a2, a0, 4 ; RV64ZBA-NEXT: addiw a0, a0, 4 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) { ; RV32-LABEL: ssubo.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a5, a0, a2 ; RV32-NEXT: sub a6, a1, a3 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: xor a6, a1, a5 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: and a1, a1, a6 ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: sub a0, a0, a2 ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a5, 4(a4) ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sgtz a3, a1 ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: sd a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a5, a0, a2 ; RV32ZBA-NEXT: sub a6, a1, a3 ; RV32ZBA-NEXT: sub a5, a6, a5 ; RV32ZBA-NEXT: xor a6, a1, a5 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: and a1, a1, a6 ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: sub a0, a0, a2 ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sgtz a3, a1 ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: slt a0, a1, a0 ; RV64ZBA-NEXT: xor a0, a3, a0 ; RV64ZBA-NEXT: sd a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: usubo.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: sw a1, 0(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: sw a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: sw a1, 0(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: sw a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @usubo.i32.constant.rhs(i32 %v1, i32* %res) { ; RV32-LABEL: usubo.i32.constant.rhs: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, 2 ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32.constant.rhs: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addiw a2, a0, 2 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a2 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.rhs: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, 2 ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32.constant.rhs: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addiw a2, a0, 2 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a0, a2 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 -2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) { ; RV32-LABEL: usubo.i32.constant.lhs: ; RV32: # %bb.0: # %entry ; RV32-NEXT: li a2, -2 ; RV32-NEXT: sub a2, a2, a0 ; RV32-NEXT: addi a0, a2, 1 ; RV32-NEXT: seqz a0, a0 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32.constant.lhs: ; RV64: # %bb.0: # %entry ; RV64-NEXT: li a2, -2 ; RV64-NEXT: subw a2, a2, a0 ; RV64-NEXT: addi a0, a2, 1 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.lhs: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: li a2, -2 ; RV32ZBA-NEXT: sub a2, a2, a0 ; RV32ZBA-NEXT: addi a0, a2, 1 ; RV32ZBA-NEXT: seqz a0, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32.constant.lhs: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: li a2, -2 ; RV64ZBA-NEXT: subw a2, a2, a0 ; RV64ZBA-NEXT: addi a0, a2, 1 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -2, i32 %v1) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) { ; RV32-LABEL: usubo.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a5, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a5 ; RV32-NEXT: sub a2, a0, a2 ; RV32-NEXT: beq a3, a1, .LBB18_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: j .LBB18_3 ; RV32-NEXT: .LBB18_2: ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: .LBB18_3: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: sd a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a5, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a5 ; RV32ZBA-NEXT: sub a2, a0, a2 ; RV32ZBA-NEXT: beq a3, a1, .LBB18_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: j .LBB18_3 ; RV32ZBA-NEXT: .LBB18_2: ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: .LBB18_3: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: sd a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: smulo.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulh a3, a0, a1 ; RV32-NEXT: mul a1, a0, a1 ; RV32-NEXT: srai a0, a1, 31 ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a1, 0(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: mul a3, a0, a1 ; RV64-NEXT: mulw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulh a3, a0, a1 ; RV32ZBA-NEXT: mul a1, a0, a1 ; RV32ZBA-NEXT: srai a0, a1, 31 ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: mul a3, a0, a1 ; RV64ZBA-NEXT: mulw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) { ; RV32-LABEL: smulo2.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: li a2, 13 ; RV32-NEXT: mulh a3, a0, a2 ; RV32-NEXT: mul a2, a0, a2 ; RV32-NEXT: srai a0, a2, 31 ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: li a2, 13 ; RV64-NEXT: mul a3, a0, a2 ; RV64-NEXT: mulw a0, a0, a2 ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: li a2, 13 ; RV32ZBA-NEXT: mulh a3, a0, a2 ; RV32ZBA-NEXT: mul a2, a0, a2 ; RV32ZBA-NEXT: srai a0, a2, 31 ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sh1add a2, a0, a0 ; RV64ZBA-NEXT: sh2add a2, a2, a0 ; RV64ZBA-NEXT: sext.w a0, a2 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 13) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) { ; RV32-LABEL: smulo.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset s0, -4 ; RV32-NEXT: .cfi_offset s1, -8 ; RV32-NEXT: mulhu a5, a0, a2 ; RV32-NEXT: mul a6, a1, a2 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: sltu a6, a5, a6 ; RV32-NEXT: mulhu a7, a1, a2 ; RV32-NEXT: add a6, a7, a6 ; RV32-NEXT: mul a7, a0, a3 ; RV32-NEXT: add a5, a7, a5 ; RV32-NEXT: sltu a7, a5, a7 ; RV32-NEXT: mulhu t0, a0, a3 ; RV32-NEXT: add a7, t0, a7 ; RV32-NEXT: add a7, a6, a7 ; RV32-NEXT: mul t0, a1, a3 ; RV32-NEXT: add t1, t0, a7 ; RV32-NEXT: srai t2, a1, 31 ; RV32-NEXT: mul t3, a2, t2 ; RV32-NEXT: srai t4, a3, 31 ; RV32-NEXT: mul t5, t4, a0 ; RV32-NEXT: add t6, t5, t3 ; RV32-NEXT: add s0, t1, t6 ; RV32-NEXT: sltu s1, s0, t1 ; RV32-NEXT: sltu t0, t1, t0 ; RV32-NEXT: sltu a6, a7, a6 ; RV32-NEXT: mulhu a7, a1, a3 ; RV32-NEXT: add a6, a7, a6 ; RV32-NEXT: add a6, a6, t0 ; RV32-NEXT: mulhu a7, a2, t2 ; RV32-NEXT: add a7, a7, t3 ; RV32-NEXT: mul a3, a3, t2 ; RV32-NEXT: add a3, a7, a3 ; RV32-NEXT: mul a1, t4, a1 ; RV32-NEXT: mulhu a7, t4, a0 ; RV32-NEXT: add a1, a7, a1 ; RV32-NEXT: add a1, a1, t5 ; RV32-NEXT: add a1, a1, a3 ; RV32-NEXT: sltu a3, t6, t5 ; RV32-NEXT: add a1, a1, a3 ; RV32-NEXT: add a1, a6, a1 ; RV32-NEXT: add a1, a1, s1 ; RV32-NEXT: srai a3, a5, 31 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: xor a3, s0, a3 ; RV32-NEXT: or a1, a3, a1 ; RV32-NEXT: snez a1, a1 ; RV32-NEXT: mul a0, a0, a2 ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a5, 4(a4) ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulh a3, a0, a1 ; RV64-NEXT: mul a1, a0, a1 ; RV64-NEXT: srai a0, a1, 63 ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sd a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi sp, sp, -16 ; RV32ZBA-NEXT: .cfi_def_cfa_offset 16 ; RV32ZBA-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32ZBA-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZBA-NEXT: .cfi_offset s0, -4 ; RV32ZBA-NEXT: .cfi_offset s1, -8 ; RV32ZBA-NEXT: mulhu a5, a0, a2 ; RV32ZBA-NEXT: mul a6, a1, a2 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: sltu a6, a5, a6 ; RV32ZBA-NEXT: mulhu a7, a1, a2 ; RV32ZBA-NEXT: add a6, a7, a6 ; RV32ZBA-NEXT: mul a7, a0, a3 ; RV32ZBA-NEXT: add a5, a7, a5 ; RV32ZBA-NEXT: sltu a7, a5, a7 ; RV32ZBA-NEXT: mulhu t0, a0, a3 ; RV32ZBA-NEXT: add a7, t0, a7 ; RV32ZBA-NEXT: add a7, a6, a7 ; RV32ZBA-NEXT: mul t0, a1, a3 ; RV32ZBA-NEXT: add t1, t0, a7 ; RV32ZBA-NEXT: srai t2, a1, 31 ; RV32ZBA-NEXT: mul t3, a2, t2 ; RV32ZBA-NEXT: srai t4, a3, 31 ; RV32ZBA-NEXT: mul t5, t4, a0 ; RV32ZBA-NEXT: add t6, t5, t3 ; RV32ZBA-NEXT: add s0, t1, t6 ; RV32ZBA-NEXT: sltu s1, s0, t1 ; RV32ZBA-NEXT: sltu t0, t1, t0 ; RV32ZBA-NEXT: sltu a6, a7, a6 ; RV32ZBA-NEXT: mulhu a7, a1, a3 ; RV32ZBA-NEXT: add a6, a7, a6 ; RV32ZBA-NEXT: add a6, a6, t0 ; RV32ZBA-NEXT: mulhu a7, a2, t2 ; RV32ZBA-NEXT: add a7, a7, t3 ; RV32ZBA-NEXT: mul a3, a3, t2 ; RV32ZBA-NEXT: add a3, a7, a3 ; RV32ZBA-NEXT: mul a1, t4, a1 ; RV32ZBA-NEXT: mulhu a7, t4, a0 ; RV32ZBA-NEXT: add a1, a7, a1 ; RV32ZBA-NEXT: add a1, a1, t5 ; RV32ZBA-NEXT: add a1, a1, a3 ; RV32ZBA-NEXT: sltu a3, t6, t5 ; RV32ZBA-NEXT: add a1, a1, a3 ; RV32ZBA-NEXT: add a1, a6, a1 ; RV32ZBA-NEXT: add a1, a1, s1 ; RV32ZBA-NEXT: srai a3, a5, 31 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: xor a3, s0, a3 ; RV32ZBA-NEXT: or a1, a3, a1 ; RV32ZBA-NEXT: snez a1, a1 ; RV32ZBA-NEXT: mul a0, a0, a2 ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulh a3, a0, a1 ; RV64ZBA-NEXT: mul a1, a0, a1 ; RV64ZBA-NEXT: srai a0, a1, 63 ; RV64ZBA-NEXT: xor a0, a3, a0 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sd a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) { ; RV32-LABEL: smulo2.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: li a3, 13 ; RV32-NEXT: mulhu a4, a0, a3 ; RV32-NEXT: mul a5, a1, a3 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a5, a4, a5 ; RV32-NEXT: mulhu a6, a1, a3 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: srai a1, a1, 31 ; RV32-NEXT: mul a6, a1, a3 ; RV32-NEXT: add a6, a5, a6 ; RV32-NEXT: srai a7, a4, 31 ; RV32-NEXT: xor t0, a6, a7 ; RV32-NEXT: sltu a5, a6, a5 ; RV32-NEXT: mulh a1, a1, a3 ; RV32-NEXT: add a1, a1, a5 ; RV32-NEXT: xor a1, a1, a7 ; RV32-NEXT: or a1, t0, a1 ; RV32-NEXT: snez a1, a1 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: sw a4, 4(a2) ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: li a2, 13 ; RV64-NEXT: mulh a3, a0, a2 ; RV64-NEXT: mul a2, a0, a2 ; RV64-NEXT: srai a0, a2, 63 ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: li a3, 13 ; RV32ZBA-NEXT: mulhu a4, a0, a3 ; RV32ZBA-NEXT: mul a5, a1, a3 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a5, a4, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a3 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: srai a1, a1, 31 ; RV32ZBA-NEXT: mul a6, a1, a3 ; RV32ZBA-NEXT: add a6, a5, a6 ; RV32ZBA-NEXT: srai a7, a4, 31 ; RV32ZBA-NEXT: xor t0, a6, a7 ; RV32ZBA-NEXT: sltu a5, a6, a5 ; RV32ZBA-NEXT: mulh a1, a1, a3 ; RV32ZBA-NEXT: add a1, a1, a5 ; RV32ZBA-NEXT: xor a1, a1, a7 ; RV32ZBA-NEXT: or a1, t0, a1 ; RV32ZBA-NEXT: snez a1, a1 ; RV32ZBA-NEXT: mul a0, a0, a3 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: li a2, 13 ; RV64ZBA-NEXT: mulh a3, a0, a2 ; RV64ZBA-NEXT: mul a2, a0, a2 ; RV64ZBA-NEXT: srai a0, a2, 63 ; RV64ZBA-NEXT: xor a0, a3, a0 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 13) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: umulo.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a3, a0, a1 ; RV32-NEXT: snez a3, a3 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: mv a0, a3 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: mulhu a1, a0, a1 ; RV64-NEXT: srli a0, a1, 32 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a3, a0, a1 ; RV32ZBA-NEXT: snez a3, a3 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: mv a0, a3 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: zext.w a1, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: mul a1, a0, a1 ; RV64ZBA-NEXT: srli a0, a1, 32 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) { ; RV32-LABEL: umulo2.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: li a3, 13 ; RV32-NEXT: mulhu a2, a0, a3 ; RV32-NEXT: snez a2, a2 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: sw a0, 0(a1) ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: li a2, 13 ; RV64-NEXT: slli a2, a2, 32 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: mulhu a2, a0, a2 ; RV64-NEXT: srli a0, a2, 32 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: li a3, 13 ; RV32ZBA-NEXT: mulhu a2, a0, a3 ; RV32ZBA-NEXT: snez a2, a2 ; RV32ZBA-NEXT: mul a0, a0, a3 ; RV32ZBA-NEXT: sw a0, 0(a1) ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: sh1add a2, a0, a0 ; RV64ZBA-NEXT: sh2add a2, a2, a0 ; RV64ZBA-NEXT: srli a0, a2, 32 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 13) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 store i32 %val, i32* %res ret i1 %obit } ; Similar to umulo.i32, but storing the overflow and returning the result. define signext i32 @umulo3.i32(i32 signext %0, i32 signext %1, i32* %2) { ; RV32-LABEL: umulo3.i32: ; RV32: # %bb.0: ; RV32-NEXT: mul a3, a0, a1 ; RV32-NEXT: mulhu a0, a0, a1 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: mv a0, a3 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo3.i32: ; RV64: # %bb.0: ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a1, a0, 32 ; RV64-NEXT: snez a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sw a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo3.i32: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: mul a3, a0, a1 ; RV32ZBA-NEXT: mulhu a0, a0, a1 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: mv a0, a3 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo3.i32: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: zext.w a1, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: mul a3, a0, a1 ; RV64ZBA-NEXT: srli a3, a3, 32 ; RV64ZBA-NEXT: snez a3, a3 ; RV64ZBA-NEXT: mulw a0, a0, a1 ; RV64ZBA-NEXT: sw a3, 0(a2) ; RV64ZBA-NEXT: ret %4 = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %0, i32 %1) %5 = extractvalue { i32, i1 } %4, 1 %6 = extractvalue { i32, i1 } %4, 0 %7 = zext i1 %5 to i32 store i32 %7, i32* %2, align 4 ret i32 %6 } define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) { ; RV32-LABEL: umulo.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mul a5, a3, a0 ; RV32-NEXT: mul a6, a1, a2 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: mulhu a6, a0, a2 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: sltu a6, a5, a6 ; RV32-NEXT: snez a7, a3 ; RV32-NEXT: snez t0, a1 ; RV32-NEXT: and a7, t0, a7 ; RV32-NEXT: mulhu a1, a1, a2 ; RV32-NEXT: snez a1, a1 ; RV32-NEXT: or a1, a7, a1 ; RV32-NEXT: mulhu a3, a3, a0 ; RV32-NEXT: snez a3, a3 ; RV32-NEXT: or a1, a1, a3 ; RV32-NEXT: or a1, a1, a6 ; RV32-NEXT: mul a0, a0, a2 ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a5, 4(a4) ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a3, a0, a1 ; RV64-NEXT: snez a3, a3 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sd a0, 0(a2) ; RV64-NEXT: mv a0, a3 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mul a5, a3, a0 ; RV32ZBA-NEXT: mul a6, a1, a2 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: mulhu a6, a0, a2 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: sltu a6, a5, a6 ; RV32ZBA-NEXT: snez a7, a3 ; RV32ZBA-NEXT: snez t0, a1 ; RV32ZBA-NEXT: and a7, t0, a7 ; RV32ZBA-NEXT: mulhu a1, a1, a2 ; RV32ZBA-NEXT: snez a1, a1 ; RV32ZBA-NEXT: or a1, a7, a1 ; RV32ZBA-NEXT: mulhu a3, a3, a0 ; RV32ZBA-NEXT: snez a3, a3 ; RV32ZBA-NEXT: or a1, a1, a3 ; RV32ZBA-NEXT: or a1, a1, a6 ; RV32ZBA-NEXT: mul a0, a0, a2 ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a3, a0, a1 ; RV64ZBA-NEXT: snez a3, a3 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: sd a0, 0(a2) ; RV64ZBA-NEXT: mv a0, a3 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) { ; RV32-LABEL: umulo2.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: li a3, 13 ; RV32-NEXT: mul a4, a1, a3 ; RV32-NEXT: mulhu a5, a0, a3 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a5, a4, a5 ; RV32-NEXT: mulhu a1, a1, a3 ; RV32-NEXT: snez a1, a1 ; RV32-NEXT: or a1, a1, a5 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: sw a4, 4(a2) ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: li a3, 13 ; RV64-NEXT: mulhu a2, a0, a3 ; RV64-NEXT: snez a2, a2 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: sd a0, 0(a1) ; RV64-NEXT: mv a0, a2 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: li a3, 13 ; RV32ZBA-NEXT: mul a4, a1, a3 ; RV32ZBA-NEXT: mulhu a5, a0, a3 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a5, a4, a5 ; RV32ZBA-NEXT: mulhu a1, a1, a3 ; RV32ZBA-NEXT: snez a1, a1 ; RV32ZBA-NEXT: or a1, a1, a5 ; RV32ZBA-NEXT: mul a0, a0, a3 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: li a3, 13 ; RV64ZBA-NEXT: mulhu a2, a0, a3 ; RV64ZBA-NEXT: snez a2, a2 ; RV64ZBA-NEXT: mul a0, a0, a3 ; RV64ZBA-NEXT: sd a0, 0(a1) ; RV64ZBA-NEXT: mv a0, a2 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 13) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } ; ; Check the use of the overflow bit in combination with a select instruction. ; define i32 @saddo.select.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: saddo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a2, a2, a0 ; RV32-NEXT: slti a3, a1, 0 ; RV32-NEXT: bne a3, a2, .LBB28_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB28_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a2, a1 ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: add a4, a3, a2 ; RV64-NEXT: addw a2, a3, a2 ; RV64-NEXT: bne a2, a4, .LBB28_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB28_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: slt a2, a2, a0 ; RV32ZBA-NEXT: slti a3, a1, 0 ; RV32ZBA-NEXT: bne a3, a2, .LBB28_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB28_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a2, a1 ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: add a4, a3, a2 ; RV64ZBA-NEXT: addw a2, a3, a2 ; RV64ZBA-NEXT: bne a2, a4, .LBB28_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB28_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = select i1 %obit, i32 %v1, i32 %v2 ret i32 %ret } define i1 @saddo.not.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: saddo.not.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.not.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: addw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.not.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: addw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i64 @saddo.select.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: saddo.select.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a4, a1, a3 ; RV32-NEXT: add a5, a0, a2 ; RV32-NEXT: sltu a5, a5, a0 ; RV32-NEXT: add a4, a4, a5 ; RV32-NEXT: xor a4, a1, a4 ; RV32-NEXT: xor a5, a1, a3 ; RV32-NEXT: not a5, a5 ; RV32-NEXT: and a4, a5, a4 ; RV32-NEXT: bltz a4, .LBB30_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB30_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a2, a2, a0 ; RV64-NEXT: slti a3, a1, 0 ; RV64-NEXT: bne a3, a2, .LBB30_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB30_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a4, a1, a3 ; RV32ZBA-NEXT: add a5, a0, a2 ; RV32ZBA-NEXT: sltu a5, a5, a0 ; RV32ZBA-NEXT: add a4, a4, a5 ; RV32ZBA-NEXT: xor a4, a1, a4 ; RV32ZBA-NEXT: xor a5, a1, a3 ; RV32ZBA-NEXT: not a5, a5 ; RV32ZBA-NEXT: and a4, a5, a4 ; RV32ZBA-NEXT: bltz a4, .LBB30_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB30_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a2, a2, a0 ; RV64ZBA-NEXT: slti a3, a1, 0 ; RV64ZBA-NEXT: bne a3, a2, .LBB30_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB30_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = select i1 %obit, i64 %v1, i64 %v2 ret i64 %ret } define i1 @saddo.not.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: saddo.not.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a4, a1, a3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a0, a4, a0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: li a1, -1 ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: xor a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.not.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a4, a1, a3 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a0, a4, a0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: li a1, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: slti a1, a1, 0 ; RV64ZBA-NEXT: xor a0, a1, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i32 @uaddo.select.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: uaddo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: bltu a2, a0, .LBB32_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB32_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a2, a0, a1 ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: bltu a2, a3, .LBB32_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB32_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: bltu a2, a0, .LBB32_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB32_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a2, a0, a1 ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: bltu a2, a3, .LBB32_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB32_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = select i1 %obit, i32 %v1, i32 %v2 ret i32 %ret } define i1 @uaddo.not.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: uaddo.not.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.not.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a1, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.not.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i64 @uaddo.select.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: uaddo.select.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a5, a1, a3 ; RV32-NEXT: add a4, a0, a2 ; RV32-NEXT: sltu a4, a4, a0 ; RV32-NEXT: add a5, a5, a4 ; RV32-NEXT: bne a5, a1, .LBB34_3 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: beqz a4, .LBB34_4 ; RV32-NEXT: .LBB34_2: # %entry ; RV32-NEXT: ret ; RV32-NEXT: .LBB34_3: # %entry ; RV32-NEXT: sltu a4, a5, a1 ; RV32-NEXT: bnez a4, .LBB34_2 ; RV32-NEXT: .LBB34_4: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: bltu a2, a0, .LBB34_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB34_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a5, a1, a3 ; RV32ZBA-NEXT: add a4, a0, a2 ; RV32ZBA-NEXT: sltu a4, a4, a0 ; RV32ZBA-NEXT: add a5, a5, a4 ; RV32ZBA-NEXT: bne a5, a1, .LBB34_3 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: beqz a4, .LBB34_4 ; RV32ZBA-NEXT: .LBB34_2: # %entry ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB34_3: # %entry ; RV32ZBA-NEXT: sltu a4, a5, a1 ; RV32ZBA-NEXT: bnez a4, .LBB34_2 ; RV32ZBA-NEXT: .LBB34_4: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: bltu a2, a0, .LBB34_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB34_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = select i1 %obit, i64 %v1, i64 %v2 ret i64 %ret } define i1 @uaddo.not.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: uaddo.not.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a3, a1, a3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a3, a0 ; RV32-NEXT: beq a2, a1, .LBB35_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 ; RV32-NEXT: .LBB35_2: # %entry ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.not.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a3, a1, a3 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a3, a0 ; RV32ZBA-NEXT: beq a2, a1, .LBB35_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 ; RV32ZBA-NEXT: .LBB35_2: # %entry ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i32 @ssubo.select.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: ssubo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a3, a0, a1 ; RV32-NEXT: slt a3, a3, a0 ; RV32-NEXT: bne a2, a3, .LBB36_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB36_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a2, a1 ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: sub a4, a3, a2 ; RV64-NEXT: subw a2, a3, a2 ; RV64-NEXT: bne a2, a4, .LBB36_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB36_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a3, a0, a1 ; RV32ZBA-NEXT: slt a3, a3, a0 ; RV32ZBA-NEXT: bne a2, a3, .LBB36_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB36_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a2, a1 ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: sub a4, a3, a2 ; RV64ZBA-NEXT: subw a2, a3, a2 ; RV64ZBA-NEXT: bne a2, a4, .LBB36_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB36_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = select i1 %obit, i32 %v1, i32 %v2 ret i32 %ret } define i1 @ssubo.not.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: ssubo.not.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: xor a0, a2, a0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.not.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sub a2, a0, a1 ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: xor a0, a2, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.not.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sub a2, a0, a1 ; RV64ZBA-NEXT: subw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i64 @ssubo.select.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: ssubo.select.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a5, a1, a3 ; RV32-NEXT: sub a4, a5, a4 ; RV32-NEXT: xor a4, a1, a4 ; RV32-NEXT: xor a5, a1, a3 ; RV32-NEXT: and a4, a5, a4 ; RV32-NEXT: bltz a4, .LBB38_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB38_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a3, a0, a1 ; RV64-NEXT: slt a3, a3, a0 ; RV64-NEXT: bne a2, a3, .LBB38_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB38_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a5, a1, a3 ; RV32ZBA-NEXT: sub a4, a5, a4 ; RV32ZBA-NEXT: xor a4, a1, a4 ; RV32ZBA-NEXT: xor a5, a1, a3 ; RV32ZBA-NEXT: and a4, a5, a4 ; RV32ZBA-NEXT: bltz a4, .LBB38_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB38_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a3, a0, a1 ; RV64ZBA-NEXT: slt a3, a3, a0 ; RV64ZBA-NEXT: bne a2, a3, .LBB38_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB38_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = select i1 %obit, i64 %v1, i64 %v2 ret i64 %ret } define i1 @ssub.not.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: ssub.not.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sub a2, a1, a3 ; RV32-NEXT: sub a0, a2, a0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: xor a0, a2, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssub.not.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sub a2, a1, a3 ; RV32ZBA-NEXT: sub a0, a2, a0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssub.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: slt a0, a1, a0 ; RV64ZBA-NEXT: xor a0, a2, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i32 @usubo.select.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: usubo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a2, a0, a1 ; RV32-NEXT: bltu a0, a2, .LBB40_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB40_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a2, a0, a1 ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: bltu a3, a2, .LBB40_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB40_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a2, a0, a1 ; RV32ZBA-NEXT: bltu a0, a2, .LBB40_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB40_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a2, a0, a1 ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: bltu a3, a2, .LBB40_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB40_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = select i1 %obit, i32 %v1, i32 %v2 ret i32 %ret } define i1 @usubo.not.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: usubo.not.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.not.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.not.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i64 @usubo.select.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: usubo.select.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a5, a1, a3 ; RV32-NEXT: sub a4, a5, a4 ; RV32-NEXT: beq a4, a1, .LBB42_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a4, a1, a4 ; RV32-NEXT: beqz a4, .LBB42_3 ; RV32-NEXT: j .LBB42_4 ; RV32-NEXT: .LBB42_2: ; RV32-NEXT: sub a4, a0, a2 ; RV32-NEXT: sltu a4, a0, a4 ; RV32-NEXT: bnez a4, .LBB42_4 ; RV32-NEXT: .LBB42_3: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB42_4: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a2, a0, a1 ; RV64-NEXT: bltu a0, a2, .LBB42_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB42_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a5, a1, a3 ; RV32ZBA-NEXT: sub a4, a5, a4 ; RV32ZBA-NEXT: beq a4, a1, .LBB42_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a4, a1, a4 ; RV32ZBA-NEXT: beqz a4, .LBB42_3 ; RV32ZBA-NEXT: j .LBB42_4 ; RV32ZBA-NEXT: .LBB42_2: ; RV32ZBA-NEXT: sub a4, a0, a2 ; RV32ZBA-NEXT: sltu a4, a0, a4 ; RV32ZBA-NEXT: bnez a4, .LBB42_4 ; RV32ZBA-NEXT: .LBB42_3: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB42_4: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a2, a0, a1 ; RV64ZBA-NEXT: bltu a0, a2, .LBB42_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB42_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = select i1 %obit, i64 %v1, i64 %v2 ret i64 %ret } define i1 @usubo.not.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: usubo.not.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a4 ; RV32-NEXT: beq a3, a1, .LBB43_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; RV32-NEXT: .LBB43_2: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.not.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a4 ; RV32ZBA-NEXT: beq a3, a1, .LBB43_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB43_2: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i32 @smulo.select.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: smulo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a3, a0, a1 ; RV32-NEXT: srai a3, a3, 31 ; RV32-NEXT: bne a2, a3, .LBB44_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB44_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a2, a1 ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: mul a4, a3, a2 ; RV64-NEXT: mulw a2, a3, a2 ; RV64-NEXT: bne a2, a4, .LBB44_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB44_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a3, a0, a1 ; RV32ZBA-NEXT: srai a3, a3, 31 ; RV32ZBA-NEXT: bne a2, a3, .LBB44_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB44_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a2, a1 ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: mul a4, a3, a2 ; RV64ZBA-NEXT: mulw a2, a3, a2 ; RV64ZBA-NEXT: bne a2, a4, .LBB44_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB44_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = select i1 %obit, i32 %v1, i32 %v2 ret i32 %ret } define i1 @smulo.not.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: smulo.not.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xor a0, a2, a0 ; RV32-NEXT: seqz a0, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.not.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: mul a2, a0, a1 ; RV64-NEXT: mulw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: srai a0, a0, 31 ; RV32ZBA-NEXT: xor a0, a2, a0 ; RV32ZBA-NEXT: seqz a0, a0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.not.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: mul a2, a0, a1 ; RV64ZBA-NEXT: mulw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i64 @smulo.select.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: smulo.select.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset s0, -4 ; RV32-NEXT: mulhu a4, a0, a2 ; RV32-NEXT: mul a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a5, a4, a5 ; RV32-NEXT: mulhu a6, a1, a2 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: mul a6, a0, a3 ; RV32-NEXT: add a4, a6, a4 ; RV32-NEXT: sltu a6, a4, a6 ; RV32-NEXT: mulhu a7, a0, a3 ; RV32-NEXT: add a6, a7, a6 ; RV32-NEXT: add a6, a5, a6 ; RV32-NEXT: mul a7, a1, a3 ; RV32-NEXT: add t0, a7, a6 ; RV32-NEXT: srai t1, a1, 31 ; RV32-NEXT: mul t2, a2, t1 ; RV32-NEXT: srai t3, a3, 31 ; RV32-NEXT: mul t4, t3, a0 ; RV32-NEXT: add t5, t4, t2 ; RV32-NEXT: add t6, t0, t5 ; RV32-NEXT: sltu s0, t6, t0 ; RV32-NEXT: sltu a7, t0, a7 ; RV32-NEXT: sltu a5, a6, a5 ; RV32-NEXT: mulhu a6, a1, a3 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: add a5, a5, a7 ; RV32-NEXT: mulhu a6, a2, t1 ; RV32-NEXT: add a6, a6, t2 ; RV32-NEXT: mul a7, a3, t1 ; RV32-NEXT: add a6, a6, a7 ; RV32-NEXT: mul a7, t3, a1 ; RV32-NEXT: mulhu t0, t3, a0 ; RV32-NEXT: add a7, t0, a7 ; RV32-NEXT: add a7, a7, t4 ; RV32-NEXT: add a6, a7, a6 ; RV32-NEXT: sltu a7, t5, t4 ; RV32-NEXT: add a6, a6, a7 ; RV32-NEXT: add a5, a5, a6 ; RV32-NEXT: add a5, a5, s0 ; RV32-NEXT: srai a4, a4, 31 ; RV32-NEXT: xor a5, a5, a4 ; RV32-NEXT: xor a4, t6, a4 ; RV32-NEXT: or a4, a4, a5 ; RV32-NEXT: bnez a4, .LBB46_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB46_2: # %entry ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a3, a0, a1 ; RV64-NEXT: srai a3, a3, 63 ; RV64-NEXT: bne a2, a3, .LBB46_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB46_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi sp, sp, -16 ; RV32ZBA-NEXT: .cfi_def_cfa_offset 16 ; RV32ZBA-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32ZBA-NEXT: .cfi_offset s0, -4 ; RV32ZBA-NEXT: mulhu a4, a0, a2 ; RV32ZBA-NEXT: mul a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a5, a4, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a2 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: mul a6, a0, a3 ; RV32ZBA-NEXT: add a4, a6, a4 ; RV32ZBA-NEXT: sltu a6, a4, a6 ; RV32ZBA-NEXT: mulhu a7, a0, a3 ; RV32ZBA-NEXT: add a6, a7, a6 ; RV32ZBA-NEXT: add a6, a5, a6 ; RV32ZBA-NEXT: mul a7, a1, a3 ; RV32ZBA-NEXT: add t0, a7, a6 ; RV32ZBA-NEXT: srai t1, a1, 31 ; RV32ZBA-NEXT: mul t2, a2, t1 ; RV32ZBA-NEXT: srai t3, a3, 31 ; RV32ZBA-NEXT: mul t4, t3, a0 ; RV32ZBA-NEXT: add t5, t4, t2 ; RV32ZBA-NEXT: add t6, t0, t5 ; RV32ZBA-NEXT: sltu s0, t6, t0 ; RV32ZBA-NEXT: sltu a7, t0, a7 ; RV32ZBA-NEXT: sltu a5, a6, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a3 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: add a5, a5, a7 ; RV32ZBA-NEXT: mulhu a6, a2, t1 ; RV32ZBA-NEXT: add a6, a6, t2 ; RV32ZBA-NEXT: mul a7, a3, t1 ; RV32ZBA-NEXT: add a6, a6, a7 ; RV32ZBA-NEXT: mul a7, t3, a1 ; RV32ZBA-NEXT: mulhu t0, t3, a0 ; RV32ZBA-NEXT: add a7, t0, a7 ; RV32ZBA-NEXT: add a7, a7, t4 ; RV32ZBA-NEXT: add a6, a7, a6 ; RV32ZBA-NEXT: sltu a7, t5, t4 ; RV32ZBA-NEXT: add a6, a6, a7 ; RV32ZBA-NEXT: add a5, a5, a6 ; RV32ZBA-NEXT: add a5, a5, s0 ; RV32ZBA-NEXT: srai a4, a4, 31 ; RV32ZBA-NEXT: xor a5, a5, a4 ; RV32ZBA-NEXT: xor a4, t6, a4 ; RV32ZBA-NEXT: or a4, a4, a5 ; RV32ZBA-NEXT: bnez a4, .LBB46_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB46_2: # %entry ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a3, a0, a1 ; RV64ZBA-NEXT: srai a3, a3, 63 ; RV64ZBA-NEXT: bne a2, a3, .LBB46_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB46_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = select i1 %obit, i64 %v1, i64 %v2 ret i64 %ret } define i1 @smulo.not.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: smulo.not.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset s0, -4 ; RV32-NEXT: mulhu a4, a0, a2 ; RV32-NEXT: mul a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a5, a4, a5 ; RV32-NEXT: mulhu a6, a1, a2 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: mul a6, a0, a3 ; RV32-NEXT: add a4, a6, a4 ; RV32-NEXT: sltu a6, a4, a6 ; RV32-NEXT: mulhu a7, a0, a3 ; RV32-NEXT: add a6, a7, a6 ; RV32-NEXT: add a6, a5, a6 ; RV32-NEXT: mul a7, a1, a3 ; RV32-NEXT: add t0, a7, a6 ; RV32-NEXT: srai t1, a1, 31 ; RV32-NEXT: mul t2, a2, t1 ; RV32-NEXT: srai t3, a3, 31 ; RV32-NEXT: mul t4, t3, a0 ; RV32-NEXT: add t5, t4, t2 ; RV32-NEXT: add t6, t0, t5 ; RV32-NEXT: sltu s0, t6, t0 ; RV32-NEXT: sltu a7, t0, a7 ; RV32-NEXT: sltu a5, a6, a5 ; RV32-NEXT: mulhu a6, a1, a3 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: add a5, a5, a7 ; RV32-NEXT: mulhu a2, a2, t1 ; RV32-NEXT: add a2, a2, t2 ; RV32-NEXT: mul a3, a3, t1 ; RV32-NEXT: add a2, a2, a3 ; RV32-NEXT: mul a1, t3, a1 ; RV32-NEXT: mulhu a0, t3, a0 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, a0, t4 ; RV32-NEXT: add a0, a0, a2 ; RV32-NEXT: sltu a1, t5, t4 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, a5, a0 ; RV32-NEXT: add a0, a0, s0 ; RV32-NEXT: srai a1, a4, 31 ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t6, a1 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: seqz a0, a0 ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: xor a0, a2, a0 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.not.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi sp, sp, -16 ; RV32ZBA-NEXT: .cfi_def_cfa_offset 16 ; RV32ZBA-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32ZBA-NEXT: .cfi_offset s0, -4 ; RV32ZBA-NEXT: mulhu a4, a0, a2 ; RV32ZBA-NEXT: mul a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a5, a4, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a2 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: mul a6, a0, a3 ; RV32ZBA-NEXT: add a4, a6, a4 ; RV32ZBA-NEXT: sltu a6, a4, a6 ; RV32ZBA-NEXT: mulhu a7, a0, a3 ; RV32ZBA-NEXT: add a6, a7, a6 ; RV32ZBA-NEXT: add a6, a5, a6 ; RV32ZBA-NEXT: mul a7, a1, a3 ; RV32ZBA-NEXT: add t0, a7, a6 ; RV32ZBA-NEXT: srai t1, a1, 31 ; RV32ZBA-NEXT: mul t2, a2, t1 ; RV32ZBA-NEXT: srai t3, a3, 31 ; RV32ZBA-NEXT: mul t4, t3, a0 ; RV32ZBA-NEXT: add t5, t4, t2 ; RV32ZBA-NEXT: add t6, t0, t5 ; RV32ZBA-NEXT: sltu s0, t6, t0 ; RV32ZBA-NEXT: sltu a7, t0, a7 ; RV32ZBA-NEXT: sltu a5, a6, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a3 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: add a5, a5, a7 ; RV32ZBA-NEXT: mulhu a2, a2, t1 ; RV32ZBA-NEXT: add a2, a2, t2 ; RV32ZBA-NEXT: mul a3, a3, t1 ; RV32ZBA-NEXT: add a2, a2, a3 ; RV32ZBA-NEXT: mul a1, t3, a1 ; RV32ZBA-NEXT: mulhu a0, t3, a0 ; RV32ZBA-NEXT: add a0, a0, a1 ; RV32ZBA-NEXT: add a0, a0, t4 ; RV32ZBA-NEXT: add a0, a0, a2 ; RV32ZBA-NEXT: sltu a1, t5, t4 ; RV32ZBA-NEXT: add a0, a0, a1 ; RV32ZBA-NEXT: add a0, a5, a0 ; RV32ZBA-NEXT: add a0, a0, s0 ; RV32ZBA-NEXT: srai a1, a4, 31 ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t6, a1 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: seqz a0, a0 ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 ; RV64ZBA-NEXT: xor a0, a2, a0 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i32 @umulo.select.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: umulo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a2, a0, a1 ; RV32-NEXT: bnez a2, .LBB48_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB48_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: slli a2, a1, 32 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: mulhu a2, a3, a2 ; RV64-NEXT: srli a2, a2, 32 ; RV64-NEXT: bnez a2, .LBB48_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB48_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a2, a0, a1 ; RV32ZBA-NEXT: bnez a2, .LBB48_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB48_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: zext.w a2, a1 ; RV64ZBA-NEXT: zext.w a3, a0 ; RV64ZBA-NEXT: mul a2, a3, a2 ; RV64ZBA-NEXT: srli a2, a2, 32 ; RV64ZBA-NEXT: bnez a2, .LBB48_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB48_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = select i1 %obit, i32 %v1, i32 %v2 ret i32 %ret } define i1 @umulo.not.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: umulo.not.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a0, a0, a1 ; RV32-NEXT: seqz a0, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.not.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a0, a0, a1 ; RV32ZBA-NEXT: seqz a0, a0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.not.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: zext.w a1, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srli a0, a0, 32 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) %obit = extractvalue {i32, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } define i64 @umulo.select.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: umulo.select.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mul a4, a3, a0 ; RV32-NEXT: mul a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: mulhu a5, a0, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a4, a4, a5 ; RV32-NEXT: snez a5, a3 ; RV32-NEXT: snez a6, a1 ; RV32-NEXT: and a5, a6, a5 ; RV32-NEXT: mulhu a6, a1, a2 ; RV32-NEXT: snez a6, a6 ; RV32-NEXT: or a5, a5, a6 ; RV32-NEXT: mulhu a6, a3, a0 ; RV32-NEXT: snez a6, a6 ; RV32-NEXT: or a5, a5, a6 ; RV32-NEXT: or a4, a5, a4 ; RV32-NEXT: bnez a4, .LBB50_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB50_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a2, a0, a1 ; RV64-NEXT: bnez a2, .LBB50_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB50_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mul a4, a3, a0 ; RV32ZBA-NEXT: mul a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: mulhu a5, a0, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a4, a4, a5 ; RV32ZBA-NEXT: snez a5, a3 ; RV32ZBA-NEXT: snez a6, a1 ; RV32ZBA-NEXT: and a5, a6, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a2 ; RV32ZBA-NEXT: snez a6, a6 ; RV32ZBA-NEXT: or a5, a5, a6 ; RV32ZBA-NEXT: mulhu a6, a3, a0 ; RV32ZBA-NEXT: snez a6, a6 ; RV32ZBA-NEXT: or a5, a5, a6 ; RV32ZBA-NEXT: or a4, a5, a4 ; RV32ZBA-NEXT: bnez a4, .LBB50_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB50_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a2, a0, a1 ; RV64ZBA-NEXT: bnez a2, .LBB50_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB50_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = select i1 %obit, i64 %v1, i64 %v2 ret i64 %ret } define i1 @umulo.not.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: umulo.not.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mul a4, a3, a0 ; RV32-NEXT: mul a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: mulhu a5, a0, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a4, a4, a5 ; RV32-NEXT: snez a5, a3 ; RV32-NEXT: snez a6, a1 ; RV32-NEXT: and a5, a6, a5 ; RV32-NEXT: mulhu a1, a1, a2 ; RV32-NEXT: snez a1, a1 ; RV32-NEXT: or a1, a5, a1 ; RV32-NEXT: mulhu a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: or a0, a0, a4 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.not.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mul a4, a3, a0 ; RV32ZBA-NEXT: mul a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: mulhu a5, a0, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a4, a4, a5 ; RV32ZBA-NEXT: snez a5, a3 ; RV32ZBA-NEXT: snez a6, a1 ; RV32ZBA-NEXT: and a5, a6, a5 ; RV32ZBA-NEXT: mulhu a1, a1, a2 ; RV32ZBA-NEXT: snez a1, a1 ; RV32ZBA-NEXT: or a1, a5, a1 ; RV32ZBA-NEXT: mulhu a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: or a0, a0, a4 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a0, a0, a1 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) %obit = extractvalue {i64, i1} %t, 1 %ret = xor i1 %obit, true ret i1 %ret } ; ; Check the use of the overflow bit in combination with a branch instruction. ; define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: saddo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: beq a1, a0, .LBB52_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB52_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: addw a0, a0, a1 ; RV64-NEXT: beq a0, a2, .LBB52_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB52_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: beq a1, a0, .LBB52_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB52_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: addw a0, a0, a1 ; RV64ZBA-NEXT: beq a0, a2, .LBB52_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB52_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: saddo.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a4, a1, a3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a0, a4, a0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: bgez a0, .LBB53_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB53_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: beq a1, a0, .LBB53_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB53_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a4, a1, a3 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a0, a4, a0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: bgez a0, .LBB53_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB53_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: slti a1, a1, 0 ; RV64ZBA-NEXT: beq a1, a0, .LBB53_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB53_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: uaddo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: bgeu a1, a0, .LBB54_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB54_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: bgeu a1, a0, .LBB54_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB54_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: bgeu a1, a0, .LBB54_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB54_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB54_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB54_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: uaddo.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a3, a1, a3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a3, a0 ; RV32-NEXT: beq a2, a1, .LBB55_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 ; RV32-NEXT: .LBB55_2: # %entry ; RV32-NEXT: beqz a0, .LBB55_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB55_4: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: bgeu a1, a0, .LBB55_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB55_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a3, a1, a3 ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a3, a0 ; RV32ZBA-NEXT: beq a2, a1, .LBB55_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 ; RV32ZBA-NEXT: .LBB55_2: # %entry ; RV32ZBA-NEXT: beqz a0, .LBB55_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB55_4: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a1 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB55_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB55_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: ssubo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: beq a2, a0, .LBB56_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB56_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.br.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sub a2, a0, a1 ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: beq a0, a2, .LBB56_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB56_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: beq a2, a0, .LBB56_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB56_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.br.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sub a2, a0, a1 ; RV64ZBA-NEXT: subw a0, a0, a1 ; RV64ZBA-NEXT: beq a0, a2, .LBB56_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB56_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: ssubo.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sub a2, a1, a3 ; RV32-NEXT: sub a0, a2, a0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: bgez a0, .LBB57_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB57_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: beq a2, a0, .LBB57_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB57_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sub a2, a1, a3 ; RV32ZBA-NEXT: sub a0, a2, a0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: bgez a0, .LBB57_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB57_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: slt a0, a1, a0 ; RV64ZBA-NEXT: beq a2, a0, .LBB57_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB57_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: usubo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: bgeu a0, a1, .LBB58_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB58_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: bgeu a0, a1, .LBB58_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB58_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: bgeu a0, a1, .LBB58_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB58_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: bgeu a0, a1, .LBB58_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB58_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: usubo.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a4 ; RV32-NEXT: beq a3, a1, .LBB59_3 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: bnez a0, .LBB59_4 ; RV32-NEXT: .LBB59_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; RV32-NEXT: .LBB59_3: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: beqz a0, .LBB59_2 ; RV32-NEXT: .LBB59_4: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: bgeu a0, a1, .LBB59_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB59_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a4 ; RV32ZBA-NEXT: beq a3, a1, .LBB59_3 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: bnez a0, .LBB59_4 ; RV32ZBA-NEXT: .LBB59_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB59_3: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: beqz a0, .LBB59_2 ; RV32ZBA-NEXT: .LBB59_4: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: bgeu a0, a1, .LBB59_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB59_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: smulo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: beq a2, a0, .LBB60_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB60_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.br.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: mul a2, a0, a1 ; RV64-NEXT: mulw a0, a0, a1 ; RV64-NEXT: beq a0, a2, .LBB60_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB60_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: srai a0, a0, 31 ; RV32ZBA-NEXT: beq a2, a0, .LBB60_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB60_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.br.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a1, a1 ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: mul a2, a0, a1 ; RV64ZBA-NEXT: mulw a0, a0, a1 ; RV64ZBA-NEXT: beq a0, a2, .LBB60_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB60_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: smulo.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: .cfi_offset s0, -4 ; RV32-NEXT: mulhu a4, a0, a2 ; RV32-NEXT: mul a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a5, a4, a5 ; RV32-NEXT: mulhu a6, a1, a2 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: mul a6, a0, a3 ; RV32-NEXT: add a4, a6, a4 ; RV32-NEXT: sltu a6, a4, a6 ; RV32-NEXT: mulhu a7, a0, a3 ; RV32-NEXT: add a6, a7, a6 ; RV32-NEXT: add a6, a5, a6 ; RV32-NEXT: mul a7, a1, a3 ; RV32-NEXT: add t0, a7, a6 ; RV32-NEXT: srai t1, a1, 31 ; RV32-NEXT: mul t2, a2, t1 ; RV32-NEXT: srai t3, a3, 31 ; RV32-NEXT: mul t4, t3, a0 ; RV32-NEXT: add t5, t4, t2 ; RV32-NEXT: add t6, t0, t5 ; RV32-NEXT: sltu s0, t6, t0 ; RV32-NEXT: sltu a7, t0, a7 ; RV32-NEXT: sltu a5, a6, a5 ; RV32-NEXT: mulhu a6, a1, a3 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: add a5, a5, a7 ; RV32-NEXT: mulhu a2, a2, t1 ; RV32-NEXT: add a2, a2, t2 ; RV32-NEXT: mul a3, a3, t1 ; RV32-NEXT: add a2, a2, a3 ; RV32-NEXT: mul a1, t3, a1 ; RV32-NEXT: mulhu a0, t3, a0 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, a0, t4 ; RV32-NEXT: add a0, a0, a2 ; RV32-NEXT: sltu a1, t5, t4 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: add a0, a5, a0 ; RV32-NEXT: add a0, a0, s0 ; RV32-NEXT: srai a1, a4, 31 ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t6, a1 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: beqz a0, .LBB61_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: j .LBB61_3 ; RV32-NEXT: .LBB61_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: .LBB61_3: # %overflow ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: beq a2, a0, .LBB61_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB61_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi sp, sp, -16 ; RV32ZBA-NEXT: .cfi_def_cfa_offset 16 ; RV32ZBA-NEXT: sw s0, 12(sp) # 4-byte Folded Spill ; RV32ZBA-NEXT: .cfi_offset s0, -4 ; RV32ZBA-NEXT: mulhu a4, a0, a2 ; RV32ZBA-NEXT: mul a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a5, a4, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a2 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: mul a6, a0, a3 ; RV32ZBA-NEXT: add a4, a6, a4 ; RV32ZBA-NEXT: sltu a6, a4, a6 ; RV32ZBA-NEXT: mulhu a7, a0, a3 ; RV32ZBA-NEXT: add a6, a7, a6 ; RV32ZBA-NEXT: add a6, a5, a6 ; RV32ZBA-NEXT: mul a7, a1, a3 ; RV32ZBA-NEXT: add t0, a7, a6 ; RV32ZBA-NEXT: srai t1, a1, 31 ; RV32ZBA-NEXT: mul t2, a2, t1 ; RV32ZBA-NEXT: srai t3, a3, 31 ; RV32ZBA-NEXT: mul t4, t3, a0 ; RV32ZBA-NEXT: add t5, t4, t2 ; RV32ZBA-NEXT: add t6, t0, t5 ; RV32ZBA-NEXT: sltu s0, t6, t0 ; RV32ZBA-NEXT: sltu a7, t0, a7 ; RV32ZBA-NEXT: sltu a5, a6, a5 ; RV32ZBA-NEXT: mulhu a6, a1, a3 ; RV32ZBA-NEXT: add a5, a6, a5 ; RV32ZBA-NEXT: add a5, a5, a7 ; RV32ZBA-NEXT: mulhu a2, a2, t1 ; RV32ZBA-NEXT: add a2, a2, t2 ; RV32ZBA-NEXT: mul a3, a3, t1 ; RV32ZBA-NEXT: add a2, a2, a3 ; RV32ZBA-NEXT: mul a1, t3, a1 ; RV32ZBA-NEXT: mulhu a0, t3, a0 ; RV32ZBA-NEXT: add a0, a0, a1 ; RV32ZBA-NEXT: add a0, a0, t4 ; RV32ZBA-NEXT: add a0, a0, a2 ; RV32ZBA-NEXT: sltu a1, t5, t4 ; RV32ZBA-NEXT: add a0, a0, a1 ; RV32ZBA-NEXT: add a0, a5, a0 ; RV32ZBA-NEXT: add a0, a0, s0 ; RV32ZBA-NEXT: srai a1, a4, 31 ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t6, a1 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: beqz a0, .LBB61_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: j .LBB61_3 ; RV32ZBA-NEXT: .LBB61_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: .LBB61_3: # %overflow ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 ; RV64ZBA-NEXT: beq a2, a0, .LBB61_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB61_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @smulo2.br.i64(i64 %v1) { ; RV32-LABEL: smulo2.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: li a2, -13 ; RV32-NEXT: mulhu a3, a0, a2 ; RV32-NEXT: mul a4, a1, a2 ; RV32-NEXT: add a3, a4, a3 ; RV32-NEXT: sltu a4, a3, a4 ; RV32-NEXT: mulhu a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sub a3, a3, a0 ; RV32-NEXT: neg a5, a0 ; RV32-NEXT: sltu a6, a3, a5 ; RV32-NEXT: li a7, -1 ; RV32-NEXT: mulhu t0, a0, a7 ; RV32-NEXT: add a6, t0, a6 ; RV32-NEXT: add a6, a4, a6 ; RV32-NEXT: sub t1, a6, a1 ; RV32-NEXT: srai t2, a1, 31 ; RV32-NEXT: mul t3, t2, a2 ; RV32-NEXT: sub t3, t3, a0 ; RV32-NEXT: add t4, t1, t3 ; RV32-NEXT: sltu t5, t4, t1 ; RV32-NEXT: neg t6, a1 ; RV32-NEXT: sltu t1, t1, t6 ; RV32-NEXT: sltu a4, a6, a4 ; RV32-NEXT: mulhu a6, a1, a7 ; RV32-NEXT: add a4, a6, a4 ; RV32-NEXT: add a4, a4, t1 ; RV32-NEXT: sltu a5, t3, a5 ; RV32-NEXT: mulh a2, t2, a2 ; RV32-NEXT: sub a0, t0, a0 ; RV32-NEXT: sub a0, a0, a1 ; RV32-NEXT: add a0, a0, a2 ; RV32-NEXT: add a0, a0, a5 ; RV32-NEXT: add a0, a4, a0 ; RV32-NEXT: add a0, a0, t5 ; RV32-NEXT: srai a1, a3, 31 ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t4, a1 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: beqz a0, .LBB62_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB62_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: li a1, -13 ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: beq a2, a0, .LBB62_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB62_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: li a2, -13 ; RV32ZBA-NEXT: mulhu a3, a0, a2 ; RV32ZBA-NEXT: mul a4, a1, a2 ; RV32ZBA-NEXT: add a3, a4, a3 ; RV32ZBA-NEXT: sltu a4, a3, a4 ; RV32ZBA-NEXT: mulhu a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sub a3, a3, a0 ; RV32ZBA-NEXT: neg a5, a0 ; RV32ZBA-NEXT: sltu a6, a3, a5 ; RV32ZBA-NEXT: li a7, -1 ; RV32ZBA-NEXT: mulhu t0, a0, a7 ; RV32ZBA-NEXT: add a6, t0, a6 ; RV32ZBA-NEXT: add a6, a4, a6 ; RV32ZBA-NEXT: sub t1, a6, a1 ; RV32ZBA-NEXT: srai t2, a1, 31 ; RV32ZBA-NEXT: mul t3, t2, a2 ; RV32ZBA-NEXT: sub t3, t3, a0 ; RV32ZBA-NEXT: add t4, t1, t3 ; RV32ZBA-NEXT: sltu t5, t4, t1 ; RV32ZBA-NEXT: neg t6, a1 ; RV32ZBA-NEXT: sltu t1, t1, t6 ; RV32ZBA-NEXT: sltu a4, a6, a4 ; RV32ZBA-NEXT: mulhu a6, a1, a7 ; RV32ZBA-NEXT: add a4, a6, a4 ; RV32ZBA-NEXT: add a4, a4, t1 ; RV32ZBA-NEXT: sltu a5, t3, a5 ; RV32ZBA-NEXT: mulh a2, t2, a2 ; RV32ZBA-NEXT: sub a0, t0, a0 ; RV32ZBA-NEXT: sub a0, a0, a1 ; RV32ZBA-NEXT: add a0, a0, a2 ; RV32ZBA-NEXT: add a0, a0, a5 ; RV32ZBA-NEXT: add a0, a4, a0 ; RV32ZBA-NEXT: add a0, a0, t5 ; RV32ZBA-NEXT: srai a1, a3, 31 ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t4, a1 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: beqz a0, .LBB62_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB62_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: li a1, -13 ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 ; RV64ZBA-NEXT: beq a2, a0, .LBB62_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB62_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) { ; RV32-LABEL: umulo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a0, a0, a1 ; RV32-NEXT: beqz a0, .LBB63_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB63_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: beqz a0, .LBB63_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB63_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a0, a0, a1 ; RV32ZBA-NEXT: beqz a0, .LBB63_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB63_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: zext.w a1, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srli a0, a0, 32 ; RV64ZBA-NEXT: beqz a0, .LBB63_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB63_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) %val = extractvalue {i32, i1} %t, 0 %obit = extractvalue {i32, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) { ; RV32-LABEL: umulo.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mul a4, a3, a0 ; RV32-NEXT: mul a5, a1, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: mulhu a5, a0, a2 ; RV32-NEXT: add a4, a5, a4 ; RV32-NEXT: sltu a4, a4, a5 ; RV32-NEXT: snez a5, a3 ; RV32-NEXT: snez a6, a1 ; RV32-NEXT: and a5, a6, a5 ; RV32-NEXT: mulhu a1, a1, a2 ; RV32-NEXT: snez a1, a1 ; RV32-NEXT: or a1, a5, a1 ; RV32-NEXT: mulhu a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: or a0, a0, a4 ; RV32-NEXT: beqz a0, .LBB64_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB64_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: beqz a0, .LBB64_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB64_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mul a4, a3, a0 ; RV32ZBA-NEXT: mul a5, a1, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: mulhu a5, a0, a2 ; RV32ZBA-NEXT: add a4, a5, a4 ; RV32ZBA-NEXT: sltu a4, a4, a5 ; RV32ZBA-NEXT: snez a5, a3 ; RV32ZBA-NEXT: snez a6, a1 ; RV32ZBA-NEXT: and a5, a6, a5 ; RV32ZBA-NEXT: mulhu a1, a1, a2 ; RV32ZBA-NEXT: snez a1, a1 ; RV32ZBA-NEXT: or a1, a5, a1 ; RV32ZBA-NEXT: mulhu a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: or a0, a0, a4 ; RV32ZBA-NEXT: beqz a0, .LBB64_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB64_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a0, a0, a1 ; RV64ZBA-NEXT: beqz a0, .LBB64_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB64_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @umulo2.br.i64(i64 %v1) { ; RV32-LABEL: umulo2.br.i64: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a0 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a1, a1 ; RV32-NEXT: add a2, a2, a0 ; RV32-NEXT: beq a2, a1, .LBB65_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 ; RV32-NEXT: .LBB65_2: # %entry ; RV32-NEXT: beqz a0, .LBB65_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB65_4: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a0 ; RV64-NEXT: bgeu a1, a0, .LBB65_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB65_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.br.i64: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a0 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a1, a1 ; RV32ZBA-NEXT: add a2, a2, a0 ; RV32ZBA-NEXT: beq a2, a1, .LBB65_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 ; RV32ZBA-NEXT: .LBB65_2: # %entry ; RV32ZBA-NEXT: beqz a0, .LBB65_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB65_4: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a0 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB65_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB65_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %overflow, label %continue overflow: ret i1 false continue: ret i1 true } define zeroext i1 @uaddo.i64.constant(i64 %v1, i64* %res) { ; RV32-LABEL: uaddo.i64.constant: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mv a3, a0 ; RV32-NEXT: addi a4, a0, 2 ; RV32-NEXT: sltu a0, a4, a0 ; RV32-NEXT: add a5, a1, a0 ; RV32-NEXT: bgeu a4, a3, .LBB66_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a5, a1 ; RV32-NEXT: .LBB66_2: # %entry ; RV32-NEXT: sw a4, 0(a2) ; RV32-NEXT: sw a5, 4(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64.constant: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addi a2, a0, 2 ; RV64-NEXT: sltu a0, a2, a0 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64.constant: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mv a3, a0 ; RV32ZBA-NEXT: addi a4, a0, 2 ; RV32ZBA-NEXT: sltu a0, a4, a0 ; RV32ZBA-NEXT: add a5, a1, a0 ; RV32ZBA-NEXT: bgeu a4, a3, .LBB66_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a5, a1 ; RV32ZBA-NEXT: .LBB66_2: # %entry ; RV32ZBA-NEXT: sw a4, 0(a2) ; RV32ZBA-NEXT: sw a5, 4(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64.constant: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addi a2, a0, 2 ; RV64ZBA-NEXT: sltu a0, a2, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, i64* %res) { ; RV32-LABEL: uaddo.i64.constant_2048: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mv a3, a0 ; RV32-NEXT: addi a0, a0, 2047 ; RV32-NEXT: addi a4, a0, 1 ; RV32-NEXT: sltu a0, a4, a3 ; RV32-NEXT: add a5, a1, a0 ; RV32-NEXT: bgeu a4, a3, .LBB67_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a5, a1 ; RV32-NEXT: .LBB67_2: # %entry ; RV32-NEXT: sw a4, 0(a2) ; RV32-NEXT: sw a5, 4(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64.constant_2048: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addi a2, a0, 2047 ; RV64-NEXT: addi a2, a2, 1 ; RV64-NEXT: sltu a0, a2, a0 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64.constant_2048: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mv a3, a0 ; RV32ZBA-NEXT: addi a0, a0, 2047 ; RV32ZBA-NEXT: addi a4, a0, 1 ; RV32ZBA-NEXT: sltu a0, a4, a3 ; RV32ZBA-NEXT: add a5, a1, a0 ; RV32ZBA-NEXT: bgeu a4, a3, .LBB67_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a5, a1 ; RV32ZBA-NEXT: .LBB67_2: # %entry ; RV32ZBA-NEXT: sw a4, 0(a2) ; RV32ZBA-NEXT: sw a5, 4(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64.constant_2048: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addi a2, a0, 2047 ; RV64ZBA-NEXT: addi a2, a2, 1 ; RV64ZBA-NEXT: sltu a0, a2, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2048) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, i64* %res) { ; RV32-LABEL: uaddo.i64.constant_2049: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mv a3, a0 ; RV32-NEXT: addi a0, a0, 2047 ; RV32-NEXT: addi a4, a0, 2 ; RV32-NEXT: sltu a0, a4, a3 ; RV32-NEXT: add a5, a1, a0 ; RV32-NEXT: bgeu a4, a3, .LBB68_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a5, a1 ; RV32-NEXT: .LBB68_2: # %entry ; RV32-NEXT: sw a4, 0(a2) ; RV32-NEXT: sw a5, 4(a2) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64.constant_2049: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addi a2, a0, 2047 ; RV64-NEXT: addi a2, a2, 2 ; RV64-NEXT: sltu a0, a2, a0 ; RV64-NEXT: sd a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64.constant_2049: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mv a3, a0 ; RV32ZBA-NEXT: addi a0, a0, 2047 ; RV32ZBA-NEXT: addi a4, a0, 2 ; RV32ZBA-NEXT: sltu a0, a4, a3 ; RV32ZBA-NEXT: add a5, a1, a0 ; RV32ZBA-NEXT: bgeu a4, a3, .LBB68_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a5, a1 ; RV32ZBA-NEXT: .LBB68_2: # %entry ; RV32ZBA-NEXT: sw a4, 0(a2) ; RV32ZBA-NEXT: sw a5, 4(a2) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64.constant_2049: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addi a2, a0, 2047 ; RV64ZBA-NEXT: addi a2, a2, 2 ; RV64ZBA-NEXT: sltu a0, a2, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2049) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 store i64 %val, i64* %res ret i1 %obit } define i64 @uaddo.i64.constant_setcc_on_overflow_flag(i64* %p) { ; RV32-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: ; RV32: # %bb.0: # %entry ; RV32-NEXT: lw a4, 0(a0) ; RV32-NEXT: lw a3, 4(a0) ; RV32-NEXT: addi a0, a4, 2 ; RV32-NEXT: sltu a2, a0, a4 ; RV32-NEXT: add a1, a3, a2 ; RV32-NEXT: bltu a0, a4, .LBB69_3 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: beqz a2, .LBB69_4 ; RV32-NEXT: .LBB69_2: # %IfNoOverflow ; RV32-NEXT: ret ; RV32-NEXT: .LBB69_3: # %entry ; RV32-NEXT: sltu a2, a1, a3 ; RV32-NEXT: bnez a2, .LBB69_2 ; RV32-NEXT: .LBB69_4: # %IfOverflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: li a1, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: ; RV64: # %bb.0: # %entry ; RV64-NEXT: ld a1, 0(a0) ; RV64-NEXT: addi a0, a1, 2 ; RV64-NEXT: bltu a0, a1, .LBB69_2 ; RV64-NEXT: # %bb.1: # %IfOverflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: .LBB69_2: # %IfNoOverflow ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: lw a4, 0(a0) ; RV32ZBA-NEXT: lw a3, 4(a0) ; RV32ZBA-NEXT: addi a0, a4, 2 ; RV32ZBA-NEXT: sltu a2, a0, a4 ; RV32ZBA-NEXT: add a1, a3, a2 ; RV32ZBA-NEXT: bltu a0, a4, .LBB69_3 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: beqz a2, .LBB69_4 ; RV32ZBA-NEXT: .LBB69_2: # %IfNoOverflow ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB69_3: # %entry ; RV32ZBA-NEXT: sltu a2, a1, a3 ; RV32ZBA-NEXT: bnez a2, .LBB69_2 ; RV32ZBA-NEXT: .LBB69_4: # %IfOverflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: li a1, 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: ld a1, 0(a0) ; RV64ZBA-NEXT: addi a0, a1, 2 ; RV64ZBA-NEXT: bltu a0, a1, .LBB69_2 ; RV64ZBA-NEXT: # %bb.1: # %IfOverflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: .LBB69_2: # %IfNoOverflow ; RV64ZBA-NEXT: ret entry: %v1 = load i64, i64* %p %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2) %val = extractvalue {i64, i1} %t, 0 %obit = extractvalue {i64, i1} %t, 1 br i1 %obit, label %IfNoOverflow, label %IfOverflow IfOverflow: ret i64 0 IfNoOverflow: ret i64 %val } declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone