; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefix=CHECKIF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefix=CHECKIF %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

; Tests codegen for the constrained (strict-FP) float comparison intrinsics,
; one function per predicate: llvm.experimental.constrained.fcmp.f32 (quiet,
; the fcmp_* functions) and llvm.experimental.constrained.fcmps.f32
; (signaling, the fcmps_* functions). CHECKIF covers hardware float with the
; F extension; RV32I/RV64I cover soft-float lowering to compiler-rt libcalls
; (__eqsf2, __ltsf2, __unordsf2, ...).

define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_oeq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqsf2@plt
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqsf2@plt
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)

define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ogt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_oge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_olt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ole:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2@plt
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2@plt
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_one:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a2, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    or a0, a2, a1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2@plt
; RV32I-NEXT:    snez s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2@plt
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ord:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa1, fa1
; CHECKIF-NEXT:    feq.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ueq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a2, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    or a0, a2, a1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2@plt
; RV32I-NEXT:    seqz s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2@plt
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ugt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2@plt
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2@plt
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_uge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ult:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    fle.s a1, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ule:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_une:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nesf2@plt
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nesf2@plt
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_uno:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa1, fa1
; CHECKIF-NEXT:    feq.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmp_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_oeq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqsf2@plt
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqsf2@plt
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)

define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ogt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_oge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_olt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ole:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2@plt
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2@plt
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_one:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    flt.s a1, fa1, fa0
; CHECKIF-NEXT:    or a0, a1, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2@plt
; RV32I-NEXT:    snez s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2@plt
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ord:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa1
; CHECKIF-NEXT:    fle.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ueq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    flt.s a1, fa1, fa0
; CHECKIF-NEXT:    or a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2@plt
; RV32I-NEXT:    seqz s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2@plt
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ugt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2@plt
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2@plt
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_uge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ult:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ule:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_une:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nesf2@plt
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nesf2@plt
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_uno:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa1
; CHECKIF-NEXT:    fle.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcmps_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}