; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32IF
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64IF
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32-ILP32F
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64-LP64F
; RUN: llc -mtriple=riscv32 -mattr=+f,+zfhmin -target-abi=ilp32f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32-ILP32ZFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+f,+zfhmin -target-abi=lp64f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64-LP64ZFHMIN

; Tests passing half arguments and returns without Zfh.
; Covers the cases with and without the F extension, the ilp32f/lp64f
; calling conventions, and the Zfhmin extension.

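; Lacking Zfh, the half argument is extended to float via a libcall to
; __extendhfsf2 before being converted to i32 (a further libcall to
; __fixsfsi/__fixsfdi in the soft-float configurations, fcvt.w.s/fcvt.l.s
; otherwise). A rough C-level equivalent (an illustrative sketch, not the
; source these assertions were generated from):
;   int callee_half_in_regs(int a, _Float16 b) { return a + (int)b; }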
define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV32I-LABEL: callee_half_in_regs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    slli a0, a1, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    call __extendhfsf2@plt
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    add a0, s0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: callee_half_in_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    slli a0, a1, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    call __extendhfsf2@plt
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    addw a0, s0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: callee_half_in_regs:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    mv s0, a0
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:    call __extendhfsf2@plt
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    add a0, s0, a0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: callee_half_in_regs:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    mv s0, a0
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:    call __extendhfsf2@plt
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    addw a0, s0, a0
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32-ILP32F-LABEL: callee_half_in_regs:
; RV32-ILP32F:       # %bb.0:
; RV32-ILP32F-NEXT:    addi sp, sp, -16
; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    mv s0, a0
; RV32-ILP32F-NEXT:    fmv.x.w a0, fa0
; RV32-ILP32F-NEXT:    call __extendhfsf2@plt
; RV32-ILP32F-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT:    add a0, s0, a0
; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    addi sp, sp, 16
; RV32-ILP32F-NEXT:    ret
;
; RV64-LP64F-LABEL: callee_half_in_regs:
; RV64-LP64F:       # %bb.0:
; RV64-LP64F-NEXT:    addi sp, sp, -16
; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    mv s0, a0
; RV64-LP64F-NEXT:    fmv.x.w a0, fa0
; RV64-LP64F-NEXT:    call __extendhfsf2@plt
; RV64-LP64F-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT:    addw a0, s0, a0
; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    addi sp, sp, 16
; RV64-LP64F-NEXT:    ret
;
; RV32-ILP32ZFHMIN-LABEL: callee_half_in_regs:
; RV32-ILP32ZFHMIN:       # %bb.0:
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    mv s0, a0
; RV32-ILP32ZFHMIN-NEXT:    fmv.x.w a0, fa0
; RV32-ILP32ZFHMIN-NEXT:    call __extendhfsf2@plt
; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-ILP32ZFHMIN-NEXT:    add a0, s0, a0
; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT:    ret
;
; RV64-LP64ZFHMIN-LABEL: callee_half_in_regs:
; RV64-LP64ZFHMIN:       # %bb.0:
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    mv s0, a0
; RV64-LP64ZFHMIN-NEXT:    fmv.x.w a0, fa0
; RV64-LP64ZFHMIN-NEXT:    call __extendhfsf2@plt
; RV64-LP64ZFHMIN-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-LP64ZFHMIN-NEXT:    addw a0, s0, a0
; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT:    ret
  %b_fptosi = fptosi half %b to i32
  %1 = add i32 %a, %b_fptosi
  ret i32 %1
}

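; Passes half 2.0 (bit pattern 0x4000) as the second argument; depending on
; the configuration it is built with lui, loaded from the constant pool into
; a GPR via fmv.x.w, or loaded directly into fa0. Roughly equivalent C
; (illustrative sketch only):
;   int caller_half_in_regs(void) { return callee_half_in_regs(1, (_Float16)2.0); }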
define i32 @caller_half_in_regs() nounwind {
; RV32I-LABEL: caller_half_in_regs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    lui a1, 4
; RV32I-NEXT:    call callee_half_in_regs@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: caller_half_in_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    lui a1, 4
; RV64I-NEXT:    call callee_half_in_regs@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: caller_half_in_regs:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    li a0, 1
; RV32IF-NEXT:    lui a1, 1048564
; RV32IF-NEXT:    call callee_half_in_regs@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: caller_half_in_regs:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    lui a0, %hi(.LCPI1_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI1_0)(a0)
; RV64IF-NEXT:    fmv.x.w a1, ft0
; RV64IF-NEXT:    li a0, 1
; RV64IF-NEXT:    call callee_half_in_regs@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32-ILP32F-LABEL: caller_half_in_regs:
; RV32-ILP32F:       # %bb.0:
; RV32-ILP32F-NEXT:    addi sp, sp, -16
; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    lui a0, %hi(.LCPI1_0)
; RV32-ILP32F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
; RV32-ILP32F-NEXT:    li a0, 1
; RV32-ILP32F-NEXT:    call callee_half_in_regs@plt
; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    addi sp, sp, 16
; RV32-ILP32F-NEXT:    ret
;
; RV64-LP64F-LABEL: caller_half_in_regs:
; RV64-LP64F:       # %bb.0:
; RV64-LP64F-NEXT:    addi sp, sp, -16
; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    lui a0, %hi(.LCPI1_0)
; RV64-LP64F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
; RV64-LP64F-NEXT:    li a0, 1
; RV64-LP64F-NEXT:    call callee_half_in_regs@plt
; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    addi sp, sp, 16
; RV64-LP64F-NEXT:    ret
;
; RV32-ILP32ZFHMIN-LABEL: caller_half_in_regs:
; RV32-ILP32ZFHMIN:       # %bb.0:
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    lui a0, %hi(.LCPI1_0)
; RV32-ILP32ZFHMIN-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
; RV32-ILP32ZFHMIN-NEXT:    li a0, 1
; RV32-ILP32ZFHMIN-NEXT:    call callee_half_in_regs@plt
; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT:    ret
;
; RV64-LP64ZFHMIN-LABEL: caller_half_in_regs:
; RV64-LP64ZFHMIN:       # %bb.0:
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    lui a0, %hi(.LCPI1_0)
; RV64-LP64ZFHMIN-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
; RV64-LP64ZFHMIN-NEXT:    li a0, 1
; RV64-LP64ZFHMIN-NEXT:    call callee_half_in_regs@plt
; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT:    ret
  %1 = call i32 @callee_half_in_regs(i32 1, half 2.0)
  ret i32 %1
}

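; With all eight GPR argument registers taken by %a..%h, the half argument %i
; is passed on the stack (loaded with lhu) for the ilp32/lp64 ABIs, but still
; arrives in fa0 under ilp32f/lp64f. Roughly equivalent C (illustrative
; sketch only):
;   int callee_half_on_stack(int a, int b, int c, int d, int e, int f,
;                            int g, int h, _Float16 i) { return h + (int)i; }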
define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i) nounwind {
; RV32I-LABEL: callee_half_on_stack:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lhu a0, 16(sp)
; RV32I-NEXT:    mv s0, a7
; RV32I-NEXT:    call __extendhfsf2@plt
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    add a0, s0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: callee_half_on_stack:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lhu a0, 16(sp)
; RV64I-NEXT:    mv s0, a7
; RV64I-NEXT:    call __extendhfsf2@plt
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    addw a0, s0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: callee_half_on_stack:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    lhu a0, 16(sp)
; RV32IF-NEXT:    mv s0, a7
; RV32IF-NEXT:    call __extendhfsf2@plt
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    add a0, s0, a0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: callee_half_on_stack:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    lhu a0, 16(sp)
; RV64IF-NEXT:    mv s0, a7
; RV64IF-NEXT:    call __extendhfsf2@plt
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    addw a0, s0, a0
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32-ILP32F-LABEL: callee_half_on_stack:
; RV32-ILP32F:       # %bb.0:
; RV32-ILP32F-NEXT:    addi sp, sp, -16
; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    mv s0, a7
; RV32-ILP32F-NEXT:    fmv.x.w a0, fa0
; RV32-ILP32F-NEXT:    call __extendhfsf2@plt
; RV32-ILP32F-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT:    add a0, s0, a0
; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    addi sp, sp, 16
; RV32-ILP32F-NEXT:    ret
;
; RV64-LP64F-LABEL: callee_half_on_stack:
; RV64-LP64F:       # %bb.0:
; RV64-LP64F-NEXT:    addi sp, sp, -16
; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    mv s0, a7
; RV64-LP64F-NEXT:    fmv.x.w a0, fa0
; RV64-LP64F-NEXT:    call __extendhfsf2@plt
; RV64-LP64F-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT:    addw a0, s0, a0
; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    addi sp, sp, 16
; RV64-LP64F-NEXT:    ret
;
; RV32-ILP32ZFHMIN-LABEL: callee_half_on_stack:
; RV32-ILP32ZFHMIN:       # %bb.0:
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    mv s0, a7
; RV32-ILP32ZFHMIN-NEXT:    fmv.x.w a0, fa0
; RV32-ILP32ZFHMIN-NEXT:    call __extendhfsf2@plt
; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-ILP32ZFHMIN-NEXT:    add a0, s0, a0
; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT:    ret
;
; RV64-LP64ZFHMIN-LABEL: callee_half_on_stack:
; RV64-LP64ZFHMIN:       # %bb.0:
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    mv s0, a7
; RV64-LP64ZFHMIN-NEXT:    fmv.x.w a0, fa0
; RV64-LP64ZFHMIN-NEXT:    call __extendhfsf2@plt
; RV64-LP64ZFHMIN-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-LP64ZFHMIN-NEXT:    addw a0, s0, a0
; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT:    ret
  %1 = fptosi half %i to i32
  %2 = add i32 %h, %1
  ret i32 %2
}

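; Passes half 10.0 (bit pattern 0x4900) as the ninth argument: the ilp32/lp64
; configurations store it to the outgoing stack slot, while the hard-float
; ABIs pass it in fa0. Roughly equivalent C (illustrative sketch only):
;   int caller_half_on_stack(void) {
;     return callee_half_on_stack(1, 2, 3, 4, 5, 6, 7, 8, (_Float16)10.0);
;   }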
define i32 @caller_half_on_stack() nounwind {
; RV32I-LABEL: caller_half_on_stack:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a0, 5
; RV32I-NEXT:    addi t0, a0, -1792
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    li a1, 2
; RV32I-NEXT:    li a2, 3
; RV32I-NEXT:    li a3, 4
; RV32I-NEXT:    li a4, 5
; RV32I-NEXT:    li a5, 6
; RV32I-NEXT:    li a6, 7
; RV32I-NEXT:    li a7, 8
; RV32I-NEXT:    sw t0, 0(sp)
; RV32I-NEXT:    call callee_half_on_stack@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: caller_half_on_stack:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a0, 5
; RV64I-NEXT:    addiw t0, a0, -1792
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a4, 5
; RV64I-NEXT:    li a5, 6
; RV64I-NEXT:    li a6, 7
; RV64I-NEXT:    li a7, 8
; RV64I-NEXT:    sd t0, 0(sp)
; RV64I-NEXT:    call callee_half_on_stack@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: caller_half_on_stack:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    lui a0, 1048565
; RV32IF-NEXT:    addi t0, a0, -1792
; RV32IF-NEXT:    li a0, 1
; RV32IF-NEXT:    li a1, 2
; RV32IF-NEXT:    li a2, 3
; RV32IF-NEXT:    li a3, 4
; RV32IF-NEXT:    li a4, 5
; RV32IF-NEXT:    li a5, 6
; RV32IF-NEXT:    li a6, 7
; RV32IF-NEXT:    li a7, 8
; RV32IF-NEXT:    sw t0, 0(sp)
; RV32IF-NEXT:    call callee_half_on_stack@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: caller_half_on_stack:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    lui a0, 1048565
; RV64IF-NEXT:    addiw t0, a0, -1792
; RV64IF-NEXT:    li a0, 1
; RV64IF-NEXT:    li a1, 2
; RV64IF-NEXT:    li a2, 3
; RV64IF-NEXT:    li a3, 4
; RV64IF-NEXT:    li a4, 5
; RV64IF-NEXT:    li a5, 6
; RV64IF-NEXT:    li a6, 7
; RV64IF-NEXT:    li a7, 8
; RV64IF-NEXT:    sw t0, 0(sp)
; RV64IF-NEXT:    call callee_half_on_stack@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32-ILP32F-LABEL: caller_half_on_stack:
; RV32-ILP32F:       # %bb.0:
; RV32-ILP32F-NEXT:    addi sp, sp, -16
; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    lui a0, %hi(.LCPI3_0)
; RV32-ILP32F-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
; RV32-ILP32F-NEXT:    li a0, 1
; RV32-ILP32F-NEXT:    li a1, 2
; RV32-ILP32F-NEXT:    li a2, 3
; RV32-ILP32F-NEXT:    li a3, 4
; RV32-ILP32F-NEXT:    li a4, 5
; RV32-ILP32F-NEXT:    li a5, 6
; RV32-ILP32F-NEXT:    li a6, 7
; RV32-ILP32F-NEXT:    li a7, 8
; RV32-ILP32F-NEXT:    call callee_half_on_stack@plt
; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    addi sp, sp, 16
; RV32-ILP32F-NEXT:    ret
;
; RV64-LP64F-LABEL: caller_half_on_stack:
; RV64-LP64F:       # %bb.0:
; RV64-LP64F-NEXT:    addi sp, sp, -16
; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    lui a0, %hi(.LCPI3_0)
; RV64-LP64F-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
; RV64-LP64F-NEXT:    li a0, 1
; RV64-LP64F-NEXT:    li a1, 2
; RV64-LP64F-NEXT:    li a2, 3
; RV64-LP64F-NEXT:    li a3, 4
; RV64-LP64F-NEXT:    li a4, 5
; RV64-LP64F-NEXT:    li a5, 6
; RV64-LP64F-NEXT:    li a6, 7
; RV64-LP64F-NEXT:    li a7, 8
; RV64-LP64F-NEXT:    call callee_half_on_stack@plt
; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    addi sp, sp, 16
; RV64-LP64F-NEXT:    ret
;
; RV32-ILP32ZFHMIN-LABEL: caller_half_on_stack:
; RV32-ILP32ZFHMIN:       # %bb.0:
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    lui a0, %hi(.LCPI3_0)
; RV32-ILP32ZFHMIN-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
; RV32-ILP32ZFHMIN-NEXT:    li a0, 1
; RV32-ILP32ZFHMIN-NEXT:    li a1, 2
; RV32-ILP32ZFHMIN-NEXT:    li a2, 3
; RV32-ILP32ZFHMIN-NEXT:    li a3, 4
; RV32-ILP32ZFHMIN-NEXT:    li a4, 5
; RV32-ILP32ZFHMIN-NEXT:    li a5, 6
; RV32-ILP32ZFHMIN-NEXT:    li a6, 7
; RV32-ILP32ZFHMIN-NEXT:    li a7, 8
; RV32-ILP32ZFHMIN-NEXT:    call callee_half_on_stack@plt
; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT:    ret
;
; RV64-LP64ZFHMIN-LABEL: caller_half_on_stack:
; RV64-LP64ZFHMIN:       # %bb.0:
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    lui a0, %hi(.LCPI3_0)
; RV64-LP64ZFHMIN-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
; RV64-LP64ZFHMIN-NEXT:    li a0, 1
; RV64-LP64ZFHMIN-NEXT:    li a1, 2
; RV64-LP64ZFHMIN-NEXT:    li a2, 3
; RV64-LP64ZFHMIN-NEXT:    li a3, 4
; RV64-LP64ZFHMIN-NEXT:    li a4, 5
; RV64-LP64ZFHMIN-NEXT:    li a5, 6
; RV64-LP64ZFHMIN-NEXT:    li a6, 7
; RV64-LP64ZFHMIN-NEXT:    li a7, 8
; RV64-LP64ZFHMIN-NEXT:    call callee_half_on_stack@plt
; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT:    ret
  %1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0)
  ret i32 %1
}

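; Returns half 1.0 (bit pattern 0x3c00), materialized in a0 for the ilp32/lp64
; ABIs and returned in fa0 (loaded from the constant pool) under ilp32f/lp64f.
; Roughly equivalent C (illustrative sketch only):
;   _Float16 callee_half_ret(void) { return (_Float16)1.0; }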
define half @callee_half_ret() nounwind {
; RV32I-LABEL: callee_half_ret:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a0, 4
; RV32I-NEXT:    addi a0, a0, -1024
; RV32I-NEXT:    ret
;
; RV64I-LABEL: callee_half_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a0, 4
; RV64I-NEXT:    addiw a0, a0, -1024
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: callee_half_ret:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 1048564
; RV32IF-NEXT:    addi a0, a0, -1024
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: callee_half_ret:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, %hi(.LCPI4_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI4_0)(a0)
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
;
; RV32-ILP32F-LABEL: callee_half_ret:
; RV32-ILP32F:       # %bb.0:
; RV32-ILP32F-NEXT:    lui a0, %hi(.LCPI4_0)
; RV32-ILP32F-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
; RV32-ILP32F-NEXT:    ret
;
; RV64-LP64F-LABEL: callee_half_ret:
; RV64-LP64F:       # %bb.0:
; RV64-LP64F-NEXT:    lui a0, %hi(.LCPI4_0)
; RV64-LP64F-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
; RV64-LP64F-NEXT:    ret
;
; RV32-ILP32ZFHMIN-LABEL: callee_half_ret:
; RV32-ILP32ZFHMIN:       # %bb.0:
; RV32-ILP32ZFHMIN-NEXT:    lui a0, %hi(.LCPI4_0)
; RV32-ILP32ZFHMIN-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
; RV32-ILP32ZFHMIN-NEXT:    ret
;
; RV64-LP64ZFHMIN-LABEL: callee_half_ret:
; RV64-LP64ZFHMIN:       # %bb.0:
; RV64-LP64ZFHMIN-NEXT:    lui a0, %hi(.LCPI4_0)
; RV64-LP64ZFHMIN-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
; RV64-LP64ZFHMIN-NEXT:    ret
  ret half 1.0
}

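; Consumes the half return value of callee_half_ret, converting it to i32
; through __extendhfsf2 as above. Roughly equivalent C (illustrative sketch
; only):
;   int caller_half_ret(void) { return (int)callee_half_ret(); }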
define i32 @caller_half_ret() nounwind {
; RV32I-LABEL: caller_half_ret:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call callee_half_ret@plt
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    call __extendhfsf2@plt
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: caller_half_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call callee_half_ret@plt
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    call __extendhfsf2@plt
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: caller_half_ret:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call callee_half_ret@plt
; RV32IF-NEXT:    call __extendhfsf2@plt
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: caller_half_ret:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call callee_half_ret@plt
; RV64IF-NEXT:    call __extendhfsf2@plt
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32-ILP32F-LABEL: caller_half_ret:
; RV32-ILP32F:       # %bb.0:
; RV32-ILP32F-NEXT:    addi sp, sp, -16
; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT:    call callee_half_ret@plt
; RV32-ILP32F-NEXT:    fmv.x.w a0, fa0
; RV32-ILP32F-NEXT:    call __extendhfsf2@plt
; RV32-ILP32F-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT:    addi sp, sp, 16
; RV32-ILP32F-NEXT:    ret
;
; RV64-LP64F-LABEL: caller_half_ret:
; RV64-LP64F:       # %bb.0:
; RV64-LP64F-NEXT:    addi sp, sp, -16
; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT:    call callee_half_ret@plt
; RV64-LP64F-NEXT:    fmv.x.w a0, fa0
; RV64-LP64F-NEXT:    call __extendhfsf2@plt
; RV64-LP64F-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT:    addi sp, sp, 16
; RV64-LP64F-NEXT:    ret
;
; RV32-ILP32ZFHMIN-LABEL: caller_half_ret:
; RV32-ILP32ZFHMIN:       # %bb.0:
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32ZFHMIN-NEXT:    call callee_half_ret@plt
; RV32-ILP32ZFHMIN-NEXT:    fmv.x.w a0, fa0
; RV32-ILP32ZFHMIN-NEXT:    call __extendhfsf2@plt
; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT:    ret
;
; RV64-LP64ZFHMIN-LABEL: caller_half_ret:
; RV64-LP64ZFHMIN:       # %bb.0:
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64ZFHMIN-NEXT:    call callee_half_ret@plt
; RV64-LP64ZFHMIN-NEXT:    fmv.x.w a0, fa0
; RV64-LP64ZFHMIN-NEXT:    call __extendhfsf2@plt
; RV64-LP64ZFHMIN-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT:    ret
  %1 = call half @callee_half_ret()
  %2 = fptosi half %1 to i32
  ret i32 %2
}