; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -O3 | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -O3 | FileCheck %s --check-prefixes=X64

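; Verify strict (constrained) lowering of the half-precision rounding
; intrinsics declared below. SSE2 promotes through float, using the
; __extendhfsf2/__truncsfhf2 soft-float helpers around a libm call (the
; pushq/popq pair only keeps the stack 16-byte aligned across the calls);
; F16C and AVX-512F convert through single precision and round with VROUNDSS;
; AVX512FP16 rounds the half value directly with VRNDSCALESH.
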
declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
declare half @llvm.experimental.constrained.floor.f16(half, metadata)
declare half @llvm.experimental.constrained.trunc.f16(half, metadata)
declare half @llvm.experimental.constrained.rint.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.nearbyint.f16(half, metadata, metadata)
declare half @llvm.experimental.constrained.roundeven.f16(half, metadata)
declare half @llvm.experimental.constrained.round.f16(half, metadata)

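; ceil: immediate $10 (0b1010) in VROUNDSS/VRNDSCALESH selects round toward
; +infinity (imm[1:0] = 10) and sets bit 3 to suppress the inexact exception,
; which ceil must not signal.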
define half @fceil32(half %f) #0 {
; SSE2-LABEL: fceil32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq ceilf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: fceil32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    vroundss $10, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X86-LABEL: fceil32:
; X86:       # %bb.0:
; X86-NEXT:    vrndscalesh $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: fceil32:
; X64:       # %bb.0:
; X64-NEXT:    vrndscalesh $10, %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.ceil.f16(
                        half %f, metadata !"fpexcept.strict") #0
  ret half %res
}

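; floor: immediate $9 (0b1001) selects round toward -infinity and suppresses
; the inexact exception.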
define half @ffloor32(half %f) #0 {
; SSE2-LABEL: ffloor32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq floorf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: ffloor32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    vroundss $9, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X86-LABEL: ffloor32:
; X86:       # %bb.0:
; X86-NEXT:    vrndscalesh $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: ffloor32:
; X64:       # %bb.0:
; X64-NEXT:    vrndscalesh $9, %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.floor.f16(
                        half %f, metadata !"fpexcept.strict") #0
  ret half %res
}

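; trunc: immediate $11 (0b1011) selects round toward zero and suppresses the
; inexact exception.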
define half @ftrunc32(half %f) #0 {
; SSE2-LABEL: ftrunc32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq truncf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: ftrunc32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    vroundss $11, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X86-LABEL: ftrunc32:
; X86:       # %bb.0:
; X86-NEXT:    vrndscalesh $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: ftrunc32:
; X64:       # %bb.0:
; X64-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.trunc.f16(
                        half %f, metadata !"fpexcept.strict") #0
  ret half %res
}

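; rint: immediate $4 (0b0100) sets bit 2 to round using the current MXCSR
; rounding mode and leaves bit 3 clear, so inexact is still signaled when the
; result is not exact, as rint requires.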
define half @frint32(half %f) #0 {
; SSE2-LABEL: frint32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq rintf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: frint32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X86-LABEL: frint32:
; X86:       # %bb.0:
; X86-NEXT:    vrndscalesh $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: frint32:
; X64:       # %bb.0:
; X64-NEXT:    vrndscalesh $4, %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.rint.f16(
                        half %f,
                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret half %res
}

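; nearbyint: immediate $12 (0b1100) also rounds using the current MXCSR mode,
; but sets bit 3 to suppress the inexact exception; that is the only
; difference from the rint lowering above.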
define half @fnearbyint32(half %f) #0 {
; SSE2-LABEL: fnearbyint32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq nearbyintf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: fnearbyint32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    vroundss $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X86-LABEL: fnearbyint32:
; X86:       # %bb.0:
; X86-NEXT:    vrndscalesh $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: fnearbyint32:
; X64:       # %bb.0:
; X64-NEXT:    vrndscalesh $12, %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.nearbyint.f16(
                        half %f,
                        metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret half %res
}

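; roundeven: immediate $8 (0b1000) selects round to nearest even and
; suppresses the inexact exception.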
define half @froundeven16(half %f) #0 {
; SSE2-LABEL: froundeven16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq roundevenf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: froundeven16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    vroundss $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X86-LABEL: froundeven16:
; X86:       # %bb.0:
; X86-NEXT:    vrndscalesh $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: froundeven16:
; X64:       # %bb.0:
; X64-NEXT:    vrndscalesh $8, %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.roundeven.f16(
                        half %f, metadata !"fpexcept.strict") #0
  ret half %res
}

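; round: rounding halfway cases away from zero has no encoding in the
; VROUNDSS/VRNDSCALESH immediate, so every configuration falls back to the
; roundf libcall; AVX512FP16 only contributes the vcvtsh2ss/vcvtss2sh
; conversions around it.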
define half @fround16(half %f) #0 {
; SSE2-LABEL: fround16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    callq __extendhfsf2@PLT
; SSE2-NEXT:    callq roundf@PLT
; SSE2-NEXT:    callq __truncsfhf2@PLT
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; AVX-LABEL: fround16:
; AVX:       # %bb.0:
; AVX-NEXT:    pushq %rax
; AVX-NEXT:    vpextrw $0, %xmm0, %eax
; AVX-NEXT:    movzwl %ax, %eax
; AVX-NEXT:    vmovd %eax, %xmm0
; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX-NEXT:    callq roundf@PLT
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT:    popq %rax
; AVX-NEXT:    retq
;
; X86-LABEL: fround16:
; X86:       # %bb.0:
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; X86-NEXT:    vmovss %xmm0, (%esp)
; X86-NEXT:    calll roundf
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    retl
;
; X64-LABEL: fround16:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; X64-NEXT:    callq roundf@PLT
; X64-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
  %res = call half @llvm.experimental.constrained.round.f16(
                        half %f, metadata !"fpexcept.strict") #0
  ret half %res
}

attributes #0 = { strictfp nounwind }