; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-MVE
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-MVEFP
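; These tests lower a vector fcmp against zero feeding a select. Plain MVE
; (CHECK-MVE) scalarizes each lane with VFP compares, CSET and VSELEQ, while
; MVE with the FP extension (CHECK-MVEFP) folds the pattern into a predicated
; vector compare against zr followed by VPSEL.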

define arm_aapcs_vfpcc <4 x float> @vcmp_oeq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, eq
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 eq, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oeq <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_one_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r1, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r2, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r3, #1
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_one_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp one <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ogt_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, gt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, gt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, gt
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ogt_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ogt <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_oge_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, ge
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, ge
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, ge
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oge_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oge <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_olt_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, mi
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_olt_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp olt <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ole_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, ls
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, ls
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, ls
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ole_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ole <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ueq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ueq_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #1
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ueq_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ueq <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_une_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_une_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_une_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 ne, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp une <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ugt_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, hi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, hi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, hi
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ugt_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ugt <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_uge_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, pl
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, pl
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, pl
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uge_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uge <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ult_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, lt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, lt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, lt
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ult_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ult <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ule_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, le
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, le
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, le
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ule_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ule <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ord_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    cset r1, vc
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    cset r2, vc
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, vc
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ord_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ord <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_uno_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    cset r1, vs
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    cset r2, vs
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, vs
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uno_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uno <4 x float> %src, zeroinitializer
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}



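; The v8f16 variants additionally use vmovx.f16/vins.f16 to extract and
; reinsert the top half of each 32-bit lane when scalarizing for plain MVE.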
define arm_aapcs_vfpcc <8 x half> @vcmp_oeq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 eq, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oeq <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_one_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_one_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp one <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ogt_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ogt_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ogt <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oge_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oge_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oge <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_olt_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_olt_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp olt <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ole_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ole_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ole <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ueq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ueq_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ueq_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ueq <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_une_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_une_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s4
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s13, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s13, s14
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s5
; CHECK-MVE-NEXT:    vmovx.f16 s12, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s12, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s6
; CHECK-MVE-NEXT:    vmovx.f16 s12, s10
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s12, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s11
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s7
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_une_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 ne, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp une <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ugt_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ugt_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ugt <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_uge_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uge_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uge <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ult_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ult_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ult <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ule_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ule_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ule <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ord_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, s12
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, s0
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, s1
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, s2
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, s3
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ord_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ord <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_uno_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, s12
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, s0
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, s1
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, s2
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, s3
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uno_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uno <8 x half> %src, zeroinitializer
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}


; Reversed: the same comparisons with zero as the first fcmp operand.

define arm_aapcs_vfpcc <4 x float> @vcmp_r_oeq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_oeq_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, eq
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oeq_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 eq, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oeq <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_one_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_one_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r1, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r2, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r3, #1
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_one_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp one <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ogt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ogt_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, mi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, mi
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ogt_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ogt <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_oge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_oge_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, ls
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, ls
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, ls
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oge_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oge <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_olt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_olt_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, gt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, gt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, gt
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_olt_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp olt <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ole_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ole_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, ge
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, ge
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, ge
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ole_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ole <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ueq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ueq_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r1, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r2, #1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r3, #1
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ueq_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ueq <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_une_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_une_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_une_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 ne, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp une <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ugt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ugt_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, lt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, lt
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, lt
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ugt_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ugt <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_uge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_uge_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, le
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, le
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, le
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uge_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uge <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ult_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ult_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, hi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, hi
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, hi
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ult_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ult <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ule_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ule_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, #0
; CHECK-MVE-NEXT:    cset r1, pl
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, #0
; CHECK-MVE-NEXT:    cset r2, pl
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, pl
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ule_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ule <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_ord_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ord_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    cset r1, vc
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    cset r2, vc
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, vc
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ord_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ord <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}

define arm_aapcs_vfpcc <4 x float> @vcmp_r_uno_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_uno_v4f32:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vcmp.f32 s1, s1
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s0, s0
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s3, s3
; CHECK-MVE-NEXT:    cset r1, vs
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f32 s2, s2
; CHECK-MVE-NEXT:    cset r2, vs
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r3, vs
; CHECK-MVE-NEXT:    cmp r2, #0
; CHECK-MVE-NEXT:    vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT:    cmp r3, #0
; CHECK-MVE-NEXT:    vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT:    cmp r1, #0
; CHECK-MVE-NEXT:    vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uno_v4f32:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f32 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uno <4 x float> zeroinitializer, %src
  %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
  ret <4 x float> %s
}



define arm_aapcs_vfpcc <8 x half> @vcmp_r_oeq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_oeq_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oeq_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 eq, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oeq <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_one_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_one_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    it gt
; CHECK-MVE-NEXT:    movgt r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_one_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp one <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ogt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ogt_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, mi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ogt_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ogt <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_oge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_oge_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ls
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oge_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp oge <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_olt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_olt_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, gt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_olt_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp olt <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ole_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ole_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, ge
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ole_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ole <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ueq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ueq_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, eq
; CHECK-MVE-NEXT:    it vs
; CHECK-MVE-NEXT:    movvs r0, #1
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ueq_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ueq <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_une_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_une_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s4
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s13, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s13, s14
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s5
; CHECK-MVE-NEXT:    vmovx.f16 s12, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s12, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s6
; CHECK-MVE-NEXT:    vmovx.f16 s12, s10
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s12, s8
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s11
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s7
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_une_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 ne, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp une <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ugt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ugt_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, lt
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ugt_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ugt <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_uge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_uge_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, le
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uge_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uge <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ult_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ult_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, hi
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ult_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ult <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ule_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ule_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, #0
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, #0
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, #0
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, #0
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, pl
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ule_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ule <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_ord_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ord_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, s12
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, s0
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, s1
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, s2
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, s3
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vc
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ord_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp ord <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_r_uno_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_uno_v8f16:
; CHECK-MVE:       @ %bb.0: @ %entry
; CHECK-MVE-NEXT:    vmovx.f16 s12, s0
; CHECK-MVE-NEXT:    vmovx.f16 s14, s8
; CHECK-MVE-NEXT:    vcmp.f16 s12, s12
; CHECK-MVE-NEXT:    vmovx.f16 s12, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s0, s0
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s0, s8, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s1
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s5
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s9
; CHECK-MVE-NEXT:    vcmp.f16 s1, s1
; CHECK-MVE-NEXT:    vins.f16 s0, s12
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vmovx.f16 s8, s10
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s1, s9, s5
; CHECK-MVE-NEXT:    vins.f16 s1, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s2
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s6
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s2, s2
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s8, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s2, s10, s6
; CHECK-MVE-NEXT:    vmovx.f16 s6, s11
; CHECK-MVE-NEXT:    vins.f16 s2, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s3
; CHECK-MVE-NEXT:    vcmp.f16 s4, s4
; CHECK-MVE-NEXT:    vmovx.f16 s4, s7
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    vcmp.f16 s3, s3
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s4, s6, s4
; CHECK-MVE-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT:    cset r0, vs
; CHECK-MVE-NEXT:    cmp r0, #0
; CHECK-MVE-NEXT:    vseleq.f16 s3, s11, s7
; CHECK-MVE-NEXT:    vins.f16 s3, s4
; CHECK-MVE-NEXT:    bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uno_v8f16:
; CHECK-MVEFP:       @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT:    vcmpt.f16 gt, q0, zr
; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
; CHECK-MVEFP-NEXT:    bx lr
entry:
  %c = fcmp uno <8 x half> zeroinitializer, %src
  %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
  ret <8 x half> %s
}