// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone \
// RUN: -emit-llvm -o - %s | opt -S -sroa | FileCheck %s
// REQUIRES: aarch64-registered-target
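
// Check the LLVM IR generated for the C comparison operators (==, !=, <, <=,
// >, >=) applied directly to SVE scalable vector operands.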
#include <arm_sve.h>

// EQ
// CHECK-LABEL: @eq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t eq_bool(svbool_t a, svbool_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t eq_i8(svint8_t a, svint8_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t eq_i16(svint16_t a, svint16_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t eq_i32(svint32_t a, svint32_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t eq_i64(svint64_t a, svint64_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t eq_u8(svuint8_t a, svuint8_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t eq_u16(svuint16_t a, svuint16_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t eq_u32(svuint32_t a, svuint32_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t eq_u64(svuint64_t a, svuint64_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t eq_f16(svfloat16_t a, svfloat16_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t eq_f32(svfloat32_t a, svfloat32_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t eq_f64(svfloat64_t a, svfloat64_t b) {
  return a == b;
}

// NEQ
// CHECK-LABEL: @neq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t neq_bool(svbool_t a, svbool_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t neq_i8(svint8_t a, svint8_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t neq_i16(svint16_t a, svint16_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t neq_i32(svint32_t a, svint32_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t neq_i64(svint64_t a, svint64_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t neq_u8(svuint8_t a, svuint8_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t neq_u16(svuint16_t a, svuint16_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t neq_u32(svuint32_t a, svuint32_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t neq_u64(svuint64_t a, svuint64_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t neq_f16(svfloat16_t a, svfloat16_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t neq_f32(svfloat32_t a, svfloat32_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t neq_f64(svfloat64_t a, svfloat64_t b) {
  return a != b;
}

// LT
// CHECK-LABEL: @lt_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t lt_bool(svbool_t a, svbool_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t lt_i8(svint8_t a, svint8_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t lt_i16(svint16_t a, svint16_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t lt_i32(svint32_t a, svint32_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t lt_i64(svint64_t a, svint64_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t lt_u8(svuint8_t a, svuint8_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t lt_u16(svuint16_t a, svuint16_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t lt_u32(svuint32_t a, svuint32_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t lt_u64(svuint64_t a, svuint64_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t lt_f16(svfloat16_t a, svfloat16_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t lt_f32(svfloat32_t a, svfloat32_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t lt_f64(svfloat64_t a, svfloat64_t b) {
  return a < b;
}

// LEQ
// CHECK-LABEL: @leq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t leq_bool(svbool_t a, svbool_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t leq_i8(svint8_t a, svint8_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t leq_i16(svint16_t a, svint16_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t leq_i32(svint32_t a, svint32_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t leq_i64(svint64_t a, svint64_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t leq_u8(svuint8_t a, svuint8_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t leq_u16(svuint16_t a, svuint16_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t leq_u32(svuint32_t a, svuint32_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t leq_u64(svuint64_t a, svuint64_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t leq_f16(svfloat16_t a, svfloat16_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t leq_f32(svfloat32_t a, svfloat32_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t leq_f64(svfloat64_t a, svfloat64_t b) {
  return a <= b;
}

// GT
// CHECK-LABEL: @gt_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t gt_bool(svbool_t a, svbool_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t gt_i8(svint8_t a, svint8_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t gt_i16(svint16_t a, svint16_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t gt_i32(svint32_t a, svint32_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t gt_i64(svint64_t a, svint64_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t gt_u8(svuint8_t a, svuint8_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t gt_u16(svuint16_t a, svuint16_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t gt_u32(svuint32_t a, svuint32_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t gt_u64(svuint64_t a, svuint64_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t gt_f16(svfloat16_t a, svfloat16_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t gt_f32(svfloat32_t a, svfloat32_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t gt_f64(svfloat64_t a, svfloat64_t b) {
  return a > b;
}

// GEQ
// CHECK-LABEL: @geq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t geq_bool(svbool_t a, svbool_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t geq_i8(svint8_t a, svint8_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t geq_i16(svint16_t a, svint16_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t geq_i32(svint32_t a, svint32_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t geq_i64(svint64_t a, svint64_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t geq_u8(svuint8_t a, svuint8_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t geq_u16(svuint16_t a, svuint16_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t geq_u32(svuint32_t a, svuint32_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t geq_u64(svuint64_t a, svuint64_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t geq_f16(svfloat16_t a, svfloat16_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t geq_f32(svfloat32_t a, svfloat32_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t geq_f64(svfloat64_t a, svfloat64_t b) {
  return a >= b;
}