// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN:   -fallow-half-arguments-and-returns -disable-O0-optnone \
// RUN:   -emit-llvm -o - %s | opt -S -sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>
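
// These tests check that the C bitwise operators (&, |, ^, ~) applied to SVE
// ACLE predicate and integer vector types lower to single LLVM instructions
// on the corresponding scalable vector types.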

// AND
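// operator & on svbool_t and on signed/unsigned element types should emit a
// single 'and' on the matching <vscale x N x iM> vector.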

// CHECK-LABEL: @and_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i1> [[AND]]
//
svbool_t and_bool(svbool_t a, svbool_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i8> [[AND]]
//
svint8_t and_i8(svint8_t a, svint8_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 8 x i16> [[AND]]
//
svint16_t and_i16(svint16_t a, svint16_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[AND]]
//
svint32_t and_i32(svint32_t a, svint32_t b) {
  return a & b;
}

// CHECK-LABEL: @and_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x i64> [[AND]]
//
svint64_t and_i64(svint64_t a, svint64_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i8> [[AND]]
//
svuint8_t and_u8(svuint8_t a, svuint8_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 8 x i16> [[AND]]
//
svuint16_t and_u16(svuint16_t a, svuint16_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[AND]]
//
svuint32_t and_u32(svuint32_t a, svuint32_t b) {
  return a & b;
}

// CHECK-LABEL: @and_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[AND:%.*]] = and <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x i64> [[AND]]
//
svuint64_t and_u64(svuint64_t a, svuint64_t b) {
  return a & b;
}

// OR
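// operator | follows the same pattern, emitting a single 'or' instruction.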

// CHECK-LABEL: @or_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i1> [[OR]]
//
svbool_t or_bool(svbool_t a, svbool_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i8> [[OR]]
//
svint8_t or_i8(svint8_t a, svint8_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 8 x i16> [[OR]]
//
svint16_t or_i16(svint16_t a, svint16_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[OR]]
//
svint32_t or_i32(svint32_t a, svint32_t b) {
  return a | b;
}

// CHECK-LABEL: @or_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x i64> [[OR]]
//
svint64_t or_i64(svint64_t a, svint64_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i8> [[OR]]
//
svuint8_t or_u8(svuint8_t a, svuint8_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 8 x i16> [[OR]]
//
svuint16_t or_u16(svuint16_t a, svuint16_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[OR]]
//
svuint32_t or_u32(svuint32_t a, svuint32_t b) {
  return a | b;
}

// CHECK-LABEL: @or_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OR:%.*]] = or <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x i64> [[OR]]
//
svuint64_t or_u64(svuint64_t a, svuint64_t b) {
  return a | b;
}

// XOR
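// operator ^ emits a single 'xor' instruction.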

// CHECK-LABEL: @xor_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i1> [[XOR]]
//
svbool_t xor_bool(svbool_t a, svbool_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i8> [[XOR]]
//
svint8_t xor_i8(svint8_t a, svint8_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 8 x i16> [[XOR]]
//
svint16_t xor_i16(svint16_t a, svint16_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[XOR]]
//
svint32_t xor_i32(svint32_t a, svint32_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x i64> [[XOR]]
//
svint64_t xor_i64(svint64_t a, svint64_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 16 x i8> [[XOR]]
//
svuint8_t xor_u8(svuint8_t a, svuint8_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 8 x i16> [[XOR]]
//
svuint16_t xor_u16(svuint16_t a, svuint16_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[XOR]]
//
svuint32_t xor_u32(svuint32_t a, svuint32_t b) {
  return a ^ b;
}

// CHECK-LABEL: @xor_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[XOR:%.*]] = xor <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <vscale x 2 x i64> [[XOR]]
//
svuint64_t xor_u64(svuint64_t a, svuint64_t b) {
  return a ^ b;
}

// NEG
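// operator ~ (bitwise complement) emits an 'xor' with an all-ones splat:
// 'true' for the i1 predicate elements and -1 for the integer elements.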

// CHECK-LABEL: @neg_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 16 x i1> [[A:%.*]], shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 16 x i1> [[NEG]]
//
svbool_t neg_bool(svbool_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 16 x i8> [[A:%.*]], shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[NEG]]
//
svint8_t neg_i8(svint8_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 8 x i16> [[A:%.*]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[NEG]]
//
svint16_t neg_i16(svint16_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 4 x i32> [[A:%.*]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[NEG]]
//
svint32_t neg_i32(svint32_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 2 x i64> [[A:%.*]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[NEG]]
//
svint64_t neg_i64(svint64_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 16 x i8> [[A:%.*]], shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[NEG]]
//
svuint8_t neg_u8(svuint8_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 8 x i16> [[A:%.*]], shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[NEG]]
//
svuint16_t neg_u16(svuint16_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 4 x i32> [[A:%.*]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[NEG]]
//
svuint32_t neg_u32(svuint32_t a) {
  return ~a;
}

// CHECK-LABEL: @neg_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[NEG:%.*]] = xor <vscale x 2 x i64> [[A:%.*]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 -1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[NEG]]
//
svuint64_t neg_u64(svuint64_t a) {
  return ~a;
}