; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub
; RUN: llc -mtriple=aarch64-none-eabi -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
; RUN: llc -mtriple=aarch64-none-eabi -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s

; Each function below computes (%0 & ~%1) ^ %2 on a 128-bit vector.
; With +sha3 this should select a single BCAX (Vd = Vn ^ (Vm & ~Va));
; without it, the fallback is the two-instruction BIC + EOR sequence.

define <2 x i64> @bcax_64x2(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2) {
; SHA3-LABEL: bcax_64x2:
; SHA3:       // %bb.0:
; SHA3-NEXT:    bcax v0.16b, v2.16b, v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: bcax_64x2:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    bic v0.16b, v0.16b, v1.16b
; NOSHA3-NEXT:    eor v0.16b, v0.16b, v2.16b
; NOSHA3-NEXT:    ret
  %4 = xor <2 x i64> %1, <i64 -1, i64 -1>
  %5 = and <2 x i64> %4, %0
  %6 = xor <2 x i64> %5, %2
  ret <2 x i64> %6
}

define <4 x i32> @bcax_32x4(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) {
; SHA3-LABEL: bcax_32x4:
; SHA3:       // %bb.0:
; SHA3-NEXT:    bcax v0.16b, v2.16b, v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: bcax_32x4:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    bic v0.16b, v0.16b, v1.16b
; NOSHA3-NEXT:    eor v0.16b, v0.16b, v2.16b
; NOSHA3-NEXT:    ret
  %4 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
  %5 = and <4 x i32> %4, %0
  %6 = xor <4 x i32> %5, %2
  ret <4 x i32> %6
}

define <8 x i16> @bcax_16x8(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) {
; SHA3-LABEL: bcax_16x8:
; SHA3:       // %bb.0:
; SHA3-NEXT:    bcax v0.16b, v2.16b, v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: bcax_16x8:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    bic v0.16b, v0.16b, v1.16b
; NOSHA3-NEXT:    eor v0.16b, v0.16b, v2.16b
; NOSHA3-NEXT:    ret
  %4 = xor <8 x i16> %1, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %5 = and <8 x i16> %4, %0
  %6 = xor <8 x i16> %5, %2
  ret <8 x i16> %6
}

define <16 x i8> @bcax_8x16(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2) {
; SHA3-LABEL: bcax_8x16:
; SHA3:       // %bb.0:
; SHA3-NEXT:    bcax v0.16b, v2.16b, v0.16b, v1.16b
; SHA3-NEXT:    ret
;
; NOSHA3-LABEL: bcax_8x16:
; NOSHA3:       // %bb.0:
; NOSHA3-NEXT:    bic v0.16b, v0.16b, v1.16b
; NOSHA3-NEXT:    eor v0.16b, v0.16b, v2.16b
; NOSHA3-NEXT:    ret
  %4 = xor <16 x i8> %1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %5 = and <16 x i8> %4, %0
  %6 = xor <16 x i8> %5, %2
  ret <16 x i8> %6
}