; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256
; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512

target triple = "aarch64-unknown-linux-gnu"

;
; ICMP EQ
;

; Don't use SVE for 64-bit vectors.
define <8 x i8> @icmp_eq_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    ret
  %cmp = icmp eq <8 x i8> %op1, %op2
  %sext = sext <8 x i1> %cmp to <8 x i8>
  ret <8 x i8> %sext
}

; Don't use SVE for 128-bit vectors.
define <16 x i8> @icmp_eq_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %cmp = icmp eq <16 x i8> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i8>
  ret <16 x i8> %sext
}

define void @icmp_eq_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl32
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i8>, <32 x i8>* %a
  %op2 = load <32 x i8>, <32 x i8>* %b
  %cmp = icmp eq <32 x i8> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i8>
  store <32 x i8> %sext, <32 x i8>* %a
  ret void
}

define void @icmp_eq_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
; VBITS_GE_256-LABEL: icmp_eq_v64i8:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    mov w8, #32
; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
; VBITS_GE_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0]
; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x1, x8]
; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT:    cmpeq p1.b, p0/z, z0.b, z2.b
; VBITS_GE_256-NEXT:    cmpeq p2.b, p0/z, z1.b, z3.b
; VBITS_GE_256-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    mov z1.b, p2/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0, x8]
; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
;
; VBITS_GE_512-LABEL: icmp_eq_v64i8:
; VBITS_GE_512:       // %bb.0:
; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x0]
; VBITS_GE_512-NEXT:    ld1b { z1.b }, p0/z, [x1]
; VBITS_GE_512-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
; VBITS_GE_512-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x0]
; VBITS_GE_512-NEXT:    ret
  %op1 = load <64 x i8>, <64 x i8>* %a
  %op2 = load <64 x i8>, <64 x i8>* %b
  %cmp = icmp eq <64 x i8> %op1, %op2
  %sext = sext <64 x i1> %cmp to <64 x i8>
  store <64 x i8> %sext, <64 x i8>* %a
  ret void
}

define void @icmp_eq_v128i8(<128 x i8>* %a, <128 x i8>* %b) vscale_range(8,0) #0 {
; CHECK-LABEL: icmp_eq_v128i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl128
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <128 x i8>, <128 x i8>* %a
  %op2 = load <128 x i8>, <128 x i8>* %b
  %cmp = icmp eq <128 x i8> %op1, %op2
  %sext = sext <128 x i1> %cmp to <128 x i8>
  store <128 x i8> %sext, <128 x i8>* %a
  ret void
}

define void @icmp_eq_v256i8(<256 x i8>* %a, <256 x i8>* %b) vscale_range(16,0) #0 {
; CHECK-LABEL: icmp_eq_v256i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <256 x i8>, <256 x i8>* %a
  %op2 = load <256 x i8>, <256 x i8>* %b
  %cmp = icmp eq <256 x i8> %op1, %op2
  %sext = sext <256 x i1> %cmp to <256 x i8>
  store <256 x i8> %sext, <256 x i8>* %a
  ret void
}

; Don't use SVE for 64-bit vectors.
define <4 x i16> @icmp_eq_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ret
  %cmp = icmp eq <4 x i16> %op1, %op2
  %sext = sext <4 x i1> %cmp to <4 x i16>
  ret <4 x i16> %sext
}

; Don't use SVE for 128-bit vectors.
define <8 x i16> @icmp_eq_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.8h, v0.8h, v1.8h
; CHECK-NEXT:    ret
  %cmp = icmp eq <8 x i16> %op1, %op2
  %sext = sext <8 x i1> %cmp to <8 x i16>
  ret <8 x i16> %sext
}

define void @icmp_eq_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v16i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl16
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i16>, <16 x i16>* %a
  %op2 = load <16 x i16>, <16 x i16>* %b
  %cmp = icmp eq <16 x i16> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i16>
  store <16 x i16> %sext, <16 x i16>* %a
  ret void
}

define void @icmp_eq_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
; VBITS_GE_256-LABEL: icmp_eq_v32i16:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    mov x8, #16
; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT:    cmpeq p1.h, p0/z, z0.h, z2.h
; VBITS_GE_256-NEXT:    cmpeq p2.h, p0/z, z1.h, z3.h
; VBITS_GE_256-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    mov z1.h, p2/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
;
; VBITS_GE_512-LABEL: icmp_eq_v32i16:
; VBITS_GE_512:       // %bb.0:
; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
; VBITS_GE_512-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
; VBITS_GE_512-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
; VBITS_GE_512-NEXT:    ret
  %op1 = load <32 x i16>, <32 x i16>* %a
  %op2 = load <32 x i16>, <32 x i16>* %b
  %cmp = icmp eq <32 x i16> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i16>
  store <32 x i16> %sext, <32 x i16>* %a
  ret void
}

define void @icmp_eq_v64i16(<64 x i16>* %a, <64 x i16>* %b) vscale_range(8,0) #0 {
; CHECK-LABEL: icmp_eq_v64i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <64 x i16>, <64 x i16>* %a
  %op2 = load <64 x i16>, <64 x i16>* %b
  %cmp = icmp eq <64 x i16> %op1, %op2
  %sext = sext <64 x i1> %cmp to <64 x i16>
  store <64 x i16> %sext, <64 x i16>* %a
  ret void
}

define void @icmp_eq_v128i16(<128 x i16>* %a, <128 x i16>* %b) vscale_range(16,0) #0 {
; CHECK-LABEL: icmp_eq_v128i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl128
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <128 x i16>, <128 x i16>* %a
  %op2 = load <128 x i16>, <128 x i16>* %b
  %cmp = icmp eq <128 x i16> %op1, %op2
  %sext = sext <128 x i1> %cmp to <128 x i16>
  store <128 x i16> %sext, <128 x i16>* %a
  ret void
}

; Don't use SVE for 64-bit vectors.
define <2 x i32> @icmp_eq_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    ret
  %cmp = icmp eq <2 x i32> %op1, %op2
  %sext = sext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %sext
}

; Don't use SVE for 128-bit vectors.
define <4 x i32> @icmp_eq_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %cmp = icmp eq <4 x i32> %op1, %op2
  %sext = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %sext
}

define void @icmp_eq_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl8
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x i32>, <8 x i32>* %a
  %op2 = load <8 x i32>, <8 x i32>* %b
  %cmp = icmp eq <8 x i32> %op1, %op2
  %sext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %sext, <8 x i32>* %a
  ret void
}

define void @icmp_eq_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
; VBITS_GE_256-LABEL: icmp_eq_v16i32:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    mov x8, #8
; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT:    cmpeq p1.s, p0/z, z0.s, z2.s
; VBITS_GE_256-NEXT:    cmpeq p2.s, p0/z, z1.s, z3.s
; VBITS_GE_256-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
;
; VBITS_GE_512-LABEL: icmp_eq_v16i32:
; VBITS_GE_512:       // %bb.0:
; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
; VBITS_GE_512-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
; VBITS_GE_512-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
; VBITS_GE_512-NEXT:    ret
  %op1 = load <16 x i32>, <16 x i32>* %a
  %op2 = load <16 x i32>, <16 x i32>* %b
  %cmp = icmp eq <16 x i32> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i32>
  store <16 x i32> %sext, <16 x i32>* %a
  ret void
}

define void @icmp_eq_v32i32(<32 x i32>* %a, <32 x i32>* %b) vscale_range(8,0) #0 {
; CHECK-LABEL: icmp_eq_v32i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i32>, <32 x i32>* %a
  %op2 = load <32 x i32>, <32 x i32>* %b
  %cmp = icmp eq <32 x i32> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i32>
  store <32 x i32> %sext, <32 x i32>* %a
  ret void
}

define void @icmp_eq_v64i32(<64 x i32>* %a, <64 x i32>* %b) vscale_range(16,0) #0 {
; CHECK-LABEL: icmp_eq_v64i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl64
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <64 x i32>, <64 x i32>* %a
  %op2 = load <64 x i32>, <64 x i32>* %b
  %cmp = icmp eq <64 x i32> %op1, %op2
  %sext = sext <64 x i1> %cmp to <64 x i32>
  store <64 x i32> %sext, <64 x i32>* %a
  ret void
}

; Don't use SVE for 64-bit vectors.
define <1 x i64> @icmp_eq_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq d0, d0, d1
; CHECK-NEXT:    ret
  %cmp = icmp eq <1 x i64> %op1, %op2
  %sext = sext <1 x i1> %cmp to <1 x i64>
  ret <1 x i64> %sext
}

; Don't use SVE for 128-bit vectors.
define <2 x i64> @icmp_eq_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmeq v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %cmp = icmp eq <2 x i64> %op1, %op2
  %sext = sext <2 x i1> %cmp to <2 x i64>
  ret <2 x i64> %sext
}

define void @icmp_eq_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_eq_v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl4
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <4 x i64>, <4 x i64>* %a
  %op2 = load <4 x i64>, <4 x i64>* %b
  %cmp = icmp eq <4 x i64> %op1, %op2
  %sext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %sext, <4 x i64>* %a
  ret void
}

define void @icmp_eq_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
; VBITS_GE_256-LABEL: icmp_eq_v8i64:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    mov x8, #4
; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT:    cmpeq p1.d, p0/z, z0.d, z2.d
; VBITS_GE_256-NEXT:    cmpeq p2.d, p0/z, z1.d, z3.d
; VBITS_GE_256-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    mov z1.d, p2/z, #-1 // =0xffffffffffffffff
; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
;
; VBITS_GE_512-LABEL: icmp_eq_v8i64:
; VBITS_GE_512:       // %bb.0:
; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
; VBITS_GE_512-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
; VBITS_GE_512-NEXT:    ret
  %op1 = load <8 x i64>, <8 x i64>* %a
  %op2 = load <8 x i64>, <8 x i64>* %b
  %cmp = icmp eq <8 x i64> %op1, %op2
  %sext = sext <8 x i1> %cmp to <8 x i64>
  store <8 x i64> %sext, <8 x i64>* %a
  ret void
}

define void @icmp_eq_v16i64(<16 x i64>* %a, <16 x i64>* %b) vscale_range(8,0) #0 {
; CHECK-LABEL: icmp_eq_v16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl16
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i64>, <16 x i64>* %a
  %op2 = load <16 x i64>, <16 x i64>* %b
  %cmp = icmp eq <16 x i64> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i64>
  store <16 x i64> %sext, <16 x i64>* %a
  ret void
}

define void @icmp_eq_v32i64(<32 x i64>* %a, <32 x i64>* %b) vscale_range(16,0) #0 {
; CHECK-LABEL: icmp_eq_v32i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl32
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i64>, <32 x i64>* %a
  %op2 = load <32 x i64>, <32 x i64>* %b
  %cmp = icmp eq <32 x i64> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i64>
  store <32 x i64> %sext, <32 x i64>* %a
  ret void
}

;
; ICMP NE
;

define void @icmp_ne_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_ne_v32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl32
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT:    cmpne p1.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    mov z0.b, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i8>, <32 x i8>* %a
  %op2 = load <32 x i8>, <32 x i8>* %b
  %cmp = icmp ne <32 x i8> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i8>
  store <32 x i8> %sext, <32 x i8>* %a
  ret void
}

;
; ICMP SGE
;

define void @icmp_sge_v32i16(<32 x i16>* %a, <32 x i16>* %b) vscale_range(4,0) #0 {
; CHECK-LABEL: icmp_sge_v32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl32
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    cmpge p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i16>, <32 x i16>* %a
  %op2 = load <32 x i16>, <32 x i16>* %b
  %cmp = icmp sge <32 x i16> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i16>
  store <32 x i16> %sext, <32 x i16>* %a
  ret void
}

;
; ICMP SGT
;

define void @icmp_sgt_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_sgt_v16i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl16
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
; CHECK-NEXT:    cmpgt p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i16>, <16 x i16>* %a
  %op2 = load <16 x i16>, <16 x i16>* %b
  %cmp = icmp sgt <16 x i16> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i16>
  store <16 x i16> %sext, <16 x i16>* %a
  ret void
}

;
; ICMP SLE
;

define void @icmp_sle_v16i32(<16 x i32>* %a, <16 x i32>* %b) vscale_range(4,0) #0 {
; CHECK-LABEL: icmp_sle_v16i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl16
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT:    cmpge p1.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i32>, <16 x i32>* %a
  %op2 = load <16 x i32>, <16 x i32>* %b
  %cmp = icmp sle <16 x i32> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i32>
  store <16 x i32> %sext, <16 x i32>* %a
  ret void
}

;
; ICMP SLT
;

define void @icmp_slt_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_slt_v8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl8
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
; CHECK-NEXT:    cmpgt p1.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x i32>, <8 x i32>* %a
  %op2 = load <8 x i32>, <8 x i32>* %b
  %cmp = icmp slt <8 x i32> %op1, %op2
  %sext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %sext, <8 x i32>* %a
  ret void
}

;
; ICMP UGE
;

define void @icmp_uge_v8i64(<8 x i64>* %a, <8 x i64>* %b) vscale_range(4,0) #0 {
; CHECK-LABEL: icmp_uge_v8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl8
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmphs p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <8 x i64>, <8 x i64>* %a
  %op2 = load <8 x i64>, <8 x i64>* %b
  %cmp = icmp uge <8 x i64> %op1, %op2
  %sext = sext <8 x i1> %cmp to <8 x i64>
  store <8 x i64> %sext, <8 x i64>* %a
  ret void
}

;
; ICMP UGT
;

define void @icmp_ugt_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 {
; CHECK-LABEL: icmp_ugt_v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl4
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmphi p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <4 x i64>, <4 x i64>* %a
  %op2 = load <4 x i64>, <4 x i64>* %b
  %cmp = icmp ugt <4 x i64> %op1, %op2
  %sext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %sext, <4 x i64>* %a
  ret void
}

;
; ICMP ULE
;

define void @icmp_ule_v16i64(<16 x i64>* %a, <16 x i64>* %b) vscale_range(8,0) #0 {
; CHECK-LABEL: icmp_ule_v16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl16
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmphs p1.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <16 x i64>, <16 x i64>* %a
  %op2 = load <16 x i64>, <16 x i64>* %b
  %cmp = icmp ule <16 x i64> %op1, %op2
  %sext = sext <16 x i1> %cmp to <16 x i64>
  store <16 x i64> %sext, <16 x i64>* %a
  ret void
}

;
; ICMP ULT
;

define void @icmp_ult_v32i64(<32 x i64>* %a, <32 x i64>* %b) vscale_range(16,0) #0 {
; CHECK-LABEL: icmp_ult_v32i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl32
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
; CHECK-NEXT:    cmphi p1.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %op1 = load <32 x i64>, <32 x i64>* %a
  %op2 = load <32 x i64>, <32 x i64>* %b
  %cmp = icmp ult <32 x i64> %op1, %op2
  %sext = sext <32 x i1> %cmp to <32 x i64>
  store <32 x i64> %sext, <32 x i64>* %a
  ret void
}

attributes #0 = { "target-features"="+sve" }