# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -aarch64prelegalizercombinerhelper-only-enable-rule="load_and_mask" -verify-machineinstrs %s -o - | FileCheck %s

# REQUIRES: asserts

# Check that we can fold and({any,zext,sext}load, mask) -> zextload.
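#
# Roughly (a sketch only; %ptr, %ld, %mask and %res are illustrative names,
# not taken from the tests below), the combine rewrites a masked narrow load
#
#   %ld:_(s32) = G_LOAD %ptr(p0) :: (load (s8))
#   %mask:_(s32) = G_CONSTANT i32 255
#   %res:_(s32) = G_AND %ld, %mask
#
# into a single zero-extending load:
#
#   %res:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))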

---
name:            test_anyext_1
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_anyext_1
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s8) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s8)
    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s8) = G_CONSTANT i8 1
    %2:_(s8) = G_LOAD %0 :: (load (s8))
    %3:_(s8) = G_AND %2, %1
    %4:_(s32) = G_ANYEXT %3
    $w0 = COPY %4
...

---
name:            test_anyext_s16
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_anyext_s16
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s16) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXTLOAD]](s16)
    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s16) = G_CONSTANT i16 255
    %2:_(s16) = G_LOAD %0 :: (load (s8))
    %3:_(s16) = G_AND %2, %1
    %4:_(s32) = G_ANYEXT %3
    $w0 = COPY %4
...

---
name:            test_anyext_s32
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_anyext_s32
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (load (s8))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_load_s32
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_s32
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8), align 4)
    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (load (s32))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_load_mask_s8_s32_atomic
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_mask_s8_s32_atomic
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load seq_cst (s32))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (load seq_cst (s32))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

# The mask is equal to the memory size.
---
name:            test_load_mask_s16_s16_atomic
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_mask_s16_s16_atomic
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load seq_cst (s16))
    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 65535
    %2:_(s32) = G_LOAD %0 :: (load seq_cst (s16))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

# The mask is smaller than the memory size, which must be preserved for the
# atomic load, so there's little point in folding.
---
name:            test_load_mask_s8_s16_atomic
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_mask_s8_s16_atomic
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load seq_cst (s16))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (load seq_cst (s16))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_load_mask_size_equals_dst_size
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; The combine should only apply if the mask zeroes actual bits of the dst type.
    ; If it doesn't, the mask is redundant and we have other combines to fold it away.

    ; CHECK-LABEL: name: test_load_mask_size_equals_dst_size
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 4294967295
    %2:_(s32) = G_LOAD %0 :: (load (s32))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_zext
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_zext
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_ZEXTLOAD %0 :: (load (s16))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_zext_mask_larger_memsize
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0

    ; The combine should only apply if the mask narrows the memory size.
    ; We have another combine that folds redundant masks.

    ; CHECK-LABEL: name: test_zext_mask_larger_memsize
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ZEXTLOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 65535
    %2:_(s32) = G_ZEXTLOAD %0 :: (load (s8))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_sext
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_sext
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_SEXTLOAD %0 :: (load (s16))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_sext_mask_larger_memsize
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_sext_mask_larger_memsize
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXTLOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 65535
    %2:_(s32) = G_SEXTLOAD %0 :: (load (s8))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_non_pow2_memtype
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_non_pow2_memtype
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s24) = G_CONSTANT i24 7
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[COPY]](p0) :: (load (s24), align 4)
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s24) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s24)
    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s24) = G_CONSTANT i24 7
    %2:_(s24) = G_LOAD %0 :: (load (s24))
    %3:_(s24) = G_AND %2, %1
    %4:_(s32) = G_ANYEXT %3
    $w0 = COPY %4
...


---
name:            test_no_mask
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_no_mask
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 510
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 510
    %2:_(s32) = G_LOAD %0 :: (load (s8))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_volatile
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_volatile
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (volatile load (s8))
    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (volatile load (s8))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...

---
name:            test_volatile_mask_smaller_mem
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_volatile_mask_smaller_mem
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (volatile load (s16))
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LOAD]], [[C]]
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (volatile load (s16))
    %3:_(s32) = G_AND %2, %1
    $w0 = COPY %3
...
---
name:            test_no_lookthrough_copies_multi_uses
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_no_lookthrough_copies_multi_uses
    ; CHECK: liveins: $x0
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s16))
    ; CHECK-NEXT: %v:_(s32) = G_ASSERT_ZEXT [[LOAD]], 16
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND %v, [[C]]
    ; CHECK-NEXT: $w1 = COPY %v(s32)
    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_LOAD %0 :: (load (s16))
    %v:_(s32) = G_ASSERT_ZEXT %2, 16
    %3:_(s32) = G_AND %v, %1
    $w1 = COPY %v
    $w0 = COPY %3
...