# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
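#
# These tests check that instruction selection folds a G_XOR with a constant
# into a TB(N)Z: when the XOR constant has the tested bit set, the branch
# condition is flipped (TBZ <-> TBNZ); when it does not, the XOR is looked
# through without flipping. The zext test checks that the fold does not look
# through a G_ZEXT when the tested bit lies outside the source type.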
...
---
name:            flip_eq
alignment:       4
legalized:       true
regBankSelected: true
body:             |
  ; CHECK-LABEL: name: flip_eq
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT:   TBNZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0

    ; Check bit 3.
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0

    ; 8 has bit 3 set.
    %fold_cst:gpr(s64) = G_CONSTANT i64 8

    ; The XOR result only has bit 3 set if %copy does not. So, looking
    ; through the XOR, we want to use a TBNZW on %copy.
    %fold_me:gpr(s64) = G_XOR %copy, %fold_cst
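
    ; For example: if bit 3 of %copy is set, %fold_me has bit 3 clear, the
    ; eq compare against zero succeeds, and we branch to %bb.1; if bit 3 of
    ; %copy is clear, the compare fails and we branch back to %bb.0. The
    ; branch to %bb.1 fires exactly when bit 3 of %copy is set, matching
    ; TBNZW %copy, 3, %bb.1.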

    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            flip_ne
alignment:       4
legalized:       true
regBankSelected: true
body:             |
  ; CHECK-LABEL: name: flip_ne
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT:   TBZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0

    ; Same as the flip_eq case, but we should get a TBZW instead.
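    ; ne against zero branches when bit 3 of the XOR result is set, which
    ; happens exactly when bit 3 of %copy is clear; hence TBZW %copy, 3.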

    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 8
    %fold_me:gpr(s64) = G_XOR %copy, %fold_cst
    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            dont_flip_eq
alignment:       4
legalized:       true
regBankSelected: true
body:             |
  ; CHECK-LABEL: name: dont_flip_eq
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT:   TBZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0

    ; Check bit 3.
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0

    ; 7 does not have bit 3 set.
    %fold_cst:gpr(s64) = G_CONSTANT i64 7

    ; The XOR result only has bit 3 set if %copy does. So, looking through
    ; the XOR, we should have a TBZW on %copy.
    %fold_me:gpr(s64) = G_XOR %fold_cst, %copy
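
    ; For example: if bit 3 of %copy is set, %fold_me also has bit 3 set
    ; (XOR with 7 only touches bits 0-2), so the eq compare against zero
    ; fails; if bit 3 of %copy is clear, the compare succeeds and we branch
    ; to %bb.1. So this is a TBZW on %copy with no condition flip.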

    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            dont_flip_eq_zext
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: dont_flip_eq_zext
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32 = COPY $wzr
  ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
  ; CHECK-NEXT:   TBNZX [[COPY1]], 63, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0(0x40000000), %bb.1(0x40000000)

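    ; slt against zero tests the sign bit (bit 63) of the zext result. Bit
    ; 63 is a zero introduced by the G_ZEXT rather than a bit of the 32-bit
    ; XOR, so the selector must not look through the zext here; we expect a
    ; plain TBNZX on bit 63 of the extended value.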
    %1:gpr(s32) = G_CONSTANT i32 0
    %3:gpr(s32) = G_CONSTANT i32 -1
    %4:gpr(s32) = G_XOR %1, %3
    %5:gpr(s64) = G_ZEXT %4(s32)
    %15:gpr(s64) = G_CONSTANT i64 0
    %13:gpr(s32) = G_ICMP intpred(slt), %5(s64), %15
    G_BRCOND %13, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            dont_flip_ne
alignment:       4
legalized:       true
regBankSelected: true
body:             |
  ; CHECK-LABEL: name: dont_flip_ne
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT:   TBNZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0

    ; Same as the dont_flip_eq case, but we should get a TBNZW instead.
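    ; ne against zero branches when bit 3 of the XOR result is set; XOR with
    ; 7 leaves bit 3 of %copy untouched, so this selects TBNZW on %copy.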

    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 7
    %fold_me:gpr(s64) = G_XOR %fold_cst, %copy
    %and:gpr(s64) = G_AND %fold_me, %bit
    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            xor_chain
alignment:       4
legalized:       true
regBankSelected: true
body:             |
  ; CHECK-LABEL: name: xor_chain
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT:   TBZW [[COPY1]], 3, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %copy:gpr(s64) = COPY $x0
    %bit:gpr(s64) = G_CONSTANT i64 8
    %zero:gpr(s64) = G_CONSTANT i64 0
    %fold_cst:gpr(s64) = G_CONSTANT i64 8

    ; The G_XORs cancel each other out, so we should get a TBZW.
    %xor1:gpr(s64) = G_XOR %copy, %fold_cst
    %xor2:gpr(s64) = G_XOR %xor1, %fold_cst
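
    ; Concretely, (%copy ^ 8) ^ 8 == %copy, so the tested bit is flipped
    ; twice and ends up unflipped: an eq-against-zero test of bit 3 of
    ; %copy, which selects TBZW %copy, 3.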

    %and:gpr(s64) = G_AND %xor2, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...