# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
#
# Test widening and narrowing of the register operand of test-bit operations
# (TB(N)Z) using subregister copies or SUBREG_TO_REG.
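#
# Background, as a sketch of the selection rule rather than a quote of the
# implementation: TB(N)ZW can only test bits 0-31 of a 32-bit register, while
# TB(N)ZX can test bits 0-63 of a 64-bit register. So testing bit 33 of a
# value that was selected into a GPR32 requires widening it into a GPR64,
# roughly (register names illustrative):
#
#   %wide:gpr64all = SUBREG_TO_REG 0, %narrow, %subreg.sub_32
#   TBZX %wide, 33, %bb.1
#
# while testing bit 0 of a GPR64 value only needs its low 32 bits, so it can
# be narrowed with a subregister copy instead:
#
#   %narrow:gpr32all = COPY %wide.sub_32
#   TBNZW %narrow, 0, %bb.1
#
# The CHECK lines in each test show the exact sequences, including the
# intermediate COPY to a register class the TB(N)Z instruction can use.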
--- |
 @glob = external dso_local unnamed_addr global i1, align 4
 define void @p0_no_copy() { ret void }
 define void @widen_s32_to_s64() { ret void }
 define void @widen_s16_to_s64() { ret void }
 define void @narrow_s64_to_s32() { ret void }

...
---
name:            p0_no_copy
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: p0_no_copy
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %glob:gpr64common = MOVaddr target-flags(aarch64-page) @glob, target-flags(aarch64-pageoff, aarch64-nc) @glob
  ; CHECK-NEXT:   %load:gpr32 = LDRBBui %glob, 0 :: (dereferenceable load (s8) from @glob, align 4)
  ; CHECK-NEXT:   TBNZW %load, 0, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    %glob:gpr(p0) = G_GLOBAL_VALUE @glob
    %load:gpr(s32) = G_LOAD %glob(p0) :: (dereferenceable load (s8) from @glob, align 4)

    ; The load is an s8 load that will be selected into a GPR32 (an LDRBBui),
    ; so the TBNZW can test %load directly and we don't need a copy.
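    ; TBNZW %load, 0, %bb.1 branches to %bb.1 when bit 0 of %load is set,
    ; implementing the G_BRCOND on the loaded i1 value.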
    G_BRCOND %load, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            widen_s32_to_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: widen_s32_to_s64
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $w0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %reg:gpr32all = COPY $w0
  ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, %reg, %subreg.sub_32
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
  ; CHECK-NEXT:   TBZX [[COPY]], 33, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $w0
    %reg:gpr(s32) = COPY $w0
    %zext:gpr(s64) = G_ZEXT %reg(s32)
    %bit:gpr(s64) = G_CONSTANT i64 8589934592
    %zero:gpr(s64) = G_CONSTANT i64 0
    %and:gpr(s64) = G_AND %zext, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero

    ; We should widen using a SUBREG_TO_REG here, because we need a TBZX to get
    ; bit 33. The subregister should be sub_32.
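    ; Worked out: %bit = 8589934592 = 0x200000000 = 1 << 33, so the G_AND
    ; isolates bit 33, which is out of range for the 32-bit TBZW (bits 0-31).
    ; Because the compare is eq-with-zero, the branch is taken when the bit
    ; is clear, i.e. TBZ rather than TBNZ.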
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            widen_s16_to_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: widen_s16_to_s64
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %reg:gpr32 = IMPLICIT_DEF
  ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, %reg, %subreg.sub_32
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
  ; CHECK-NEXT:   TBZX [[COPY]], 33, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    %reg:gpr(s16) = G_IMPLICIT_DEF
    %zext:gpr(s64) = G_ZEXT %reg(s16)
    %bit:gpr(s64) = G_CONSTANT i64 8589934592
    %zero:gpr(s64) = G_CONSTANT i64 0
    %and:gpr(s64) = G_AND %zext, %bit
    %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero

    ; We should widen using a SUBREG_TO_REG here, because we need a TBZX to get
    ; bit 33. The subregister should be sub_32, because s16 will end up on a
    ; GPR32.
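    ; As the CHECK lines show, the s16 G_IMPLICIT_DEF is selected to a gpr32
    ; IMPLICIT_DEF, so the widening proceeds exactly as in widen_s32_to_s64:
    ; SUBREG_TO_REG with sub_32 into a gpr64, then TBZX on bit 33.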
    G_BRCOND %cmp, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...
---
name:            narrow_s64_to_s32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: narrow_s64_to_s32
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %wide:gpr64all = COPY $x0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %wide.sub_32
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
  ; CHECK-NEXT:   TBNZW [[COPY1]], 0, %bb.1
  ; CHECK-NEXT:   B %bb.0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   RET_ReallyLR
  bb.0:
    successors: %bb.0, %bb.1
    liveins: $x0
    %wide:gpr(s64) = COPY $x0

    ; We should narrow using a subregister copy here.
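    ; %wide.sub_32 is the low 32 bits of %wide; since only bit 0 is tested,
    ; a 32-bit TBNZW on that subregister copy suffices and no 64-bit test is
    ; needed.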
    %trunc:gpr(s32) = G_TRUNC %wide(s64)
    G_BRCOND %trunc, %bb.1
    G_BR %bb.0
  bb.1:
    RET_ReallyLR
...