# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
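# These tests check the post-legalizer lowering of vector shifts whose shift
# amount is a constant splat: splats that fit the element size should become
# G_VASHR / G_VLSHR with a scalar shift amount, while out-of-range, zero, or
# non-splat amounts should be left as plain G_ASHR / G_LSHR.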

...
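# A G_ASHR by a splat constant that fits the element size should be lowered
# to G_VASHR taking the shift amount as a scalar.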
---
name:            ashr_v4s32
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: ashr_v4s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], [[C]](s32)
    ; CHECK: $q0 = COPY [[VASHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_ASHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0
...
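# Same as above, but for G_LSHR, which should become G_VLSHR.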
---
name:            lshr_v4s32
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: lshr_v4s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], [[C]](s32)
    ; CHECK: $q0 = COPY [[VLSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0
...
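# G_VLSHR lowering should also apply to <8 x s16>; note the i16 splat value is
# rebuilt as an s32 constant for the scalar shift amount.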
---
name:            lshr_v8s16
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: lshr_v8s16
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], [[C]](s32)
    ; CHECK: $q0 = COPY [[VLSHR]](<8 x s16>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<8 x s16>) = COPY $q0
    %1:_(s16) = G_CONSTANT i16 5
    %2:_(<8 x s16>) = G_BUILD_VECTOR %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16)
    %3:_(<8 x s16>) = G_LSHR %0, %2(<8 x s16>)
    $q0 = COPY %3(<8 x s16>)
    RET_ReallyLR implicit $q0
...
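# Negative test: a shift amount of 40 is too large for a 32-bit element, so
# the G_LSHR is not lowered to G_VLSHR.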
---
name:            imm_too_large
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: imm_too_large
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
    ; CHECK: [[DUP:%[0-9]+]]:_(<4 x s32>) = G_DUP [[C]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[DUP]](<4 x s32>)
    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 40
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0
...
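# Negative test: a zero shift amount is not lowered to G_VLSHR.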
---
name:            imm_zero
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: imm_zero
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0
...
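# Negative test: the shift amount is not a splat, so the G_LSHR is not lowered.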
---
name:            imm_not_splat
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $d0, $d1

    ; CHECK-LABEL: name: imm_not_splat
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C]](s32), [[C]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(s32) = G_CONSTANT i32 4
    %4:_(s32) = G_CONSTANT i32 6
    %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %4(s32), %1(s32), %1(s32)
    %3:_(<4 x s32>) = G_LSHR %0, %2(<4 x s32>)
    $q0 = COPY %3(<4 x s32>)
    RET_ReallyLR implicit $q0
...