# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=tahiti -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
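# Test instruction selection of the @llvm.amdgcn.ldexp intrinsic: the 32-bit
# cases should select to V_LDEXP_F32_e64 and the 64-bit cases to
# V_LDEXP_F64_e64, across the SGPR/VGPR combinations of the two source
# operands exercised below.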

---
name: ldexp_s32_vsv
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0
    ; GCN-LABEL: name: ldexp_s32_vsv
    ; GCN: liveins: $sgpr0, $vgpr0
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN-NEXT: %2:vgpr_32 = nofpexcept V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit %2
    %0:sgpr(s32) = COPY $sgpr0
    %1:vgpr(s32) = COPY $vgpr0
    %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
    S_ENDPGM 0, implicit %2
...

---
name: ldexp_s32_vvs
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0
    ; GCN-LABEL: name: ldexp_s32_vvs
    ; GCN: liveins: $sgpr0, $vgpr0
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; GCN-NEXT: %2:vgpr_32 = nofpexcept V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit %2
    %0:vgpr(s32) = COPY $vgpr0
    %1:sgpr(s32) = COPY $sgpr0
    %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
    S_ENDPGM 0, implicit %2
...

---
name: ldexp_s32_vvv
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1
    ; GCN-LABEL: name: ldexp_s32_vvv
    ; GCN: liveins: $vgpr0, $vgpr1
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; GCN-NEXT: %2:vgpr_32 = nofpexcept V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit %2
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
    S_ENDPGM 0, implicit %2
...

---
name: ldexp_s64_vsv
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1, $vgpr0
    ; GCN-LABEL: name: ldexp_s64_vsv
    ; GCN: liveins: $sgpr0_sgpr1, $vgpr0
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN-NEXT: %2:vreg_64 = nofpexcept V_LDEXP_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit %2
    %0:sgpr(s64) = COPY $sgpr0_sgpr1
    %1:vgpr(s32) = COPY $vgpr0
    %2:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
    S_ENDPGM 0, implicit %2
...

---
name: ldexp_s64_vvs
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $sgpr0
    ; GCN-LABEL: name: ldexp_s64_vvs
    ; GCN: liveins: $vgpr0_vgpr1, $sgpr0
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; GCN-NEXT: %2:vreg_64 = nofpexcept V_LDEXP_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit %2
    %0:vgpr(s64) = COPY $vgpr0_vgpr1
    %1:sgpr(s32) = COPY $sgpr0
    %2:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
    S_ENDPGM 0, implicit %2
...

---
name: ldexp_s64_vvv
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2
    ; GCN-LABEL: name: ldexp_s64_vvv
    ; GCN: liveins: $vgpr0_vgpr1, $vgpr2
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
    ; GCN-NEXT: %2:vreg_64 = nofpexcept V_LDEXP_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit %2
    %0:vgpr(s64) = COPY $vgpr0_vgpr1
    %1:vgpr(s32) = COPY $vgpr2
    %2:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
    S_ENDPGM 0, implicit %2
...