# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX6 %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX8 %s

---
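# An s32 -> f32 conversion is already legal, so G_SITOFP is expected to pass through unchanged on both GFX6 and GFX8.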
name: test_sitofp_s32_to_s32
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s32_to_s32
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32)
    ; GFX6-NEXT: $vgpr0 = COPY [[SITOFP]](s32)
    ; GFX8-LABEL: name: test_sitofp_s32_to_s32
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32)
    ; GFX8-NEXT: $vgpr0 = COPY [[SITOFP]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_SITOFP %0
    $vgpr0 = COPY %1
...

---
name: test_sitofp_s32_to_s64
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s32_to_s64
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s32)
    ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SITOFP]](s64)
    ; GFX8-LABEL: name: test_sitofp_s32_to_s64
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s32)
    ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SITOFP]](s64)
    %0:_(s32) = COPY $vgpr0
    %1:_(s64) = G_SITOFP %0
    $vgpr0_vgpr1 = COPY %1
...

---
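# Vector conversions are expected to be scalarized: unmerge the source vector, convert each element, then rebuild the result vector.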
name: test_sitofp_v2s32_to_v2s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; GFX6-LABEL: name: test_sitofp_v2s32_to_v2s32
    ; GFX6: liveins: $vgpr0_vgpr1
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[UV]](s32)
    ; GFX6-NEXT: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[UV1]](s32)
    ; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SITOFP]](s32), [[SITOFP1]](s32)
    ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; GFX8-LABEL: name: test_sitofp_v2s32_to_v2s32
    ; GFX8: liveins: $vgpr0_vgpr1
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[UV]](s32)
    ; GFX8-NEXT: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[UV1]](s32)
    ; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SITOFP]](s32), [[SITOFP1]](s32)
    ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = G_SITOFP %0
    $vgpr0_vgpr1 = COPY %1
...

---
name: test_sitofp_v2s32_to_v2s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; GFX6-LABEL: name: test_sitofp_v2s32_to_v2s64
    ; GFX6: liveins: $vgpr0_vgpr1
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV]](s32)
    ; GFX6-NEXT: [[SITOFP1:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32)
    ; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SITOFP]](s64), [[SITOFP1]](s64)
    ; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    ; GFX8-LABEL: name: test_sitofp_v2s32_to_v2s64
    ; GFX8: liveins: $vgpr0_vgpr1
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV]](s32)
    ; GFX8-NEXT: [[SITOFP1:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32)
    ; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SITOFP]](s64), [[SITOFP1]](s64)
    ; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s64>) = G_SITOFP %0
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...

---
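# There is no direct s64 -> f32 conversion. The expected lowering counts the leading sign bits of the high word with llvm.amdgcn.sffbh, shifts the value left so the significant bits land in the high word, folds the shifted-out low bits into a sticky bit, converts that s32, and rescales the result with llvm.amdgcn.ldexp.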
name: test_sitofp_s64_to_s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; GFX6-LABEL: name: test_sitofp_s64_to_s32
    ; GFX6: liveins: $vgpr0_vgpr1
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX6-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX6-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
    ; GFX6-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX6-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[UMIN]](s32)
    ; GFX6-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX6-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV2]]
    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[UMIN1]]
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX6-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX6-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX6-NEXT: $vgpr0 = COPY [[INT1]](s32)
    ; GFX8-LABEL: name: test_sitofp_s64_to_s32
    ; GFX8: liveins: $vgpr0_vgpr1
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX8-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
    ; GFX8-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX8-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[UMIN]](s32)
    ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX8-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV2]]
    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[UMIN1]]
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX8-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX8-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX8-NEXT: $vgpr0 = COPY [[INT1]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_SITOFP %0
    $vgpr0 = COPY %1
...

---
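# s64 -> f64 is expected to be lowered as sitofp(hi) * 2^32 + uitofp(lo): convert the high word signed and the low word unsigned, scale the high part with llvm.amdgcn.ldexp, and add the two.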
name: test_sitofp_s64_to_s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; GFX6-LABEL: name: test_sitofp_s64_to_s64
    ; GFX6: liveins: $vgpr0_vgpr1
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32)
    ; GFX6-NEXT: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32)
    ; GFX6-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s64), [[C]](s32)
    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP]]
    ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[FADD]](s64)
    ; GFX8-LABEL: name: test_sitofp_s64_to_s64
    ; GFX8: liveins: $vgpr0_vgpr1
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32)
    ; GFX8-NEXT: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32)
    ; GFX8-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s64), [[C]](s32)
    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP]]
    ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[FADD]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s64) = G_SITOFP %0
    $vgpr0_vgpr1 = COPY %1
...

---
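# GFX6 has no 16-bit source conversion, so the s16 source is widened to s32 with G_SEXT_INREG; GFX8 keeps the native s16 G_SITOFP.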
name: test_sitofp_s16_to_s16
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s16_to_s16
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s16) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX6-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SITOFP]](s16)
    ; GFX6-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    ; GFX8-LABEL: name: test_sitofp_s16_to_s16
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s16) = G_SITOFP [[TRUNC]](s16)
    ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SITOFP]](s16)
    ; GFX8-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s16) = G_TRUNC %0
    %2:_(s16) = G_SITOFP %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...

---
name: test_sitofp_s16_to_s32
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s16_to_s32
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX6-NEXT: $vgpr0 = COPY [[SITOFP]](s32)
    ; GFX8-LABEL: name: test_sitofp_s16_to_s32
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX8-NEXT: $vgpr0 = COPY [[SITOFP]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s16) = G_TRUNC %0
    %2:_(s32) = G_SITOFP %1
    $vgpr0 = COPY %2
...

---
name: test_sitofp_s16_to_s64
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s16_to_s64
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SITOFP]](s64)
    ; GFX8-LABEL: name: test_sitofp_s16_to_s64
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SITOFP]](s64)
    %0:_(s32) = COPY $vgpr0
    %1:_(s16) = G_TRUNC %0
    %2:_(s64) = G_SITOFP %1
    $vgpr0_vgpr1 = COPY %2
...

---
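# Sub-16-bit sources are widened to s32 with G_SEXT_INREG on both targets before the conversion.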
name: test_sitofp_s8_to_s16
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s8_to_s16
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s16) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX6-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SITOFP]](s16)
    ; GFX6-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    ; GFX8-LABEL: name: test_sitofp_s8_to_s16
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s16) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SITOFP]](s16)
    ; GFX8-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s8) = G_TRUNC %0
    %2:_(s16) = G_SITOFP %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...

---
name: test_sitofp_s8_to_s32
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s8_to_s32
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX6-NEXT: $vgpr0 = COPY [[SITOFP]](s32)
    ; GFX8-LABEL: name: test_sitofp_s8_to_s32
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX8-NEXT: $vgpr0 = COPY [[SITOFP]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s8) = G_TRUNC %0
    %2:_(s32) = G_SITOFP %1
    $vgpr0 = COPY %2
...

---
name: test_sitofp_s8_to_s64
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s8_to_s64
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SITOFP]](s64)
    ; GFX8-LABEL: name: test_sitofp_s8_to_s64
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[SEXT_INREG]](s32)
    ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SITOFP]](s64)
    %0:_(s32) = COPY $vgpr0
    %1:_(s8) = G_TRUNC %0
    %2:_(s64) = G_SITOFP %1
    $vgpr0_vgpr1 = COPY %2
...

---
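# An s1 source is not converted at all; it is lowered to a select between -1.0 and 0.0 in the destination type.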
name: test_sitofp_s1_to_s16
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s1_to_s16
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xHBC00
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[TRUNC]](s1), [[C]], [[C1]]
    ; GFX6-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16)
    ; GFX6-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    ; GFX8-LABEL: name: test_sitofp_s1_to_s16
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xHBC00
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[TRUNC]](s1), [[C]], [[C1]]
    ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16)
    ; GFX8-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s1) = G_TRUNC %0
    %2:_(s16) = G_SITOFP %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...

---
name: test_sitofp_s1_to_s32
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s1_to_s32
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C]], [[C1]]
    ; GFX6-NEXT: $vgpr0 = COPY [[SELECT]](s32)
    ; GFX8-LABEL: name: test_sitofp_s1_to_s32
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C]], [[C1]]
    ; GFX8-NEXT: $vgpr0 = COPY [[SELECT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s1) = G_TRUNC %0
    %2:_(s32) = G_SITOFP %1
    $vgpr0 = COPY %2
...

---
name: test_sitofp_s1_to_s64
body: |
  bb.0:
    liveins: $vgpr0

    ; GFX6-LABEL: name: test_sitofp_s1_to_s64
    ; GFX6: liveins: $vgpr0
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX6-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[C]], [[C1]]
    ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
    ; GFX8-LABEL: name: test_sitofp_s1_to_s64
    ; GFX8: liveins: $vgpr0
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[C]], [[C1]]
    ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
    %0:_(s32) = COPY $vgpr0
    %1:_(s1) = G_TRUNC %0
    %2:_(s64) = G_SITOFP %1
    $vgpr0_vgpr1 = COPY %2
...

---
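# An odd-sized s33 source is widened to s64 with a 33-bit G_SEXT_INREG and then lowered like the s64 -> f32 case above.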
name: test_sitofp_s33_to_s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; GFX6-LABEL: name: test_sitofp_s33_to_s32
    ; GFX6: liveins: $vgpr0_vgpr1
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX6-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 33
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT_INREG]](s64)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX6-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX6-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
    ; GFX6-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX6-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[UMIN]](s32)
    ; GFX6-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX6-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV2]]
    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[UMIN1]]
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX6-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX6-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX6-NEXT: $vgpr0 = COPY [[INT1]](s32)
    ; GFX8-LABEL: name: test_sitofp_s33_to_s32
    ; GFX8: liveins: $vgpr0_vgpr1
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 33
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT_INREG]](s64)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX8-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
    ; GFX8-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX8-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[UMIN]](s32)
    ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX8-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV2]]
    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[UMIN1]]
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX8-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX8-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX8-NEXT: $vgpr0 = COPY [[INT1]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s33) = G_TRUNC %0
    %2:_(s32) = G_SITOFP %1
    $vgpr0 = COPY %2
...

---
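# s64 -> f16 reuses the s64 -> f32 lowering and truncates the result with G_FPTRUNC.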
name: test_sitofp_s64_to_s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; GFX6-LABEL: name: test_sitofp_s64_to_s16
    ; GFX6: liveins: $vgpr0_vgpr1
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX6-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX6-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
    ; GFX6-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX6-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[UMIN]](s32)
    ; GFX6-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX6-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV2]]
    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[UMIN1]]
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX6-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX6-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX6-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
    ; GFX6-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
    ; GFX6-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    ; GFX8-LABEL: name: test_sitofp_s64_to_s16
    ; GFX8: liveins: $vgpr0_vgpr1
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]]
    ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX8-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32)
    ; GFX8-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX8-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[UMIN]](s32)
    ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX8-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV2]]
    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[UMIN1]]
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX8-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX8-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX8-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
    ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
    ; GFX8-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s16) = G_SITOFP %0
    %2:_(s32) = G_ANYEXT %1
    $vgpr0 = COPY %2
...

---
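# Each s64 element goes through the s64 -> f16 lowering; the two halves are then packed into a <2 x s16> with a shift/or and a bitcast.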
name: test_sitofp_v2s64_to_v2s16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3

    ; GFX6-LABEL: name: test_sitofp_v2s64_to_v2s16
    ; GFX6: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
    ; GFX6-NEXT: {{  $}}
    ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; GFX6-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
    ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX6-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX6-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV3]](s32)
    ; GFX6-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX6-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX6-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[UMIN]](s32)
    ; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX6-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV4]]
    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[UMIN1]]
    ; GFX6-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX6-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX6-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX6-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
    ; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
    ; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV6]], [[UV7]]
    ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[XOR1]], [[C2]](s32)
    ; GFX6-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR1]]
    ; GFX6-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV7]](s32)
    ; GFX6-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[INT2]], [[C1]]
    ; GFX6-NEXT: [[UMIN2:%[0-9]+]]:_(s32) = G_UMIN [[SUB2]], [[ADD1]]
    ; GFX6-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UMIN2]](s32)
    ; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64)
    ; GFX6-NEXT: [[UMIN3:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV8]]
    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[UMIN3]]
    ; GFX6-NEXT: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[OR1]](s32)
    ; GFX6-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN2]]
    ; GFX6-NEXT: [[INT3:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP1]](s32), [[SUB3]](s32)
    ; GFX6-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT3]](s32)
    ; GFX6-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16)
    ; GFX6-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16)
    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX6-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
    ; GFX6-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
    ; GFX6-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX6-NEXT: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
    ; GFX8-LABEL: name: test_sitofp_v2s64_to_v2s16
    ; GFX8: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
    ; GFX8-NEXT: {{  $}}
    ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
    ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]]
    ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[XOR]], [[C2]](s32)
    ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR]]
    ; GFX8-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV3]](s32)
    ; GFX8-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT]], [[C1]]
    ; GFX8-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SUB]], [[ADD]]
    ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[UMIN]](s32)
    ; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64)
    ; GFX8-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV4]]
    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[UMIN1]]
    ; GFX8-NEXT: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32)
    ; GFX8-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN]]
    ; GFX8-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32)
    ; GFX8-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32)
    ; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
    ; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV6]], [[UV7]]
    ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[XOR1]], [[C2]](s32)
    ; GFX8-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[C]], [[ASHR1]]
    ; GFX8-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV7]](s32)
    ; GFX8-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[INT2]], [[C1]]
    ; GFX8-NEXT: [[UMIN2:%[0-9]+]]:_(s32) = G_UMIN [[SUB2]], [[ADD1]]
    ; GFX8-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UMIN2]](s32)
    ; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64)
    ; GFX8-NEXT: [[UMIN3:%[0-9]+]]:_(s32) = G_UMIN [[C1]], [[UV8]]
    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[UMIN3]]
    ; GFX8-NEXT: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[OR1]](s32)
    ; GFX8-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UMIN2]]
    ; GFX8-NEXT: [[INT3:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP1]](s32), [[SUB3]](s32)
    ; GFX8-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT3]](s32)
    ; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16)
    ; GFX8-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16)
    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GFX8-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
    ; GFX8-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
    ; GFX8-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
    ; GFX8-NEXT: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    %1:_(<2 x s16>) = G_SITOFP %0
    $vgpr0 = COPY %1
...