; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --force-update
; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -amdgpu-codegenprepare -amdgpu-bypass-slow-div=0 %s | FileCheck %s
; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti -amdgpu-bypass-slow-div=0 < %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 -amdgpu-bypass-slow-div=0 < %s | FileCheck -check-prefix=GFX9 %s

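; Unsigned 32-bit division: the IR checks below verify that the -amdgpu-codegenprepare
; pass expands the udiv into an llvm.amdgcn.rcp.f32 reciprocal estimate, a mul-hi
; based refinement, and two conditional quotient-correction steps.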
define amdgpu_kernel void @udiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @udiv_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = uitofp i32 [[Y:%.*]] to float
; CHECK-NEXT:    [[TMP2:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast float [[TMP2]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP4:%.*]] = fptoui float [[TMP3]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = sub i32 0, [[Y]]
; CHECK-NEXT:    [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP7]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = lshr i64 [[TMP9]], 32
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = add i32 [[TMP4]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP16]], 32
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = mul i32 [[TMP19]], [[Y]]
; CHECK-NEXT:    [[TMP21:%.*]] = sub i32 [[X]], [[TMP20]]
; CHECK-NEXT:    [[TMP22:%.*]] = icmp uge i32 [[TMP21]], [[Y]]
; CHECK-NEXT:    [[TMP23:%.*]] = add i32 [[TMP19]], 1
; CHECK-NEXT:    [[TMP24:%.*]] = select i1 [[TMP22]], i32 [[TMP23]], i32 [[TMP19]]
; CHECK-NEXT:    [[TMP25:%.*]] = sub i32 [[TMP21]], [[Y]]
; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP22]], i32 [[TMP25]], i32 [[TMP21]]
; CHECK-NEXT:    [[TMP27:%.*]] = icmp uge i32 [[TMP26]], [[Y]]
; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP24]], 1
; CHECK-NEXT:    [[TMP29:%.*]] = select i1 [[TMP27]], i32 [[TMP28]], i32 [[TMP24]]
; CHECK-NEXT:    store i32 [[TMP29]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX6-NEXT:    s_sub_i32 s4, 0, s3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, s4, v0
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_mul_hi_u32 v0, s2, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, v0, s3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s2, v1
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s3, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_sub_i32 s4, 0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_lo_u32 v1, s4, v0
; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_mul_hi_u32 v0, s2, v0
; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s3
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    v_sub_u32_e32 v1, s2, v1
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v3, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    global_store_dword v2, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv i32 %x, %y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

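; Unsigned 32-bit remainder: same reciprocal-based expansion as udiv_i32, but the
; correction steps conditionally subtract the divisor instead of bumping the quotient.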
define amdgpu_kernel void @urem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @urem_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = uitofp i32 [[Y:%.*]] to float
; CHECK-NEXT:    [[TMP2:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast float [[TMP2]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP4:%.*]] = fptoui float [[TMP3]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = sub i32 0, [[Y]]
; CHECK-NEXT:    [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP7]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = lshr i64 [[TMP9]], 32
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = add i32 [[TMP4]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP16]], 32
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = mul i32 [[TMP19]], [[Y]]
; CHECK-NEXT:    [[TMP21:%.*]] = sub i32 [[X]], [[TMP20]]
; CHECK-NEXT:    [[TMP22:%.*]] = icmp uge i32 [[TMP21]], [[Y]]
; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP21]], [[Y]]
; CHECK-NEXT:    [[TMP24:%.*]] = select i1 [[TMP22]], i32 [[TMP23]], i32 [[TMP21]]
; CHECK-NEXT:    [[TMP25:%.*]] = icmp uge i32 [[TMP24]], [[Y]]
; CHECK-NEXT:    [[TMP26:%.*]] = sub i32 [[TMP24]], [[Y]]
; CHECK-NEXT:    [[TMP27:%.*]] = select i1 [[TMP25]], i32 [[TMP26]], i32 [[TMP24]]
; CHECK-NEXT:    store i32 [[TMP27]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s5
; GFX6-NEXT:    s_sub_i32 s2, 0, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, s2, v0
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s5
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s5, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s5, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s5, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s5, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_sub_i32 s4, 0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s5, v0
; GFX9-NEXT:    s_mul_i32 s4, s4, s5
; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s4
; GFX9-NEXT:    s_add_i32 s5, s5, s4
; GFX9-NEXT:    s_mul_hi_u32 s4, s2, s5
; GFX9-NEXT:    s_mul_i32 s4, s4, s3
; GFX9-NEXT:    s_sub_i32 s2, s2, s4
; GFX9-NEXT:    s_sub_i32 s4, s2, s3
; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
; GFX9-NEXT:    s_cselect_b32 s2, s4, s2
; GFX9-NEXT:    s_sub_i32 s4, s2, s3
; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
; GFX9-NEXT:    s_cselect_b32 s2, s4, s2
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    global_store_dword v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem i32 %x, %y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

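; Signed 32-bit division: operands are made non-negative with ashr/add/xor, the
; unsigned expansion is applied, and the quotient's sign is restored via xor/sub.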
define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @sdiv_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[Y:%.*]], 31
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[X]], [[TMP1]]
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[Y]], [[TMP2]]
; CHECK-NEXT:    [[TMP6:%.*]] = xor i32 [[TMP4]], [[TMP1]]
; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP5]], [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = uitofp i32 [[TMP7]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fmul fast float [[TMP9]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP11:%.*]] = fptoui float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP12:%.*]] = sub i32 0, [[TMP7]]
; CHECK-NEXT:    [[TMP13:%.*]] = mul i32 [[TMP12]], [[TMP11]]
; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP16]], 32
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP11]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP20]] to i64
; CHECK-NEXT:    [[TMP23:%.*]] = mul i64 [[TMP21]], [[TMP22]]
; CHECK-NEXT:    [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
; CHECK-NEXT:    [[TMP25:%.*]] = lshr i64 [[TMP23]], 32
; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
; CHECK-NEXT:    [[TMP27:%.*]] = mul i32 [[TMP26]], [[TMP7]]
; CHECK-NEXT:    [[TMP28:%.*]] = sub i32 [[TMP6]], [[TMP27]]
; CHECK-NEXT:    [[TMP29:%.*]] = icmp uge i32 [[TMP28]], [[TMP7]]
; CHECK-NEXT:    [[TMP30:%.*]] = add i32 [[TMP26]], 1
; CHECK-NEXT:    [[TMP31:%.*]] = select i1 [[TMP29]], i32 [[TMP30]], i32 [[TMP26]]
; CHECK-NEXT:    [[TMP32:%.*]] = sub i32 [[TMP28]], [[TMP7]]
; CHECK-NEXT:    [[TMP33:%.*]] = select i1 [[TMP29]], i32 [[TMP32]], i32 [[TMP28]]
; CHECK-NEXT:    [[TMP34:%.*]] = icmp uge i32 [[TMP33]], [[TMP7]]
; CHECK-NEXT:    [[TMP35:%.*]] = add i32 [[TMP31]], 1
; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP34]], i32 [[TMP35]], i32 [[TMP31]]
; CHECK-NEXT:    [[TMP37:%.*]] = xor i32 [[TMP36]], [[TMP3]]
; CHECK-NEXT:    [[TMP38:%.*]] = sub i32 [[TMP37]], [[TMP3]]
; CHECK-NEXT:    store i32 [[TMP38]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s8, s3, 31
; GFX6-NEXT:    s_add_i32 s3, s3, s8
; GFX6-NEXT:    s_xor_b32 s3, s3, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX6-NEXT:    s_sub_i32 s4, 0, s3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, s4, v0
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_ashr_i32 s0, s2, 31
; GFX6-NEXT:    s_add_i32 s1, s2, s0
; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    s_xor_b32 s2, s0, s8
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, v0, s3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s1, v1
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s3, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_add_i32 s3, s3, s4
; GFX9-NEXT:    s_xor_b32 s3, s3, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_sub_i32 s5, 0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_lo_u32 v1, s5, v0
; GFX9-NEXT:    s_ashr_i32 s5, s2, 31
; GFX9-NEXT:    s_add_i32 s2, s2, s5
; GFX9-NEXT:    s_xor_b32 s2, s2, s5
; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT:    s_xor_b32 s4, s5, s4
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_mul_hi_u32 v0, s2, v0
; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s3
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    v_sub_u32_e32 v1, s2, v1
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v3, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_xor_b32_e32 v0, s4, v0
; GFX9-NEXT:    v_subrev_u32_e32 v0, s4, v0
; GFX9-NEXT:    global_store_dword v2, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i32 %x, %y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

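; Signed 32-bit remainder: like sdiv_i32, but only the dividend's sign is applied
; to the final result.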
define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @srem_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[Y:%.*]], 31
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[X]], [[TMP1]]
; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[Y]], [[TMP2]]
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP6:%.*]] = xor i32 [[TMP4]], [[TMP2]]
; CHECK-NEXT:    [[TMP7:%.*]] = uitofp i32 [[TMP6]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP8]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = sub i32 0, [[TMP6]]
; CHECK-NEXT:    [[TMP12:%.*]] = mul i32 [[TMP11]], [[TMP10]]
; CHECK-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT:    [[TMP14:%.*]] = zext i32 [[TMP12]] to i64
; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
; CHECK-NEXT:    [[TMP17:%.*]] = lshr i64 [[TMP15]], 32
; CHECK-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
; CHECK-NEXT:    [[TMP19:%.*]] = add i32 [[TMP10]], [[TMP18]]
; CHECK-NEXT:    [[TMP20:%.*]] = zext i32 [[TMP5]] to i64
; CHECK-NEXT:    [[TMP21:%.*]] = zext i32 [[TMP19]] to i64
; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP20]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32
; CHECK-NEXT:    [[TMP24:%.*]] = lshr i64 [[TMP22]], 32
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i64 [[TMP24]] to i32
; CHECK-NEXT:    [[TMP26:%.*]] = mul i32 [[TMP25]], [[TMP6]]
; CHECK-NEXT:    [[TMP27:%.*]] = sub i32 [[TMP5]], [[TMP26]]
; CHECK-NEXT:    [[TMP28:%.*]] = icmp uge i32 [[TMP27]], [[TMP6]]
; CHECK-NEXT:    [[TMP29:%.*]] = sub i32 [[TMP27]], [[TMP6]]
; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP28]], i32 [[TMP29]], i32 [[TMP27]]
; CHECK-NEXT:    [[TMP31:%.*]] = icmp uge i32 [[TMP30]], [[TMP6]]
; CHECK-NEXT:    [[TMP32:%.*]] = sub i32 [[TMP30]], [[TMP6]]
; CHECK-NEXT:    [[TMP33:%.*]] = select i1 [[TMP31]], i32 [[TMP32]], i32 [[TMP30]]
; CHECK-NEXT:    [[TMP34:%.*]] = xor i32 [[TMP33]], [[TMP1]]
; CHECK-NEXT:    [[TMP35:%.*]] = sub i32 [[TMP34]], [[TMP1]]
; CHECK-NEXT:    store i32 [[TMP35]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s4, s3, 31
; GFX6-NEXT:    s_add_i32 s3, s3, s4
; GFX6-NEXT:    s_xor_b32 s4, s3, s4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s4
; GFX6-NEXT:    s_sub_i32 s3, 0, s4
; GFX6-NEXT:    s_ashr_i32 s5, s2, 31
; GFX6-NEXT:    s_add_i32 s2, s2, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    s_xor_b32 s6, s2, s5
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v0
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_mul_hi_u32 v0, s6, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s4
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s4, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s4, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s5, v0
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_add_i32 s3, s3, s4
; GFX9-NEXT:    s_xor_b32 s3, s3, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_sub_i32 s5, 0, s3
; GFX9-NEXT:    s_ashr_i32 s4, s2, 31
; GFX9-NEXT:    s_add_i32 s2, s2, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_xor_b32 s2, s2, s4
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s6, v0
; GFX9-NEXT:    s_mul_i32 s5, s5, s6
; GFX9-NEXT:    s_mul_hi_u32 s5, s6, s5
; GFX9-NEXT:    s_add_i32 s6, s6, s5
; GFX9-NEXT:    s_mul_hi_u32 s5, s2, s6
; GFX9-NEXT:    s_mul_i32 s5, s5, s3
; GFX9-NEXT:    s_sub_i32 s2, s2, s5
; GFX9-NEXT:    s_sub_i32 s5, s2, s3
; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
; GFX9-NEXT:    s_cselect_b32 s2, s5, s2
; GFX9-NEXT:    s_sub_i32 s5, s2, s3
; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
; GFX9-NEXT:    s_cselect_b32 s2, s5, s2
; GFX9-NEXT:    s_xor_b32 s2, s2, s4
; GFX9-NEXT:    s_sub_i32 s2, s2, s4
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    global_store_dword v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = srem i32 %x, %y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

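; Unsigned 16-bit division: operands are zero-extended to i32 and the divide is done
; in f32 with a single fmad-based correction, then the result is masked back to 16 bits.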
define amdgpu_kernel void @udiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
; CHECK-LABEL: @udiv_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fneg fast float [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
; CHECK-NEXT:    store i16 [[TMP17]], i16 addrspace(1)* [[OUT:%.*]], align 2
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s2, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshr_b32 s3, s2, 16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX6-NEXT:    s_and_b32 s2, s2, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s2
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v3, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b32 s3, s2, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_and_b32 s2, s2, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v2
; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v4, vcc
; GFX9-NEXT:    global_store_short v3, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv i16 %x, %y
  store i16 %r, i16 addrspace(1)* %out
  ret void
}

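; Unsigned 16-bit remainder: computes the f32-based quotient as in udiv_i16 and
; recovers the remainder as x - q * y.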
define amdgpu_kernel void @urem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
; CHECK-LABEL: @urem_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fneg fast float [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], [[TMP2]]
; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
; CHECK-NEXT:    store i16 [[TMP19]], i16 addrspace(1)* [[OUT:%.*]], align 2
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshr_b32 s2, s4, 16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s2
; GFX6-NEXT:    s_and_b32 s3, s4, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s3
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b32 s3, s2, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_and_b32 s4, s2, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s4
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v2
; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX9-NEXT:    v_sub_u32_e32 v0, s2, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_short v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem i16 %x, %y
  store i16 %r, i16 addrspace(1)* %out
  ret void
}

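; Signed 16-bit division: operands are sign-extended to i32, divided in f32, and the
; result is sign-extended back to 16 bits via shl/ashr.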
define amdgpu_kernel void @sdiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
; CHECK-LABEL: @sdiv_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fneg fast float [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 16
; CHECK-NEXT:    [[TMP20:%.*]] = ashr i32 [[TMP19]], 16
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
; CHECK-NEXT:    store i16 [[TMP21]], i16 addrspace(1)* [[OUT:%.*]], align 2
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s5, s4, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s5
; GFX6-NEXT:    s_sext_i32_i16 s4, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s4
; GFX6-NEXT:    s_xor_b32 s4, s4, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s4
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s4, 16
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s0
; GFX9-NEXT:    s_sext_i32_i16 s1, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v2, -v3, v0, v2
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v2|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v3
; GFX9-NEXT:    global_store_short v1, v0, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i16 %x, %y
  store i16 %r, i16 addrspace(1)* %out
  ret void
}

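; Signed 16-bit remainder: f32-based quotient as in sdiv_i16, with the remainder
; recovered as x - q * y and sign-extended back to 16 bits.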
define amdgpu_kernel void @srem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
; CHECK-LABEL: @srem_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = sext i16 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fneg fast float [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], [[TMP2]]
; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP1]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 16
; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 16
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
; CHECK-NEXT:    store i16 [[TMP23]], i16 addrspace(1)* [[OUT:%.*]], align 2
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s2, s4, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s2
; GFX6-NEXT:    s_sext_i32_i16 s3, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s3
; GFX6-NEXT:    s_xor_b32 s3, s3, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s3, s3, 30
; GFX6-NEXT:    s_or_b32 s3, s3, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s3
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s5, s4, 16
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s5
; GFX9-NEXT:    s_sext_i32_i16 s2, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s2
; GFX9-NEXT:    s_xor_b32 s2, s2, s5
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT:    s_ashr_i32 s2, s2, 30
; GFX9-NEXT:    s_or_b32 s6, s2, 1
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], exec
; GFX9-NEXT:    s_cselect_b32 s2, s6, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s2, v2
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s5
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_sub_u32_e32 v0, s4, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_short v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = srem i16 %x, %y
  store i16 %r, i16 addrspace(1)* %out
  ret void
}

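; Unsigned 8-bit division: same f32-based expansion as udiv_i16, with the result
; masked to 8 bits.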
define amdgpu_kernel void @udiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
; CHECK-LABEL: @udiv_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fneg fast float [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 255
; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
; CHECK-NEXT:    store i8 [[TMP17]], i8 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i8:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_cvt_f32_ubyte1_e32 v0, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v2, s4
; GFX6-NEXT:    v_mul_f32_e32 v1, v2, v1
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v1
; GFX6-NEXT:    v_mad_f32 v1, -v1, v0, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_cvt_f32_ubyte1_e32 v0, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v3, s2
; GFX9-NEXT:    v_mul_f32_e32 v1, v3, v1
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v1
; GFX9-NEXT:    v_mad_f32 v1, -v1, v0, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v4, vcc
; GFX9-NEXT:    global_store_byte v2, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv i8 %x, %y
  store i8 %r, i8 addrspace(1)* %out
  ret void
}

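; Unsigned 8-bit remainder: f32-based quotient followed by x - q * y, masked to 8 bits.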
define amdgpu_kernel void @urem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
; CHECK-LABEL: @urem_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fneg fast float [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], [[TMP2]]
; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 255
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i8
; CHECK-NEXT:    store i8 [[TMP19]], i8 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i8:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_cvt_f32_ubyte1_e32 v0, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v2, s4
; GFX6-NEXT:    s_lshr_b32 s2, s4, 8
; GFX6-NEXT:    v_mul_f32_e32 v1, v2, v1
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v1
; GFX6-NEXT:    v_mad_f32 v1, -v1, v0, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_cvt_f32_ubyte1_e32 v0, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v2, s2
; GFX9-NEXT:    s_lshr_b32 s3, s2, 8
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_mul_f32_e32 v1, v2, v1
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v1
; GFX9-NEXT:    v_mad_f32 v1, -v1, v0, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX9-NEXT:    v_sub_u32_e32 v0, s2, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_byte v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem i8 %x, %y
  store i8 %r, i8 addrspace(1)* %out
  ret void
}

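; Signed 8-bit division: same pattern as sdiv_i16, sign-extending through i32 and
; narrowing the result back to 8 bits.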
define amdgpu_kernel void @sdiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
; CHECK-LABEL: @sdiv_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = sext i8 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fneg fast float [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 24
; CHECK-NEXT:    [[TMP20:%.*]] = ashr i32 [[TMP19]], 24
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i8
; CHECK-NEXT:    store i8 [[TMP21]], i8 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i8:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_i32 s5, s4, 0x80008
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s5
; GFX6-NEXT:    s_sext_i32_i8 s4, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s4
; GFX6-NEXT:    s_xor_b32 s4, s4, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s4
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_i32 s0, s4, 0x80008
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s0
; GFX9-NEXT:    s_sext_i32_i8 s1, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v2, -v3, v0, v2
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v2|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v3
; GFX9-NEXT:    global_store_byte v1, v0, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i8 %x, %y
  store i8 %r, i8 addrspace(1)* %out
  ret void
}

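; Signed 8-bit remainder: same pattern as srem_i16, narrowed back to 8 bits.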
define amdgpu_kernel void @srem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
; CHECK-LABEL: @srem_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = sext i8 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fneg fast float [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], [[TMP2]]
; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP1]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 24
; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 24
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i8
; CHECK-NEXT:    store i8 [[TMP23]], i8 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i8:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_i32 s2, s4, 0x80008
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s2
; GFX6-NEXT:    s_sext_i32_i8 s5, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s5
; GFX6-NEXT:    s_xor_b32 s2, s5, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s2, s2, 30
; GFX6-NEXT:    s_or_b32 s2, s2, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s2
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    s_lshr_b32 s3, s4, 8
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i8:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_i32 s0, s4, 0x80008
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s0
; GFX9-NEXT:    s_sext_i32_i8 s1, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_lshr_b32 s5, s4, 8
; GFX9-NEXT:    s_or_b32 s6, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s6, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v2
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s5
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_sub_u32_e32 v0, s4, v0
; GFX9-NEXT:    global_store_byte v1, v0, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = srem i8 %x, %y
  store i8 %r, i8 addrspace(1)* %out
  ret void
}

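; Vector case: the <4 x i32> udiv is scalarized and each lane receives the same
; reciprocal-based expansion as udiv_i32.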
define amdgpu_kernel void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @udiv_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP6:%.*]] = fptoui float [[TMP5]] to i32
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 0, [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP6]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = mul i32 [[TMP21]], [[TMP2]]
; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP1]], [[TMP22]]
; CHECK-NEXT:    [[TMP24:%.*]] = icmp uge i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP25:%.*]] = add i32 [[TMP21]], 1
; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP24]], i32 [[TMP25]], i32 [[TMP21]]
; CHECK-NEXT:    [[TMP27:%.*]] = sub i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP24]], i32 [[TMP27]], i32 [[TMP23]]
; CHECK-NEXT:    [[TMP29:%.*]] = icmp uge i32 [[TMP28]], [[TMP2]]
; CHECK-NEXT:    [[TMP30:%.*]] = add i32 [[TMP26]], 1
; CHECK-NEXT:    [[TMP31:%.*]] = select i1 [[TMP29]], i32 [[TMP30]], i32 [[TMP26]]
; CHECK-NEXT:    [[TMP32:%.*]] = insertelement <4 x i32> undef, i32 [[TMP31]], i64 0
; CHECK-NEXT:    [[TMP33:%.*]] = extractelement <4 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP34:%.*]] = extractelement <4 x i32> [[Y]], i64 1
; CHECK-NEXT:    [[TMP35:%.*]] = uitofp i32 [[TMP34]] to float
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP36]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP38:%.*]] = fptoui float [[TMP37]] to i32
; CHECK-NEXT:    [[TMP39:%.*]] = sub i32 0, [[TMP34]]
; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP38]]
; CHECK-NEXT:    [[TMP41:%.*]] = zext i32 [[TMP38]] to i64
; CHECK-NEXT:    [[TMP42:%.*]] = zext i32 [[TMP40]] to i64
; CHECK-NEXT:    [[TMP43:%.*]] = mul i64 [[TMP41]], [[TMP42]]
; CHECK-NEXT:    [[TMP44:%.*]] = trunc i64 [[TMP43]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = lshr i64 [[TMP43]], 32
; CHECK-NEXT:    [[TMP46:%.*]] = trunc i64 [[TMP45]] to i32
; CHECK-NEXT:    [[TMP47:%.*]] = add i32 [[TMP38]], [[TMP46]]
; CHECK-NEXT:    [[TMP48:%.*]] = zext i32 [[TMP33]] to i64
; CHECK-NEXT:    [[TMP49:%.*]] = zext i32 [[TMP47]] to i64
; CHECK-NEXT:    [[TMP50:%.*]] = mul i64 [[TMP48]], [[TMP49]]
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i64 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = lshr i64 [[TMP50]], 32
; CHECK-NEXT:    [[TMP53:%.*]] = trunc i64 [[TMP52]] to i32
; CHECK-NEXT:    [[TMP54:%.*]] = mul i32 [[TMP53]], [[TMP34]]
; CHECK-NEXT:    [[TMP55:%.*]] = sub i32 [[TMP33]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = icmp uge i32 [[TMP55]], [[TMP34]]
; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP53]], 1
; CHECK-NEXT:    [[TMP58:%.*]] = select i1 [[TMP56]], i32 [[TMP57]], i32 [[TMP53]]
; CHECK-NEXT:    [[TMP59:%.*]] = sub i32 [[TMP55]], [[TMP34]]
; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP56]], i32 [[TMP59]], i32 [[TMP55]]
; CHECK-NEXT:    [[TMP61:%.*]] = icmp uge i32 [[TMP60]], [[TMP34]]
; CHECK-NEXT:    [[TMP62:%.*]] = add i32 [[TMP58]], 1
; CHECK-NEXT:    [[TMP63:%.*]] = select i1 [[TMP61]], i32 [[TMP62]], i32 [[TMP58]]
; CHECK-NEXT:    [[TMP64:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP63]], i64 1
; CHECK-NEXT:    [[TMP65:%.*]] = extractelement <4 x i32> [[X]], i64 2
; CHECK-NEXT:    [[TMP66:%.*]] = extractelement <4 x i32> [[Y]], i64 2
; CHECK-NEXT:    [[TMP67:%.*]] = uitofp i32 [[TMP66]] to float
; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP67]])
; CHECK-NEXT:    [[TMP69:%.*]] = fmul fast float [[TMP68]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP70:%.*]] = fptoui float [[TMP69]] to i32
; CHECK-NEXT:    [[TMP71:%.*]] = sub i32 0, [[TMP66]]
; CHECK-NEXT:    [[TMP72:%.*]] = mul i32 [[TMP71]], [[TMP70]]
; CHECK-NEXT:    [[TMP73:%.*]] = zext i32 [[TMP70]] to i64
; CHECK-NEXT:    [[TMP74:%.*]] = zext i32 [[TMP72]] to i64
; CHECK-NEXT:    [[TMP75:%.*]] = mul i64 [[TMP73]], [[TMP74]]
; CHECK-NEXT:    [[TMP76:%.*]] = trunc i64 [[TMP75]] to i32
; CHECK-NEXT:    [[TMP77:%.*]] = lshr i64 [[TMP75]], 32
; CHECK-NEXT:    [[TMP78:%.*]] = trunc i64 [[TMP77]] to i32
; CHECK-NEXT:    [[TMP79:%.*]] = add i32 [[TMP70]], [[TMP78]]
; CHECK-NEXT:    [[TMP80:%.*]] = zext i32 [[TMP65]] to i64
; CHECK-NEXT:    [[TMP81:%.*]] = zext i32 [[TMP79]] to i64
; CHECK-NEXT:    [[TMP82:%.*]] = mul i64 [[TMP80]], [[TMP81]]
; CHECK-NEXT:    [[TMP83:%.*]] = trunc i64 [[TMP82]] to i32
; CHECK-NEXT:    [[TMP84:%.*]] = lshr i64 [[TMP82]], 32
; CHECK-NEXT:    [[TMP85:%.*]] = trunc i64 [[TMP84]] to i32
; CHECK-NEXT:    [[TMP86:%.*]] = mul i32 [[TMP85]], [[TMP66]]
; CHECK-NEXT:    [[TMP87:%.*]] = sub i32 [[TMP65]], [[TMP86]]
; CHECK-NEXT:    [[TMP88:%.*]] = icmp uge i32 [[TMP87]], [[TMP66]]
; CHECK-NEXT:    [[TMP89:%.*]] = add i32 [[TMP85]], 1
; CHECK-NEXT:    [[TMP90:%.*]] = select i1 [[TMP88]], i32 [[TMP89]], i32 [[TMP85]]
; CHECK-NEXT:    [[TMP91:%.*]] = sub i32 [[TMP87]], [[TMP66]]
; CHECK-NEXT:    [[TMP92:%.*]] = select i1 [[TMP88]], i32 [[TMP91]], i32 [[TMP87]]
; CHECK-NEXT:    [[TMP93:%.*]] = icmp uge i32 [[TMP92]], [[TMP66]]
; CHECK-NEXT:    [[TMP94:%.*]] = add i32 [[TMP90]], 1
; CHECK-NEXT:    [[TMP95:%.*]] = select i1 [[TMP93]], i32 [[TMP94]], i32 [[TMP90]]
; CHECK-NEXT:    [[TMP96:%.*]] = insertelement <4 x i32> [[TMP64]], i32 [[TMP95]], i64 2
; CHECK-NEXT:    [[TMP97:%.*]] = extractelement <4 x i32> [[X]], i64 3
; CHECK-NEXT:    [[TMP98:%.*]] = extractelement <4 x i32> [[Y]], i64 3
; CHECK-NEXT:    [[TMP99:%.*]] = uitofp i32 [[TMP98]] to float
; CHECK-NEXT:    [[TMP100:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP99]])
; CHECK-NEXT:    [[TMP101:%.*]] = fmul fast float [[TMP100]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP102:%.*]] = fptoui float [[TMP101]] to i32
; CHECK-NEXT:    [[TMP103:%.*]] = sub i32 0, [[TMP98]]
; CHECK-NEXT:    [[TMP104:%.*]] = mul i32 [[TMP103]], [[TMP102]]
; CHECK-NEXT:    [[TMP105:%.*]] = zext i32 [[TMP102]] to i64
; CHECK-NEXT:    [[TMP106:%.*]] = zext i32 [[TMP104]] to i64
; CHECK-NEXT:    [[TMP107:%.*]] = mul i64 [[TMP105]], [[TMP106]]
; CHECK-NEXT:    [[TMP108:%.*]] = trunc i64 [[TMP107]] to i32
; CHECK-NEXT:    [[TMP109:%.*]] = lshr i64 [[TMP107]], 32
; CHECK-NEXT:    [[TMP110:%.*]] = trunc i64 [[TMP109]] to i32
; CHECK-NEXT:    [[TMP111:%.*]] = add i32 [[TMP102]], [[TMP110]]
; CHECK-NEXT:    [[TMP112:%.*]] = zext i32 [[TMP97]] to i64
; CHECK-NEXT:    [[TMP113:%.*]] = zext i32 [[TMP111]] to i64
; CHECK-NEXT:    [[TMP114:%.*]] = mul i64 [[TMP112]], [[TMP113]]
; CHECK-NEXT:    [[TMP115:%.*]] = trunc i64 [[TMP114]] to i32
; CHECK-NEXT:    [[TMP116:%.*]] = lshr i64 [[TMP114]], 32
; CHECK-NEXT:    [[TMP117:%.*]] = trunc i64 [[TMP116]] to i32
; CHECK-NEXT:    [[TMP118:%.*]] = mul i32 [[TMP117]], [[TMP98]]
; CHECK-NEXT:    [[TMP119:%.*]] = sub i32 [[TMP97]], [[TMP118]]
; CHECK-NEXT:    [[TMP120:%.*]] = icmp uge i32 [[TMP119]], [[TMP98]]
; CHECK-NEXT:    [[TMP121:%.*]] = add i32 [[TMP117]], 1
; CHECK-NEXT:    [[TMP122:%.*]] = select i1 [[TMP120]], i32 [[TMP121]], i32 [[TMP117]]
; CHECK-NEXT:    [[TMP123:%.*]] = sub i32 [[TMP119]], [[TMP98]]
; CHECK-NEXT:    [[TMP124:%.*]] = select i1 [[TMP120]], i32 [[TMP123]], i32 [[TMP119]]
; CHECK-NEXT:    [[TMP125:%.*]] = icmp uge i32 [[TMP124]], [[TMP98]]
; CHECK-NEXT:    [[TMP126:%.*]] = add i32 [[TMP122]], 1
; CHECK-NEXT:    [[TMP127:%.*]] = select i1 [[TMP125]], i32 [[TMP126]], i32 [[TMP122]]
; CHECK-NEXT:    [[TMP128:%.*]] = insertelement <4 x i32> [[TMP96]], i32 [[TMP127]], i64 3
; CHECK-NEXT:    store <4 x i32> [[TMP128]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v4i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s15, 0xf000
; GFX6-NEXT:    s_mov_b32 s14, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX6-NEXT:    s_sub_i32 s2, 0, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s10
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s11
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v0
; GFX6-NEXT:    s_sub_i32 s2, 0, s9
; GFX6-NEXT:    v_mul_lo_u32 v3, s2, v1
; GFX6-NEXT:    s_sub_i32 s2, 0, s10
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s8
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
; GFX6-NEXT:    v_mul_lo_u32 v5, v1, s9
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s4, v2
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s8, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v4
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s5, v5
; GFX6-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v1
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[0:1]
; GFX6-NEXT:    v_mul_lo_u32 v4, s2, v2
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s9, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
; GFX6-NEXT:    v_mul_hi_u32 v4, v2, v4
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v1
; GFX6-NEXT:    s_sub_i32 s0, 0, s11
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v6
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
; GFX6-NEXT:    v_mul_hi_u32 v2, s6, v2
; GFX6-NEXT:    v_mul_f32_e32 v4, 0x4f7ffffe, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    v_mul_lo_u32 v3, v2, s10
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v4
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s6, v3
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v3
; GFX6-NEXT:    v_mul_hi_u32 v5, v4, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v6, vcc, s10, v3
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, s7, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v4, s11
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s10, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v4
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s7, v6
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s11, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v4, v5, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s11, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v4
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s11, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v4i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX9-NEXT:    s_sub_i32 s2, 0, s8
; GFX9-NEXT:    s_sub_i32 s3, 0, s9
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s10
; GFX9-NEXT:    v_cvt_f32_u32_e32 v6, s11
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v5
; GFX9-NEXT:    v_mul_lo_u32 v2, s2, v0
; GFX9-NEXT:    s_sub_i32 s2, 0, s10
; GFX9-NEXT:    v_mul_lo_u32 v3, s3, v1
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v6
; GFX9-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT:    v_mul_f32_e32 v6, 0x4f7ffffe, v6
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v5
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_mul_lo_u32 v3, v0, s8
; GFX9-NEXT:    v_add_u32_e32 v7, 1, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v6, v6
; GFX9-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX9-NEXT:    v_sub_u32_e32 v3, s4, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s8, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v7, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v7, s8, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s8, v3
; GFX9-NEXT:    v_mul_lo_u32 v3, s2, v2
; GFX9-NEXT:    s_sub_i32 s2, 0, s11
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, s9
; GFX9-NEXT:    v_add_u32_e32 v7, 1, v0
; GFX9-NEXT:    v_mul_hi_u32 v3, v2, v3
; GFX9-NEXT:    v_add_u32_e32 v8, 1, v1
; GFX9-NEXT:    v_sub_u32_e32 v5, s5, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v7, vcc
; GFX9-NEXT:    v_add_u32_e32 v2, v2, v3
; GFX9-NEXT:    v_mul_lo_u32 v3, s2, v6
; GFX9-NEXT:    v_mul_hi_u32 v2, s6, v2
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s9, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
; GFX9-NEXT:    v_mul_hi_u32 v3, v6, v3
; GFX9-NEXT:    v_mul_lo_u32 v8, v2, s10
; GFX9-NEXT:    v_subrev_u32_e32 v7, s9, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX9-NEXT:    v_add_u32_e32 v3, v6, v3
; GFX9-NEXT:    v_mul_hi_u32 v3, s7, v3
; GFX9-NEXT:    v_add_u32_e32 v7, 1, v1
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s9, v5
; GFX9-NEXT:    v_sub_u32_e32 v5, s6, v8
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s10, v5
; GFX9-NEXT:    v_subrev_u32_e32 v6, s10, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
; GFX9-NEXT:    v_mul_lo_u32 v6, v3, s11
; GFX9-NEXT:    v_add_u32_e32 v7, 1, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v7, vcc
; GFX9-NEXT:    v_add_u32_e32 v7, 1, v2
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s10, v5
; GFX9-NEXT:    v_sub_u32_e32 v5, s7, v6
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v7, vcc
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s11, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v6, s11, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s11, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv <4 x i32> %x, %y
  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
  ret void
}

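; Explanatory note (not autogenerated): urem_v4i32 below exercises the same
; per-element reciprocal expansion as udiv_v4i32 above, but keeps the remainder
; instead of the quotient. A rough sketch of the scalar recipe the CHECK lines
; encode (names illustrative, not part of the test input):
;   m = fptoui(rcp(uitofp(y)) * 0x41EFFFFFC0000000)   ; ~2^32 / y
;   m = m + mulhi(m, (0 - y) * m)                     ; one refinement step
;   q = mulhi(x, m)                                   ; quotient estimate
;   r = x - q * y                                     ; then two rounds of
;   if (r >= y) { r -= y }                            ; conditional correction
; udiv additionally bumps q by 1 in each correction round; urem only fixes r.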
define amdgpu_kernel void @urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @urem_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP6:%.*]] = fptoui float [[TMP5]] to i32
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 0, [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP6]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = mul i32 [[TMP21]], [[TMP2]]
; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP1]], [[TMP22]]
; CHECK-NEXT:    [[TMP24:%.*]] = icmp uge i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP25:%.*]] = sub i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP24]], i32 [[TMP25]], i32 [[TMP23]]
; CHECK-NEXT:    [[TMP27:%.*]] = icmp uge i32 [[TMP26]], [[TMP2]]
; CHECK-NEXT:    [[TMP28:%.*]] = sub i32 [[TMP26]], [[TMP2]]
; CHECK-NEXT:    [[TMP29:%.*]] = select i1 [[TMP27]], i32 [[TMP28]], i32 [[TMP26]]
; CHECK-NEXT:    [[TMP30:%.*]] = insertelement <4 x i32> undef, i32 [[TMP29]], i64 0
; CHECK-NEXT:    [[TMP31:%.*]] = extractelement <4 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP32:%.*]] = extractelement <4 x i32> [[Y]], i64 1
; CHECK-NEXT:    [[TMP33:%.*]] = uitofp i32 [[TMP32]] to float
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP34]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP36:%.*]] = fptoui float [[TMP35]] to i32
; CHECK-NEXT:    [[TMP37:%.*]] = sub i32 0, [[TMP32]]
; CHECK-NEXT:    [[TMP38:%.*]] = mul i32 [[TMP37]], [[TMP36]]
; CHECK-NEXT:    [[TMP39:%.*]] = zext i32 [[TMP36]] to i64
; CHECK-NEXT:    [[TMP40:%.*]] = zext i32 [[TMP38]] to i64
; CHECK-NEXT:    [[TMP41:%.*]] = mul i64 [[TMP39]], [[TMP40]]
; CHECK-NEXT:    [[TMP42:%.*]] = trunc i64 [[TMP41]] to i32
; CHECK-NEXT:    [[TMP43:%.*]] = lshr i64 [[TMP41]], 32
; CHECK-NEXT:    [[TMP44:%.*]] = trunc i64 [[TMP43]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = add i32 [[TMP36]], [[TMP44]]
; CHECK-NEXT:    [[TMP46:%.*]] = zext i32 [[TMP31]] to i64
; CHECK-NEXT:    [[TMP47:%.*]] = zext i32 [[TMP45]] to i64
; CHECK-NEXT:    [[TMP48:%.*]] = mul i64 [[TMP46]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = trunc i64 [[TMP48]] to i32
; CHECK-NEXT:    [[TMP50:%.*]] = lshr i64 [[TMP48]], 32
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i64 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = mul i32 [[TMP51]], [[TMP32]]
; CHECK-NEXT:    [[TMP53:%.*]] = sub i32 [[TMP31]], [[TMP52]]
; CHECK-NEXT:    [[TMP54:%.*]] = icmp uge i32 [[TMP53]], [[TMP32]]
; CHECK-NEXT:    [[TMP55:%.*]] = sub i32 [[TMP53]], [[TMP32]]
; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP54]], i32 [[TMP55]], i32 [[TMP53]]
; CHECK-NEXT:    [[TMP57:%.*]] = icmp uge i32 [[TMP56]], [[TMP32]]
; CHECK-NEXT:    [[TMP58:%.*]] = sub i32 [[TMP56]], [[TMP32]]
; CHECK-NEXT:    [[TMP59:%.*]] = select i1 [[TMP57]], i32 [[TMP58]], i32 [[TMP56]]
; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP59]], i64 1
; CHECK-NEXT:    [[TMP61:%.*]] = extractelement <4 x i32> [[X]], i64 2
; CHECK-NEXT:    [[TMP62:%.*]] = extractelement <4 x i32> [[Y]], i64 2
; CHECK-NEXT:    [[TMP63:%.*]] = uitofp i32 [[TMP62]] to float
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP63]])
; CHECK-NEXT:    [[TMP65:%.*]] = fmul fast float [[TMP64]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP66:%.*]] = fptoui float [[TMP65]] to i32
; CHECK-NEXT:    [[TMP67:%.*]] = sub i32 0, [[TMP62]]
; CHECK-NEXT:    [[TMP68:%.*]] = mul i32 [[TMP67]], [[TMP66]]
; CHECK-NEXT:    [[TMP69:%.*]] = zext i32 [[TMP66]] to i64
; CHECK-NEXT:    [[TMP70:%.*]] = zext i32 [[TMP68]] to i64
; CHECK-NEXT:    [[TMP71:%.*]] = mul i64 [[TMP69]], [[TMP70]]
; CHECK-NEXT:    [[TMP72:%.*]] = trunc i64 [[TMP71]] to i32
; CHECK-NEXT:    [[TMP73:%.*]] = lshr i64 [[TMP71]], 32
; CHECK-NEXT:    [[TMP74:%.*]] = trunc i64 [[TMP73]] to i32
; CHECK-NEXT:    [[TMP75:%.*]] = add i32 [[TMP66]], [[TMP74]]
; CHECK-NEXT:    [[TMP76:%.*]] = zext i32 [[TMP61]] to i64
; CHECK-NEXT:    [[TMP77:%.*]] = zext i32 [[TMP75]] to i64
; CHECK-NEXT:    [[TMP78:%.*]] = mul i64 [[TMP76]], [[TMP77]]
; CHECK-NEXT:    [[TMP79:%.*]] = trunc i64 [[TMP78]] to i32
; CHECK-NEXT:    [[TMP80:%.*]] = lshr i64 [[TMP78]], 32
; CHECK-NEXT:    [[TMP81:%.*]] = trunc i64 [[TMP80]] to i32
; CHECK-NEXT:    [[TMP82:%.*]] = mul i32 [[TMP81]], [[TMP62]]
; CHECK-NEXT:    [[TMP83:%.*]] = sub i32 [[TMP61]], [[TMP82]]
; CHECK-NEXT:    [[TMP84:%.*]] = icmp uge i32 [[TMP83]], [[TMP62]]
; CHECK-NEXT:    [[TMP85:%.*]] = sub i32 [[TMP83]], [[TMP62]]
; CHECK-NEXT:    [[TMP86:%.*]] = select i1 [[TMP84]], i32 [[TMP85]], i32 [[TMP83]]
; CHECK-NEXT:    [[TMP87:%.*]] = icmp uge i32 [[TMP86]], [[TMP62]]
; CHECK-NEXT:    [[TMP88:%.*]] = sub i32 [[TMP86]], [[TMP62]]
; CHECK-NEXT:    [[TMP89:%.*]] = select i1 [[TMP87]], i32 [[TMP88]], i32 [[TMP86]]
; CHECK-NEXT:    [[TMP90:%.*]] = insertelement <4 x i32> [[TMP60]], i32 [[TMP89]], i64 2
; CHECK-NEXT:    [[TMP91:%.*]] = extractelement <4 x i32> [[X]], i64 3
; CHECK-NEXT:    [[TMP92:%.*]] = extractelement <4 x i32> [[Y]], i64 3
; CHECK-NEXT:    [[TMP93:%.*]] = uitofp i32 [[TMP92]] to float
; CHECK-NEXT:    [[TMP94:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP93]])
; CHECK-NEXT:    [[TMP95:%.*]] = fmul fast float [[TMP94]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP96:%.*]] = fptoui float [[TMP95]] to i32
; CHECK-NEXT:    [[TMP97:%.*]] = sub i32 0, [[TMP92]]
; CHECK-NEXT:    [[TMP98:%.*]] = mul i32 [[TMP97]], [[TMP96]]
; CHECK-NEXT:    [[TMP99:%.*]] = zext i32 [[TMP96]] to i64
; CHECK-NEXT:    [[TMP100:%.*]] = zext i32 [[TMP98]] to i64
; CHECK-NEXT:    [[TMP101:%.*]] = mul i64 [[TMP99]], [[TMP100]]
; CHECK-NEXT:    [[TMP102:%.*]] = trunc i64 [[TMP101]] to i32
; CHECK-NEXT:    [[TMP103:%.*]] = lshr i64 [[TMP101]], 32
; CHECK-NEXT:    [[TMP104:%.*]] = trunc i64 [[TMP103]] to i32
; CHECK-NEXT:    [[TMP105:%.*]] = add i32 [[TMP96]], [[TMP104]]
; CHECK-NEXT:    [[TMP106:%.*]] = zext i32 [[TMP91]] to i64
; CHECK-NEXT:    [[TMP107:%.*]] = zext i32 [[TMP105]] to i64
; CHECK-NEXT:    [[TMP108:%.*]] = mul i64 [[TMP106]], [[TMP107]]
; CHECK-NEXT:    [[TMP109:%.*]] = trunc i64 [[TMP108]] to i32
; CHECK-NEXT:    [[TMP110:%.*]] = lshr i64 [[TMP108]], 32
; CHECK-NEXT:    [[TMP111:%.*]] = trunc i64 [[TMP110]] to i32
; CHECK-NEXT:    [[TMP112:%.*]] = mul i32 [[TMP111]], [[TMP92]]
; CHECK-NEXT:    [[TMP113:%.*]] = sub i32 [[TMP91]], [[TMP112]]
; CHECK-NEXT:    [[TMP114:%.*]] = icmp uge i32 [[TMP113]], [[TMP92]]
; CHECK-NEXT:    [[TMP115:%.*]] = sub i32 [[TMP113]], [[TMP92]]
; CHECK-NEXT:    [[TMP116:%.*]] = select i1 [[TMP114]], i32 [[TMP115]], i32 [[TMP113]]
; CHECK-NEXT:    [[TMP117:%.*]] = icmp uge i32 [[TMP116]], [[TMP92]]
; CHECK-NEXT:    [[TMP118:%.*]] = sub i32 [[TMP116]], [[TMP92]]
; CHECK-NEXT:    [[TMP119:%.*]] = select i1 [[TMP117]], i32 [[TMP118]], i32 [[TMP116]]
; CHECK-NEXT:    [[TMP120:%.*]] = insertelement <4 x i32> [[TMP90]], i32 [[TMP119]], i64 3
; CHECK-NEXT:    store <4 x i32> [[TMP120]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v4i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX6-NEXT:    s_sub_i32 s12, 0, s8
; GFX6-NEXT:    s_sub_i32 s13, 0, s9
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s10
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s11
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v3
; GFX6-NEXT:    v_mul_lo_u32 v2, s12, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s13, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
; GFX6-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s8
; GFX6-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s9
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s8, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s8, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
; GFX6-NEXT:    s_sub_i32 s4, 0, s10
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v3, s4, v2
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s5, v1
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s9, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, v2, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v5
; GFX6-NEXT:    s_sub_i32 s4, 0, s11
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_f32_e32 v3, 0x4f7ffffe, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s9, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, s6, v2
; GFX6-NEXT:    v_mul_lo_u32 v5, s4, v3
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v2, s10
; GFX6-NEXT:    v_mul_hi_u32 v4, v3, v5
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
; GFX6-NEXT:    v_mul_hi_u32 v3, s7, v3
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s10, v2
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s10, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v3, s11
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s10, v2
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s10, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s7, v3
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s11, v3
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s11, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s11, v3
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s11, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v4i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX9-NEXT:    s_sub_i32 s2, 0, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s10
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
; GFX9-NEXT:    s_mul_i32 s2, s2, s3
; GFX9-NEXT:    s_mul_hi_u32 s2, s3, s2
; GFX9-NEXT:    s_add_i32 s3, s3, s2
; GFX9-NEXT:    s_mul_hi_u32 s2, s4, s3
; GFX9-NEXT:    s_mul_i32 s2, s2, s8
; GFX9-NEXT:    s_sub_i32 s2, s4, s2
; GFX9-NEXT:    s_sub_i32 s3, s2, s8
; GFX9-NEXT:    s_cmp_ge_u32 s2, s8
; GFX9-NEXT:    s_cselect_b32 s2, s3, s2
; GFX9-NEXT:    s_sub_i32 s3, s2, s8
; GFX9-NEXT:    s_cmp_ge_u32 s2, s8
; GFX9-NEXT:    v_readfirstlane_b32 s12, v1
; GFX9-NEXT:    s_cselect_b32 s2, s3, s2
; GFX9-NEXT:    s_sub_i32 s3, 0, s9
; GFX9-NEXT:    s_mul_i32 s3, s3, s12
; GFX9-NEXT:    s_mul_hi_u32 s3, s12, s3
; GFX9-NEXT:    s_add_i32 s12, s12, s3
; GFX9-NEXT:    s_mul_hi_u32 s3, s5, s12
; GFX9-NEXT:    s_mul_i32 s3, s3, s9
; GFX9-NEXT:    s_sub_i32 s3, s5, s3
; GFX9-NEXT:    s_sub_i32 s4, s3, s9
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    s_cmp_ge_u32 s3, s9
; GFX9-NEXT:    s_cselect_b32 s3, s4, s3
; GFX9-NEXT:    s_sub_i32 s4, s3, s9
; GFX9-NEXT:    s_cmp_ge_u32 s3, s9
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s11
; GFX9-NEXT:    s_cselect_b32 s3, s4, s3
; GFX9-NEXT:    s_sub_i32 s4, 0, s10
; GFX9-NEXT:    v_readfirstlane_b32 s5, v2
; GFX9-NEXT:    s_mul_i32 s4, s4, s5
; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s4
; GFX9-NEXT:    s_add_i32 s5, s5, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_mul_hi_u32 s4, s6, s5
; GFX9-NEXT:    s_mul_i32 s4, s4, s10
; GFX9-NEXT:    s_sub_i32 s4, s6, s4
; GFX9-NEXT:    s_sub_i32 s5, s4, s10
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    s_cmp_ge_u32 s4, s10
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    s_cselect_b32 s4, s5, s4
; GFX9-NEXT:    s_sub_i32 s5, s4, s10
; GFX9-NEXT:    s_cmp_ge_u32 s4, s10
; GFX9-NEXT:    s_cselect_b32 s4, s5, s4
; GFX9-NEXT:    s_sub_i32 s5, 0, s11
; GFX9-NEXT:    v_readfirstlane_b32 s6, v0
; GFX9-NEXT:    s_mul_i32 s5, s5, s6
; GFX9-NEXT:    s_mul_hi_u32 s5, s6, s5
; GFX9-NEXT:    s_add_i32 s6, s6, s5
; GFX9-NEXT:    s_mul_hi_u32 s5, s7, s6
; GFX9-NEXT:    s_mul_i32 s5, s5, s11
; GFX9-NEXT:    s_sub_i32 s5, s7, s5
; GFX9-NEXT:    s_sub_i32 s6, s5, s11
; GFX9-NEXT:    s_cmp_ge_u32 s5, s11
; GFX9-NEXT:    s_cselect_b32 s5, s6, s5
; GFX9-NEXT:    s_sub_i32 s6, s5, s11
; GFX9-NEXT:    s_cmp_ge_u32 s5, s11
; GFX9-NEXT:    s_cselect_b32 s5, s6, s5
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_mov_b32_e32 v3, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem <4 x i32> %x, %y
  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
  ret void
}

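; Explanatory note (not autogenerated): sdiv_v4i32 below wraps the unsigned
; expansion in sign handling. Per element (sketch, names illustrative):
;   sx = ashr(x, 31); sy = ashr(y, 31)                ; sign masks
;   ax = (x + sx) ^ sx; ay = (y + sy) ^ sy            ; |x|, |y|
;   q  = unsigned expansion of ax / ay, as above
;   res = (q ^ (sx ^ sy)) - (sx ^ sy)                 ; restore quotient sign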
define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @sdiv_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP1]], 31
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP2]], 31
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP8:%.*]] = xor i32 [[TMP6]], [[TMP3]]
; CHECK-NEXT:    [[TMP9:%.*]] = xor i32 [[TMP7]], [[TMP4]]
; CHECK-NEXT:    [[TMP10:%.*]] = uitofp i32 [[TMP9]] to float
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP10]])
; CHECK-NEXT:    [[TMP12:%.*]] = fmul fast float [[TMP11]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP13:%.*]] = fptoui float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = sub i32 0, [[TMP9]]
; CHECK-NEXT:    [[TMP15:%.*]] = mul i32 [[TMP14]], [[TMP13]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP13]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[TMP22]] to i64
; CHECK-NEXT:    [[TMP25:%.*]] = mul i64 [[TMP23]], [[TMP24]]
; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
; CHECK-NEXT:    [[TMP27:%.*]] = lshr i64 [[TMP25]], 32
; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
; CHECK-NEXT:    [[TMP29:%.*]] = mul i32 [[TMP28]], [[TMP9]]
; CHECK-NEXT:    [[TMP30:%.*]] = sub i32 [[TMP8]], [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = icmp uge i32 [[TMP30]], [[TMP9]]
; CHECK-NEXT:    [[TMP32:%.*]] = add i32 [[TMP28]], 1
; CHECK-NEXT:    [[TMP33:%.*]] = select i1 [[TMP31]], i32 [[TMP32]], i32 [[TMP28]]
; CHECK-NEXT:    [[TMP34:%.*]] = sub i32 [[TMP30]], [[TMP9]]
; CHECK-NEXT:    [[TMP35:%.*]] = select i1 [[TMP31]], i32 [[TMP34]], i32 [[TMP30]]
; CHECK-NEXT:    [[TMP36:%.*]] = icmp uge i32 [[TMP35]], [[TMP9]]
; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP33]], 1
; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP36]], i32 [[TMP37]], i32 [[TMP33]]
; CHECK-NEXT:    [[TMP39:%.*]] = xor i32 [[TMP38]], [[TMP5]]
; CHECK-NEXT:    [[TMP40:%.*]] = sub i32 [[TMP39]], [[TMP5]]
; CHECK-NEXT:    [[TMP41:%.*]] = insertelement <4 x i32> undef, i32 [[TMP40]], i64 0
; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <4 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP43:%.*]] = extractelement <4 x i32> [[Y]], i64 1
; CHECK-NEXT:    [[TMP44:%.*]] = ashr i32 [[TMP42]], 31
; CHECK-NEXT:    [[TMP45:%.*]] = ashr i32 [[TMP43]], 31
; CHECK-NEXT:    [[TMP46:%.*]] = xor i32 [[TMP44]], [[TMP45]]
; CHECK-NEXT:    [[TMP47:%.*]] = add i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT:    [[TMP48:%.*]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT:    [[TMP49:%.*]] = xor i32 [[TMP47]], [[TMP44]]
; CHECK-NEXT:    [[TMP50:%.*]] = xor i32 [[TMP48]], [[TMP45]]
; CHECK-NEXT:    [[TMP51:%.*]] = uitofp i32 [[TMP50]] to float
; CHECK-NEXT:    [[TMP52:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP51]])
; CHECK-NEXT:    [[TMP53:%.*]] = fmul fast float [[TMP52]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP54:%.*]] = fptoui float [[TMP53]] to i32
; CHECK-NEXT:    [[TMP55:%.*]] = sub i32 0, [[TMP50]]
; CHECK-NEXT:    [[TMP56:%.*]] = mul i32 [[TMP55]], [[TMP54]]
; CHECK-NEXT:    [[TMP57:%.*]] = zext i32 [[TMP54]] to i64
; CHECK-NEXT:    [[TMP58:%.*]] = zext i32 [[TMP56]] to i64
; CHECK-NEXT:    [[TMP59:%.*]] = mul i64 [[TMP57]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = trunc i64 [[TMP59]] to i32
; CHECK-NEXT:    [[TMP61:%.*]] = lshr i64 [[TMP59]], 32
; CHECK-NEXT:    [[TMP62:%.*]] = trunc i64 [[TMP61]] to i32
; CHECK-NEXT:    [[TMP63:%.*]] = add i32 [[TMP54]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = zext i32 [[TMP49]] to i64
; CHECK-NEXT:    [[TMP65:%.*]] = zext i32 [[TMP63]] to i64
; CHECK-NEXT:    [[TMP66:%.*]] = mul i64 [[TMP64]], [[TMP65]]
; CHECK-NEXT:    [[TMP67:%.*]] = trunc i64 [[TMP66]] to i32
; CHECK-NEXT:    [[TMP68:%.*]] = lshr i64 [[TMP66]], 32
; CHECK-NEXT:    [[TMP69:%.*]] = trunc i64 [[TMP68]] to i32
; CHECK-NEXT:    [[TMP70:%.*]] = mul i32 [[TMP69]], [[TMP50]]
; CHECK-NEXT:    [[TMP71:%.*]] = sub i32 [[TMP49]], [[TMP70]]
; CHECK-NEXT:    [[TMP72:%.*]] = icmp uge i32 [[TMP71]], [[TMP50]]
; CHECK-NEXT:    [[TMP73:%.*]] = add i32 [[TMP69]], 1
; CHECK-NEXT:    [[TMP74:%.*]] = select i1 [[TMP72]], i32 [[TMP73]], i32 [[TMP69]]
; CHECK-NEXT:    [[TMP75:%.*]] = sub i32 [[TMP71]], [[TMP50]]
; CHECK-NEXT:    [[TMP76:%.*]] = select i1 [[TMP72]], i32 [[TMP75]], i32 [[TMP71]]
; CHECK-NEXT:    [[TMP77:%.*]] = icmp uge i32 [[TMP76]], [[TMP50]]
; CHECK-NEXT:    [[TMP78:%.*]] = add i32 [[TMP74]], 1
; CHECK-NEXT:    [[TMP79:%.*]] = select i1 [[TMP77]], i32 [[TMP78]], i32 [[TMP74]]
; CHECK-NEXT:    [[TMP80:%.*]] = xor i32 [[TMP79]], [[TMP46]]
; CHECK-NEXT:    [[TMP81:%.*]] = sub i32 [[TMP80]], [[TMP46]]
; CHECK-NEXT:    [[TMP82:%.*]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP81]], i64 1
; CHECK-NEXT:    [[TMP83:%.*]] = extractelement <4 x i32> [[X]], i64 2
; CHECK-NEXT:    [[TMP84:%.*]] = extractelement <4 x i32> [[Y]], i64 2
; CHECK-NEXT:    [[TMP85:%.*]] = ashr i32 [[TMP83]], 31
; CHECK-NEXT:    [[TMP86:%.*]] = ashr i32 [[TMP84]], 31
; CHECK-NEXT:    [[TMP87:%.*]] = xor i32 [[TMP85]], [[TMP86]]
; CHECK-NEXT:    [[TMP88:%.*]] = add i32 [[TMP83]], [[TMP85]]
; CHECK-NEXT:    [[TMP89:%.*]] = add i32 [[TMP84]], [[TMP86]]
; CHECK-NEXT:    [[TMP90:%.*]] = xor i32 [[TMP88]], [[TMP85]]
; CHECK-NEXT:    [[TMP91:%.*]] = xor i32 [[TMP89]], [[TMP86]]
; CHECK-NEXT:    [[TMP92:%.*]] = uitofp i32 [[TMP91]] to float
; CHECK-NEXT:    [[TMP93:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP92]])
; CHECK-NEXT:    [[TMP94:%.*]] = fmul fast float [[TMP93]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP95:%.*]] = fptoui float [[TMP94]] to i32
; CHECK-NEXT:    [[TMP96:%.*]] = sub i32 0, [[TMP91]]
; CHECK-NEXT:    [[TMP97:%.*]] = mul i32 [[TMP96]], [[TMP95]]
; CHECK-NEXT:    [[TMP98:%.*]] = zext i32 [[TMP95]] to i64
; CHECK-NEXT:    [[TMP99:%.*]] = zext i32 [[TMP97]] to i64
; CHECK-NEXT:    [[TMP100:%.*]] = mul i64 [[TMP98]], [[TMP99]]
; CHECK-NEXT:    [[TMP101:%.*]] = trunc i64 [[TMP100]] to i32
; CHECK-NEXT:    [[TMP102:%.*]] = lshr i64 [[TMP100]], 32
; CHECK-NEXT:    [[TMP103:%.*]] = trunc i64 [[TMP102]] to i32
; CHECK-NEXT:    [[TMP104:%.*]] = add i32 [[TMP95]], [[TMP103]]
; CHECK-NEXT:    [[TMP105:%.*]] = zext i32 [[TMP90]] to i64
; CHECK-NEXT:    [[TMP106:%.*]] = zext i32 [[TMP104]] to i64
; CHECK-NEXT:    [[TMP107:%.*]] = mul i64 [[TMP105]], [[TMP106]]
; CHECK-NEXT:    [[TMP108:%.*]] = trunc i64 [[TMP107]] to i32
; CHECK-NEXT:    [[TMP109:%.*]] = lshr i64 [[TMP107]], 32
; CHECK-NEXT:    [[TMP110:%.*]] = trunc i64 [[TMP109]] to i32
; CHECK-NEXT:    [[TMP111:%.*]] = mul i32 [[TMP110]], [[TMP91]]
; CHECK-NEXT:    [[TMP112:%.*]] = sub i32 [[TMP90]], [[TMP111]]
; CHECK-NEXT:    [[TMP113:%.*]] = icmp uge i32 [[TMP112]], [[TMP91]]
; CHECK-NEXT:    [[TMP114:%.*]] = add i32 [[TMP110]], 1
; CHECK-NEXT:    [[TMP115:%.*]] = select i1 [[TMP113]], i32 [[TMP114]], i32 [[TMP110]]
; CHECK-NEXT:    [[TMP116:%.*]] = sub i32 [[TMP112]], [[TMP91]]
; CHECK-NEXT:    [[TMP117:%.*]] = select i1 [[TMP113]], i32 [[TMP116]], i32 [[TMP112]]
; CHECK-NEXT:    [[TMP118:%.*]] = icmp uge i32 [[TMP117]], [[TMP91]]
; CHECK-NEXT:    [[TMP119:%.*]] = add i32 [[TMP115]], 1
; CHECK-NEXT:    [[TMP120:%.*]] = select i1 [[TMP118]], i32 [[TMP119]], i32 [[TMP115]]
; CHECK-NEXT:    [[TMP121:%.*]] = xor i32 [[TMP120]], [[TMP87]]
; CHECK-NEXT:    [[TMP122:%.*]] = sub i32 [[TMP121]], [[TMP87]]
; CHECK-NEXT:    [[TMP123:%.*]] = insertelement <4 x i32> [[TMP82]], i32 [[TMP122]], i64 2
; CHECK-NEXT:    [[TMP124:%.*]] = extractelement <4 x i32> [[X]], i64 3
; CHECK-NEXT:    [[TMP125:%.*]] = extractelement <4 x i32> [[Y]], i64 3
; CHECK-NEXT:    [[TMP126:%.*]] = ashr i32 [[TMP124]], 31
; CHECK-NEXT:    [[TMP127:%.*]] = ashr i32 [[TMP125]], 31
; CHECK-NEXT:    [[TMP128:%.*]] = xor i32 [[TMP126]], [[TMP127]]
; CHECK-NEXT:    [[TMP129:%.*]] = add i32 [[TMP124]], [[TMP126]]
; CHECK-NEXT:    [[TMP130:%.*]] = add i32 [[TMP125]], [[TMP127]]
; CHECK-NEXT:    [[TMP131:%.*]] = xor i32 [[TMP129]], [[TMP126]]
; CHECK-NEXT:    [[TMP132:%.*]] = xor i32 [[TMP130]], [[TMP127]]
; CHECK-NEXT:    [[TMP133:%.*]] = uitofp i32 [[TMP132]] to float
; CHECK-NEXT:    [[TMP134:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP133]])
; CHECK-NEXT:    [[TMP135:%.*]] = fmul fast float [[TMP134]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP136:%.*]] = fptoui float [[TMP135]] to i32
; CHECK-NEXT:    [[TMP137:%.*]] = sub i32 0, [[TMP132]]
; CHECK-NEXT:    [[TMP138:%.*]] = mul i32 [[TMP137]], [[TMP136]]
; CHECK-NEXT:    [[TMP139:%.*]] = zext i32 [[TMP136]] to i64
; CHECK-NEXT:    [[TMP140:%.*]] = zext i32 [[TMP138]] to i64
; CHECK-NEXT:    [[TMP141:%.*]] = mul i64 [[TMP139]], [[TMP140]]
; CHECK-NEXT:    [[TMP142:%.*]] = trunc i64 [[TMP141]] to i32
; CHECK-NEXT:    [[TMP143:%.*]] = lshr i64 [[TMP141]], 32
; CHECK-NEXT:    [[TMP144:%.*]] = trunc i64 [[TMP143]] to i32
; CHECK-NEXT:    [[TMP145:%.*]] = add i32 [[TMP136]], [[TMP144]]
; CHECK-NEXT:    [[TMP146:%.*]] = zext i32 [[TMP131]] to i64
; CHECK-NEXT:    [[TMP147:%.*]] = zext i32 [[TMP145]] to i64
; CHECK-NEXT:    [[TMP148:%.*]] = mul i64 [[TMP146]], [[TMP147]]
; CHECK-NEXT:    [[TMP149:%.*]] = trunc i64 [[TMP148]] to i32
; CHECK-NEXT:    [[TMP150:%.*]] = lshr i64 [[TMP148]], 32
; CHECK-NEXT:    [[TMP151:%.*]] = trunc i64 [[TMP150]] to i32
; CHECK-NEXT:    [[TMP152:%.*]] = mul i32 [[TMP151]], [[TMP132]]
; CHECK-NEXT:    [[TMP153:%.*]] = sub i32 [[TMP131]], [[TMP152]]
; CHECK-NEXT:    [[TMP154:%.*]] = icmp uge i32 [[TMP153]], [[TMP132]]
; CHECK-NEXT:    [[TMP155:%.*]] = add i32 [[TMP151]], 1
; CHECK-NEXT:    [[TMP156:%.*]] = select i1 [[TMP154]], i32 [[TMP155]], i32 [[TMP151]]
; CHECK-NEXT:    [[TMP157:%.*]] = sub i32 [[TMP153]], [[TMP132]]
; CHECK-NEXT:    [[TMP158:%.*]] = select i1 [[TMP154]], i32 [[TMP157]], i32 [[TMP153]]
; CHECK-NEXT:    [[TMP159:%.*]] = icmp uge i32 [[TMP158]], [[TMP132]]
; CHECK-NEXT:    [[TMP160:%.*]] = add i32 [[TMP156]], 1
; CHECK-NEXT:    [[TMP161:%.*]] = select i1 [[TMP159]], i32 [[TMP160]], i32 [[TMP156]]
; CHECK-NEXT:    [[TMP162:%.*]] = xor i32 [[TMP161]], [[TMP128]]
; CHECK-NEXT:    [[TMP163:%.*]] = sub i32 [[TMP162]], [[TMP128]]
; CHECK-NEXT:    [[TMP164:%.*]] = insertelement <4 x i32> [[TMP123]], i32 [[TMP163]], i64 3
; CHECK-NEXT:    store <4 x i32> [[TMP164]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v4i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s15, 0xf000
; GFX6-NEXT:    s_mov_b32 s14, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s2, s8, 31
; GFX6-NEXT:    s_add_i32 s3, s8, s2
; GFX6-NEXT:    s_xor_b32 s3, s3, s2
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX6-NEXT:    s_ashr_i32 s8, s9, 31
; GFX6-NEXT:    s_add_i32 s0, s9, s8
; GFX6-NEXT:    s_xor_b32 s9, s0, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX6-NEXT:    s_sub_i32 s1, 0, s3
; GFX6-NEXT:    s_ashr_i32 s0, s4, 31
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    s_xor_b32 s2, s0, s2
; GFX6-NEXT:    v_mul_lo_u32 v2, s1, v0
; GFX6-NEXT:    s_add_i32 s1, s4, s0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    s_sub_i32 s0, 0, s9
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s3
; GFX6-NEXT:    v_mul_hi_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s1, v3
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s3, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v3
; GFX6-NEXT:    s_ashr_i32 s0, s5, 31
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX6-NEXT:    s_add_i32 s1, s5, s0
; GFX6-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX6-NEXT:    s_ashr_i32 s3, s10, 31
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
; GFX6-NEXT:    s_xor_b32 s2, s0, s8
; GFX6-NEXT:    s_add_i32 s0, s10, s3
; GFX6-NEXT:    s_xor_b32 s4, s0, s3
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s4
; GFX6-NEXT:    v_mul_hi_u32 v1, s1, v1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v3
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s9
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v1
; GFX6-NEXT:    v_mul_f32_e32 v3, 0x4f7ffffe, v3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s1, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s9, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v4, s[0:1]
; GFX6-NEXT:    s_sub_i32 s0, 0, s4
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v3
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX6-NEXT:    v_mul_hi_u32 v2, v3, v5
; GFX6-NEXT:    v_xor_b32_e32 v1, s2, v1
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s2, v1
; GFX6-NEXT:    s_ashr_i32 s2, s11, 31
; GFX6-NEXT:    s_ashr_i32 s0, s6, 31
; GFX6-NEXT:    s_add_i32 s5, s11, s2
; GFX6-NEXT:    s_add_i32 s1, s6, s0
; GFX6-NEXT:    s_xor_b32 s5, s5, s2
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s5
; GFX6-NEXT:    v_mul_hi_u32 v2, s1, v2
; GFX6-NEXT:    s_xor_b32 s3, s0, s3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v4
; GFX6-NEXT:    v_mul_lo_u32 v3, v2, s4
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v2
; GFX6-NEXT:    v_mul_f32_e32 v4, 0x4f7ffffe, v4
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s1, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s4, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v5, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s4, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
; GFX6-NEXT:    s_sub_i32 s0, 0, s5
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v4
; GFX6-NEXT:    s_ashr_i32 s0, s7, 31
; GFX6-NEXT:    s_add_i32 s1, s7, s0
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    v_mul_hi_u32 v5, v4, v5
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
; GFX6-NEXT:    s_xor_b32 s2, s0, s2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
; GFX6-NEXT:    v_mul_hi_u32 v4, s1, v4
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
; GFX6-NEXT:    v_xor_b32_e32 v2, s3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v4, s5
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v4
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s3, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s1, v3
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s5, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v4, v5, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s5, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 1, v4
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s5, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX6-NEXT:    v_xor_b32_e32 v3, s2, v3
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s2, v3
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v4i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s2, s8, 31
; GFX9-NEXT:    s_add_i32 s3, s8, s2
; GFX9-NEXT:    s_xor_b32 s3, s3, s2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_ashr_i32 s12, s9, 31
; GFX9-NEXT:    s_add_i32 s9, s9, s12
; GFX9-NEXT:    s_xor_b32 s9, s9, s12
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX9-NEXT:    s_sub_i32 s14, 0, s3
; GFX9-NEXT:    s_ashr_i32 s8, s4, 31
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    s_add_i32 s4, s4, s8
; GFX9-NEXT:    s_xor_b32 s4, s4, s8
; GFX9-NEXT:    v_mul_lo_u32 v2, s14, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    s_sub_i32 s14, 0, s9
; GFX9-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX9-NEXT:    s_ashr_i32 s13, s5, 31
; GFX9-NEXT:    v_mul_lo_u32 v3, s14, v1
; GFX9-NEXT:    s_add_i32 s5, s5, s13
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX9-NEXT:    v_mul_hi_u32 v2, v1, v3
; GFX9-NEXT:    s_xor_b32 s5, s5, s13
; GFX9-NEXT:    s_xor_b32 s2, s8, s2
; GFX9-NEXT:    v_mul_lo_u32 v3, v0, s3
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v2
; GFX9-NEXT:    v_add_u32_e32 v2, 1, v0
; GFX9-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX9-NEXT:    v_sub_u32_e32 v3, s4, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v2, s3, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v2
; GFX9-NEXT:    s_ashr_i32 s3, s10, 31
; GFX9-NEXT:    s_add_i32 s4, s10, s3
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    s_xor_b32 s4, s4, s3
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s4
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, s9
; GFX9-NEXT:    v_add_u32_e32 v5, 1, v1
; GFX9-NEXT:    s_ashr_i32 s8, s11, 31
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v3
; GFX9-NEXT:    v_sub_u32_e32 v2, s5, v2
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s9, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
; GFX9-NEXT:    v_mul_f32_e32 v3, 0x4f7ffffe, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX9-NEXT:    v_subrev_u32_e32 v5, s9, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
; GFX9-NEXT:    s_sub_i32 s5, 0, s4
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s9, v2
; GFX9-NEXT:    v_mul_lo_u32 v2, s5, v3
; GFX9-NEXT:    s_add_i32 s9, s11, s8
; GFX9-NEXT:    v_add_u32_e32 v5, 1, v1
; GFX9-NEXT:    s_xor_b32 s9, s9, s8
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
; GFX9-NEXT:    v_mul_hi_u32 v2, v3, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s9
; GFX9-NEXT:    s_ashr_i32 s5, s6, 31
; GFX9-NEXT:    s_add_i32 s6, s6, s5
; GFX9-NEXT:    v_add_u32_e32 v2, v3, v2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v5
; GFX9-NEXT:    s_xor_b32 s6, s6, s5
; GFX9-NEXT:    v_mul_hi_u32 v2, s6, v2
; GFX9-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX9-NEXT:    v_mul_f32_e32 v3, 0x4f7ffffe, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX9-NEXT:    v_subrev_u32_e32 v0, s2, v0
; GFX9-NEXT:    s_xor_b32 s2, s13, s12
; GFX9-NEXT:    v_mul_lo_u32 v5, v2, s4
; GFX9-NEXT:    v_xor_b32_e32 v1, s2, v1
; GFX9-NEXT:    v_subrev_u32_e32 v1, s2, v1
; GFX9-NEXT:    s_xor_b32 s2, s5, s3
; GFX9-NEXT:    s_sub_i32 s3, 0, s9
; GFX9-NEXT:    v_mul_lo_u32 v7, s3, v3
; GFX9-NEXT:    v_sub_u32_e32 v5, s6, v5
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v2
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s4, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v6, s4, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
; GFX9-NEXT:    v_mul_hi_u32 v6, v3, v7
; GFX9-NEXT:    s_ashr_i32 s3, s7, 31
; GFX9-NEXT:    s_add_i32 s5, s7, s3
; GFX9-NEXT:    s_xor_b32 s5, s5, s3
; GFX9-NEXT:    v_add_u32_e32 v3, v3, v6
; GFX9-NEXT:    v_mul_hi_u32 v3, s5, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s4, v5
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
; GFX9-NEXT:    v_mul_lo_u32 v5, v3, s9
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v3
; GFX9-NEXT:    v_xor_b32_e32 v2, s2, v2
; GFX9-NEXT:    v_subrev_u32_e32 v2, s2, v2
; GFX9-NEXT:    v_sub_u32_e32 v5, s5, v5
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s9, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v6, s9, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s9, v5
; GFX9-NEXT:    s_xor_b32 s2, s3, s8
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX9-NEXT:    v_xor_b32_e32 v3, s2, v3
; GFX9-NEXT:    v_subrev_u32_e32 v3, s2, v3
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <4 x i32> %x, %y
  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
  ret void
}

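; Explanatory note (not autogenerated): srem_v4i32 below follows the sdiv
; pattern, but the remainder takes the sign of the dividend only (sketch):
;   r   = unsigned expansion of |x| urem |y|, as above
;   res = (r ^ sx) - sx                               ; sx = ashr(x, 31)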
define amdgpu_kernel void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @srem_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP1]], 31
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP2]], 31
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[TMP8:%.*]] = xor i32 [[TMP6]], [[TMP4]]
; CHECK-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP10]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = sub i32 0, [[TMP8]]
; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], [[TMP12]]
; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP12]] to i64
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP14]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP15]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
; CHECK-NEXT:    [[TMP19:%.*]] = lshr i64 [[TMP17]], 32
; CHECK-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
; CHECK-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP21]] to i64
; CHECK-NEXT:    [[TMP24:%.*]] = mul i64 [[TMP22]], [[TMP23]]
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i64 [[TMP24]] to i32
; CHECK-NEXT:    [[TMP26:%.*]] = lshr i64 [[TMP24]], 32
; CHECK-NEXT:    [[TMP27:%.*]] = trunc i64 [[TMP26]] to i32
; CHECK-NEXT:    [[TMP28:%.*]] = mul i32 [[TMP27]], [[TMP8]]
; CHECK-NEXT:    [[TMP29:%.*]] = sub i32 [[TMP7]], [[TMP28]]
; CHECK-NEXT:    [[TMP30:%.*]] = icmp uge i32 [[TMP29]], [[TMP8]]
; CHECK-NEXT:    [[TMP31:%.*]] = sub i32 [[TMP29]], [[TMP8]]
; CHECK-NEXT:    [[TMP32:%.*]] = select i1 [[TMP30]], i32 [[TMP31]], i32 [[TMP29]]
; CHECK-NEXT:    [[TMP33:%.*]] = icmp uge i32 [[TMP32]], [[TMP8]]
; CHECK-NEXT:    [[TMP34:%.*]] = sub i32 [[TMP32]], [[TMP8]]
; CHECK-NEXT:    [[TMP35:%.*]] = select i1 [[TMP33]], i32 [[TMP34]], i32 [[TMP32]]
; CHECK-NEXT:    [[TMP36:%.*]] = xor i32 [[TMP35]], [[TMP3]]
; CHECK-NEXT:    [[TMP37:%.*]] = sub i32 [[TMP36]], [[TMP3]]
; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <4 x i32> undef, i32 [[TMP37]], i64 0
; CHECK-NEXT:    [[TMP39:%.*]] = extractelement <4 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP40:%.*]] = extractelement <4 x i32> [[Y]], i64 1
; CHECK-NEXT:    [[TMP41:%.*]] = ashr i32 [[TMP39]], 31
; CHECK-NEXT:    [[TMP42:%.*]] = ashr i32 [[TMP40]], 31
; CHECK-NEXT:    [[TMP43:%.*]] = add i32 [[TMP39]], [[TMP41]]
; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP40]], [[TMP42]]
; CHECK-NEXT:    [[TMP45:%.*]] = xor i32 [[TMP43]], [[TMP41]]
; CHECK-NEXT:    [[TMP46:%.*]] = xor i32 [[TMP44]], [[TMP42]]
; CHECK-NEXT:    [[TMP47:%.*]] = uitofp i32 [[TMP46]] to float
; CHECK-NEXT:    [[TMP48:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP47]])
; CHECK-NEXT:    [[TMP49:%.*]] = fmul fast float [[TMP48]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP50:%.*]] = fptoui float [[TMP49]] to i32
; CHECK-NEXT:    [[TMP51:%.*]] = sub i32 0, [[TMP46]]
; CHECK-NEXT:    [[TMP52:%.*]] = mul i32 [[TMP51]], [[TMP50]]
; CHECK-NEXT:    [[TMP53:%.*]] = zext i32 [[TMP50]] to i64
; CHECK-NEXT:    [[TMP54:%.*]] = zext i32 [[TMP52]] to i64
; CHECK-NEXT:    [[TMP55:%.*]] = mul i64 [[TMP53]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = trunc i64 [[TMP55]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = lshr i64 [[TMP55]], 32
; CHECK-NEXT:    [[TMP58:%.*]] = trunc i64 [[TMP57]] to i32
; CHECK-NEXT:    [[TMP59:%.*]] = add i32 [[TMP50]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = zext i32 [[TMP45]] to i64
; CHECK-NEXT:    [[TMP61:%.*]] = zext i32 [[TMP59]] to i64
; CHECK-NEXT:    [[TMP62:%.*]] = mul i64 [[TMP60]], [[TMP61]]
; CHECK-NEXT:    [[TMP63:%.*]] = trunc i64 [[TMP62]] to i32
; CHECK-NEXT:    [[TMP64:%.*]] = lshr i64 [[TMP62]], 32
; CHECK-NEXT:    [[TMP65:%.*]] = trunc i64 [[TMP64]] to i32
; CHECK-NEXT:    [[TMP66:%.*]] = mul i32 [[TMP65]], [[TMP46]]
; CHECK-NEXT:    [[TMP67:%.*]] = sub i32 [[TMP45]], [[TMP66]]
; CHECK-NEXT:    [[TMP68:%.*]] = icmp uge i32 [[TMP67]], [[TMP46]]
; CHECK-NEXT:    [[TMP69:%.*]] = sub i32 [[TMP67]], [[TMP46]]
; CHECK-NEXT:    [[TMP70:%.*]] = select i1 [[TMP68]], i32 [[TMP69]], i32 [[TMP67]]
; CHECK-NEXT:    [[TMP71:%.*]] = icmp uge i32 [[TMP70]], [[TMP46]]
; CHECK-NEXT:    [[TMP72:%.*]] = sub i32 [[TMP70]], [[TMP46]]
; CHECK-NEXT:    [[TMP73:%.*]] = select i1 [[TMP71]], i32 [[TMP72]], i32 [[TMP70]]
; CHECK-NEXT:    [[TMP74:%.*]] = xor i32 [[TMP73]], [[TMP41]]
; CHECK-NEXT:    [[TMP75:%.*]] = sub i32 [[TMP74]], [[TMP41]]
; CHECK-NEXT:    [[TMP76:%.*]] = insertelement <4 x i32> [[TMP38]], i32 [[TMP75]], i64 1
; CHECK-NEXT:    [[TMP77:%.*]] = extractelement <4 x i32> [[X]], i64 2
; CHECK-NEXT:    [[TMP78:%.*]] = extractelement <4 x i32> [[Y]], i64 2
; CHECK-NEXT:    [[TMP79:%.*]] = ashr i32 [[TMP77]], 31
; CHECK-NEXT:    [[TMP80:%.*]] = ashr i32 [[TMP78]], 31
; CHECK-NEXT:    [[TMP81:%.*]] = add i32 [[TMP77]], [[TMP79]]
; CHECK-NEXT:    [[TMP82:%.*]] = add i32 [[TMP78]], [[TMP80]]
; CHECK-NEXT:    [[TMP83:%.*]] = xor i32 [[TMP81]], [[TMP79]]
; CHECK-NEXT:    [[TMP84:%.*]] = xor i32 [[TMP82]], [[TMP80]]
; CHECK-NEXT:    [[TMP85:%.*]] = uitofp i32 [[TMP84]] to float
; CHECK-NEXT:    [[TMP86:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP85]])
; CHECK-NEXT:    [[TMP87:%.*]] = fmul fast float [[TMP86]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP88:%.*]] = fptoui float [[TMP87]] to i32
; CHECK-NEXT:    [[TMP89:%.*]] = sub i32 0, [[TMP84]]
; CHECK-NEXT:    [[TMP90:%.*]] = mul i32 [[TMP89]], [[TMP88]]
; CHECK-NEXT:    [[TMP91:%.*]] = zext i32 [[TMP88]] to i64
; CHECK-NEXT:    [[TMP92:%.*]] = zext i32 [[TMP90]] to i64
; CHECK-NEXT:    [[TMP93:%.*]] = mul i64 [[TMP91]], [[TMP92]]
; CHECK-NEXT:    [[TMP94:%.*]] = trunc i64 [[TMP93]] to i32
; CHECK-NEXT:    [[TMP95:%.*]] = lshr i64 [[TMP93]], 32
; CHECK-NEXT:    [[TMP96:%.*]] = trunc i64 [[TMP95]] to i32
; CHECK-NEXT:    [[TMP97:%.*]] = add i32 [[TMP88]], [[TMP96]]
; CHECK-NEXT:    [[TMP98:%.*]] = zext i32 [[TMP83]] to i64
; CHECK-NEXT:    [[TMP99:%.*]] = zext i32 [[TMP97]] to i64
; CHECK-NEXT:    [[TMP100:%.*]] = mul i64 [[TMP98]], [[TMP99]]
; CHECK-NEXT:    [[TMP101:%.*]] = trunc i64 [[TMP100]] to i32
; CHECK-NEXT:    [[TMP102:%.*]] = lshr i64 [[TMP100]], 32
; CHECK-NEXT:    [[TMP103:%.*]] = trunc i64 [[TMP102]] to i32
; CHECK-NEXT:    [[TMP104:%.*]] = mul i32 [[TMP103]], [[TMP84]]
; CHECK-NEXT:    [[TMP105:%.*]] = sub i32 [[TMP83]], [[TMP104]]
; CHECK-NEXT:    [[TMP106:%.*]] = icmp uge i32 [[TMP105]], [[TMP84]]
; CHECK-NEXT:    [[TMP107:%.*]] = sub i32 [[TMP105]], [[TMP84]]
; CHECK-NEXT:    [[TMP108:%.*]] = select i1 [[TMP106]], i32 [[TMP107]], i32 [[TMP105]]
; CHECK-NEXT:    [[TMP109:%.*]] = icmp uge i32 [[TMP108]], [[TMP84]]
; CHECK-NEXT:    [[TMP110:%.*]] = sub i32 [[TMP108]], [[TMP84]]
; CHECK-NEXT:    [[TMP111:%.*]] = select i1 [[TMP109]], i32 [[TMP110]], i32 [[TMP108]]
; CHECK-NEXT:    [[TMP112:%.*]] = xor i32 [[TMP111]], [[TMP79]]
; CHECK-NEXT:    [[TMP113:%.*]] = sub i32 [[TMP112]], [[TMP79]]
; CHECK-NEXT:    [[TMP114:%.*]] = insertelement <4 x i32> [[TMP76]], i32 [[TMP113]], i64 2
; CHECK-NEXT:    [[TMP115:%.*]] = extractelement <4 x i32> [[X]], i64 3
; CHECK-NEXT:    [[TMP116:%.*]] = extractelement <4 x i32> [[Y]], i64 3
; CHECK-NEXT:    [[TMP117:%.*]] = ashr i32 [[TMP115]], 31
; CHECK-NEXT:    [[TMP118:%.*]] = ashr i32 [[TMP116]], 31
; CHECK-NEXT:    [[TMP119:%.*]] = add i32 [[TMP115]], [[TMP117]]
; CHECK-NEXT:    [[TMP120:%.*]] = add i32 [[TMP116]], [[TMP118]]
; CHECK-NEXT:    [[TMP121:%.*]] = xor i32 [[TMP119]], [[TMP117]]
; CHECK-NEXT:    [[TMP122:%.*]] = xor i32 [[TMP120]], [[TMP118]]
; CHECK-NEXT:    [[TMP123:%.*]] = uitofp i32 [[TMP122]] to float
; CHECK-NEXT:    [[TMP124:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP123]])
; CHECK-NEXT:    [[TMP125:%.*]] = fmul fast float [[TMP124]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP126:%.*]] = fptoui float [[TMP125]] to i32
; CHECK-NEXT:    [[TMP127:%.*]] = sub i32 0, [[TMP122]]
; CHECK-NEXT:    [[TMP128:%.*]] = mul i32 [[TMP127]], [[TMP126]]
; CHECK-NEXT:    [[TMP129:%.*]] = zext i32 [[TMP126]] to i64
; CHECK-NEXT:    [[TMP130:%.*]] = zext i32 [[TMP128]] to i64
; CHECK-NEXT:    [[TMP131:%.*]] = mul i64 [[TMP129]], [[TMP130]]
; CHECK-NEXT:    [[TMP132:%.*]] = trunc i64 [[TMP131]] to i32
; CHECK-NEXT:    [[TMP133:%.*]] = lshr i64 [[TMP131]], 32
; CHECK-NEXT:    [[TMP134:%.*]] = trunc i64 [[TMP133]] to i32
; CHECK-NEXT:    [[TMP135:%.*]] = add i32 [[TMP126]], [[TMP134]]
; CHECK-NEXT:    [[TMP136:%.*]] = zext i32 [[TMP121]] to i64
; CHECK-NEXT:    [[TMP137:%.*]] = zext i32 [[TMP135]] to i64
; CHECK-NEXT:    [[TMP138:%.*]] = mul i64 [[TMP136]], [[TMP137]]
; CHECK-NEXT:    [[TMP139:%.*]] = trunc i64 [[TMP138]] to i32
; CHECK-NEXT:    [[TMP140:%.*]] = lshr i64 [[TMP138]], 32
; CHECK-NEXT:    [[TMP141:%.*]] = trunc i64 [[TMP140]] to i32
; CHECK-NEXT:    [[TMP142:%.*]] = mul i32 [[TMP141]], [[TMP122]]
; CHECK-NEXT:    [[TMP143:%.*]] = sub i32 [[TMP121]], [[TMP142]]
; CHECK-NEXT:    [[TMP144:%.*]] = icmp uge i32 [[TMP143]], [[TMP122]]
; CHECK-NEXT:    [[TMP145:%.*]] = sub i32 [[TMP143]], [[TMP122]]
; CHECK-NEXT:    [[TMP146:%.*]] = select i1 [[TMP144]], i32 [[TMP145]], i32 [[TMP143]]
; CHECK-NEXT:    [[TMP147:%.*]] = icmp uge i32 [[TMP146]], [[TMP122]]
; CHECK-NEXT:    [[TMP148:%.*]] = sub i32 [[TMP146]], [[TMP122]]
; CHECK-NEXT:    [[TMP149:%.*]] = select i1 [[TMP147]], i32 [[TMP148]], i32 [[TMP146]]
; CHECK-NEXT:    [[TMP150:%.*]] = xor i32 [[TMP149]], [[TMP117]]
; CHECK-NEXT:    [[TMP151:%.*]] = sub i32 [[TMP150]], [[TMP117]]
; CHECK-NEXT:    [[TMP152:%.*]] = insertelement <4 x i32> [[TMP114]], i32 [[TMP151]], i64 3
; CHECK-NEXT:    store <4 x i32> [[TMP152]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v4i32:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s2, s8, 31
; GFX6-NEXT:    s_add_i32 s8, s8, s2
; GFX6-NEXT:    s_xor_b32 s8, s8, s2
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX6-NEXT:    s_ashr_i32 s13, s9, 31
; GFX6-NEXT:    s_add_i32 s9, s9, s13
; GFX6-NEXT:    s_xor_b32 s9, s9, s13
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    s_sub_i32 s14, 0, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX6-NEXT:    s_ashr_i32 s12, s4, 31
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    s_add_i32 s4, s4, s12
; GFX6-NEXT:    s_xor_b32 s4, s4, s12
; GFX6-NEXT:    v_mul_lo_u32 v2, s14, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    s_sub_i32 s14, 0, s9
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    s_ashr_i32 s13, s5, 31
; GFX6-NEXT:    s_add_i32 s5, s5, s13
; GFX6-NEXT:    s_xor_b32 s5, s5, s13
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v2, s14, v1
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s8
; GFX6-NEXT:    v_mul_hi_u32 v2, v1, v2
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s8, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s8, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; GFX6-NEXT:    s_ashr_i32 s4, s10, 31
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
; GFX6-NEXT:    s_add_i32 s8, s10, s4
; GFX6-NEXT:    s_xor_b32 s4, s8, s4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v2, s4
; GFX6-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s12, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v2
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s9
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
; GFX6-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s5, v1
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s9, v1
; GFX6-NEXT:    s_sub_i32 s5, 0, s4
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s9, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX6-NEXT:    v_mul_hi_u32 v3, v2, v4
; GFX6-NEXT:    s_ashr_i32 s8, s11, 31
; GFX6-NEXT:    s_add_i32 s9, s11, s8
; GFX6-NEXT:    s_ashr_i32 s5, s6, 31
; GFX6-NEXT:    s_xor_b32 s8, s9, s8
; GFX6-NEXT:    s_add_i32 s6, s6, s5
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s8
; GFX6-NEXT:    s_xor_b32 s6, s6, s5
; GFX6-NEXT:    v_mul_hi_u32 v2, s6, v2
; GFX6-NEXT:    v_xor_b32_e32 v1, s13, v1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v3
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s13, v1
; GFX6-NEXT:    v_mul_lo_u32 v2, v2, s4
; GFX6-NEXT:    v_mul_f32_e32 v3, 0x4f7ffffe, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s4, v2
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
; GFX6-NEXT:    s_sub_i32 s6, 0, s8
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s6, v3
; GFX6-NEXT:    s_ashr_i32 s6, s7, 31
; GFX6-NEXT:    s_add_i32 s7, s7, s6
; GFX6-NEXT:    s_xor_b32 s7, s7, s6
; GFX6-NEXT:    v_mul_hi_u32 v4, v3, v4
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s4, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
; GFX6-NEXT:    v_mul_hi_u32 v3, s7, v3
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
; GFX6-NEXT:    v_xor_b32_e32 v2, s5, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v3, s8
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s5, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s7, v3
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s8, v3
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s8, v3
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX6-NEXT:    v_xor_b32_e32 v3, s6, v3
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s6, v3
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v4i32:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s2, s8, 31
; GFX9-NEXT:    s_add_i32 s3, s8, s2
; GFX9-NEXT:    s_xor_b32 s2, s3, s2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
; GFX9-NEXT:    s_sub_i32 s8, 0, s2
; GFX9-NEXT:    s_ashr_i32 s3, s4, 31
; GFX9-NEXT:    s_add_i32 s4, s4, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_xor_b32 s4, s4, s3
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s12, v0
; GFX9-NEXT:    s_mul_i32 s8, s8, s12
; GFX9-NEXT:    s_mul_hi_u32 s8, s12, s8
; GFX9-NEXT:    s_add_i32 s12, s12, s8
; GFX9-NEXT:    s_mul_hi_u32 s8, s4, s12
; GFX9-NEXT:    s_mul_i32 s8, s8, s2
; GFX9-NEXT:    s_sub_i32 s4, s4, s8
; GFX9-NEXT:    s_sub_i32 s8, s4, s2
; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
; GFX9-NEXT:    s_cselect_b32 s4, s8, s4
; GFX9-NEXT:    s_sub_i32 s8, s4, s2
; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
; GFX9-NEXT:    s_cselect_b32 s2, s8, s4
; GFX9-NEXT:    s_ashr_i32 s4, s9, 31
; GFX9-NEXT:    s_add_i32 s8, s9, s4
; GFX9-NEXT:    s_xor_b32 s4, s8, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s4
; GFX9-NEXT:    s_ashr_i32 s8, s5, 31
; GFX9-NEXT:    s_xor_b32 s2, s2, s3
; GFX9-NEXT:    s_add_i32 s5, s5, s8
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_sub_i32 s2, s2, s3
; GFX9-NEXT:    s_xor_b32 s3, s5, s8
; GFX9-NEXT:    s_sub_i32 s5, 0, s4
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s9, v0
; GFX9-NEXT:    s_mul_i32 s5, s5, s9
; GFX9-NEXT:    s_mul_hi_u32 s5, s9, s5
; GFX9-NEXT:    s_add_i32 s9, s9, s5
; GFX9-NEXT:    s_mul_hi_u32 s5, s3, s9
; GFX9-NEXT:    s_mul_i32 s5, s5, s4
; GFX9-NEXT:    s_sub_i32 s3, s3, s5
; GFX9-NEXT:    s_sub_i32 s5, s3, s4
; GFX9-NEXT:    s_cmp_ge_u32 s3, s4
; GFX9-NEXT:    s_cselect_b32 s3, s5, s3
; GFX9-NEXT:    s_sub_i32 s5, s3, s4
; GFX9-NEXT:    s_cmp_ge_u32 s3, s4
; GFX9-NEXT:    s_cselect_b32 s3, s5, s3
; GFX9-NEXT:    s_ashr_i32 s4, s10, 31
; GFX9-NEXT:    s_add_i32 s5, s10, s4
; GFX9-NEXT:    s_xor_b32 s4, s5, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s4
; GFX9-NEXT:    s_xor_b32 s3, s3, s8
; GFX9-NEXT:    s_sub_i32 s3, s3, s8
; GFX9-NEXT:    s_sub_i32 s8, 0, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_ashr_i32 s5, s6, 31
; GFX9-NEXT:    s_add_i32 s6, s6, s5
; GFX9-NEXT:    s_xor_b32 s6, s6, s5
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    v_readfirstlane_b32 s9, v0
; GFX9-NEXT:    s_mul_i32 s8, s8, s9
; GFX9-NEXT:    s_mul_hi_u32 s8, s9, s8
; GFX9-NEXT:    s_add_i32 s9, s9, s8
; GFX9-NEXT:    s_mul_hi_u32 s8, s6, s9
; GFX9-NEXT:    s_mul_i32 s8, s8, s4
; GFX9-NEXT:    s_sub_i32 s6, s6, s8
; GFX9-NEXT:    s_sub_i32 s8, s6, s4
; GFX9-NEXT:    s_cmp_ge_u32 s6, s4
; GFX9-NEXT:    s_cselect_b32 s6, s8, s6
; GFX9-NEXT:    s_sub_i32 s8, s6, s4
; GFX9-NEXT:    s_cmp_ge_u32 s6, s4
; GFX9-NEXT:    s_cselect_b32 s4, s8, s6
; GFX9-NEXT:    s_ashr_i32 s6, s11, 31
; GFX9-NEXT:    s_add_i32 s8, s11, s6
; GFX9-NEXT:    s_xor_b32 s6, s8, s6
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s6
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    s_ashr_i32 s2, s7, 31
; GFX9-NEXT:    s_xor_b32 s3, s4, s5
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v2
; GFX9-NEXT:    s_add_i32 s4, s7, s2
; GFX9-NEXT:    s_sub_i32 s3, s3, s5
; GFX9-NEXT:    s_sub_i32 s5, 0, s6
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    s_xor_b32 s4, s4, s2
; GFX9-NEXT:    v_readfirstlane_b32 s7, v2
; GFX9-NEXT:    s_mul_i32 s5, s5, s7
; GFX9-NEXT:    s_mul_hi_u32 s5, s7, s5
; GFX9-NEXT:    s_add_i32 s7, s7, s5
; GFX9-NEXT:    s_mul_hi_u32 s5, s4, s7
; GFX9-NEXT:    s_mul_i32 s5, s5, s6
; GFX9-NEXT:    s_sub_i32 s4, s4, s5
; GFX9-NEXT:    s_sub_i32 s5, s4, s6
; GFX9-NEXT:    s_cmp_ge_u32 s4, s6
; GFX9-NEXT:    s_cselect_b32 s4, s5, s4
; GFX9-NEXT:    s_sub_i32 s5, s4, s6
; GFX9-NEXT:    s_cmp_ge_u32 s4, s6
; GFX9-NEXT:    s_cselect_b32 s4, s5, s4
; GFX9-NEXT:    s_xor_b32 s4, s4, s2
; GFX9-NEXT:    s_sub_i32 s2, s4, s2
; GFX9-NEXT:    v_mov_b32_e32 v2, s3
; GFX9-NEXT:    v_mov_b32_e32 v3, s2
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = srem <4 x i32> %x, %y
  store <4 x i32> %r, <4 x i32> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @udiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: @udiv_v4i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fneg fast float [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <4 x i16> undef, i16 [[TMP19]], i64 0
; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <4 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <4 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP23:%.*]] = zext i16 [[TMP21]] to i32
; CHECK-NEXT:    [[TMP24:%.*]] = zext i16 [[TMP22]] to i32
; CHECK-NEXT:    [[TMP25:%.*]] = uitofp i32 [[TMP23]] to float
; CHECK-NEXT:    [[TMP26:%.*]] = uitofp i32 [[TMP24]] to float
; CHECK-NEXT:    [[TMP27:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP26]])
; CHECK-NEXT:    [[TMP28:%.*]] = fmul fast float [[TMP25]], [[TMP27]]
; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.trunc.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP30:%.*]] = fneg fast float [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float [[TMP26]], float [[TMP25]])
; CHECK-NEXT:    [[TMP32:%.*]] = fptoui float [[TMP29]] to i32
; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.fabs.f32(float [[TMP31]])
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.fabs.f32(float [[TMP26]])
; CHECK-NEXT:    [[TMP35:%.*]] = fcmp fast oge float [[TMP33]], [[TMP34]]
; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 1, i32 0
; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP32]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = and i32 [[TMP37]], 65535
; CHECK-NEXT:    [[TMP39:%.*]] = trunc i32 [[TMP38]] to i16
; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <4 x i16> [[TMP20]], i16 [[TMP39]], i64 1
; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <4 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <4 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP43:%.*]] = zext i16 [[TMP41]] to i32
; CHECK-NEXT:    [[TMP44:%.*]] = zext i16 [[TMP42]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = uitofp i32 [[TMP43]] to float
; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP44]] to float
; CHECK-NEXT:    [[TMP47:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP46]])
; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP45]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = call fast float @llvm.trunc.f32(float [[TMP48]])
; CHECK-NEXT:    [[TMP50:%.*]] = fneg fast float [[TMP49]]
; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP50]], float [[TMP46]], float [[TMP45]])
; CHECK-NEXT:    [[TMP52:%.*]] = fptoui float [[TMP49]] to i32
; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.fabs.f32(float [[TMP51]])
; CHECK-NEXT:    [[TMP54:%.*]] = call fast float @llvm.fabs.f32(float [[TMP46]])
; CHECK-NEXT:    [[TMP55:%.*]] = fcmp fast oge float [[TMP53]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP55]], i32 1, i32 0
; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP52]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 65535
; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i16
; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <4 x i16> [[TMP40]], i16 [[TMP59]], i64 2
; CHECK-NEXT:    [[TMP61:%.*]] = extractelement <4 x i16> [[X]], i64 3
; CHECK-NEXT:    [[TMP62:%.*]] = extractelement <4 x i16> [[Y]], i64 3
; CHECK-NEXT:    [[TMP63:%.*]] = zext i16 [[TMP61]] to i32
; CHECK-NEXT:    [[TMP64:%.*]] = zext i16 [[TMP62]] to i32
; CHECK-NEXT:    [[TMP65:%.*]] = uitofp i32 [[TMP63]] to float
; CHECK-NEXT:    [[TMP66:%.*]] = uitofp i32 [[TMP64]] to float
; CHECK-NEXT:    [[TMP67:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP66]])
; CHECK-NEXT:    [[TMP68:%.*]] = fmul fast float [[TMP65]], [[TMP67]]
; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.trunc.f32(float [[TMP68]])
; CHECK-NEXT:    [[TMP70:%.*]] = fneg fast float [[TMP69]]
; CHECK-NEXT:    [[TMP71:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP70]], float [[TMP66]], float [[TMP65]])
; CHECK-NEXT:    [[TMP72:%.*]] = fptoui float [[TMP69]] to i32
; CHECK-NEXT:    [[TMP73:%.*]] = call fast float @llvm.fabs.f32(float [[TMP71]])
; CHECK-NEXT:    [[TMP74:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
; CHECK-NEXT:    [[TMP75:%.*]] = fcmp fast oge float [[TMP73]], [[TMP74]]
; CHECK-NEXT:    [[TMP76:%.*]] = select i1 [[TMP75]], i32 1, i32 0
; CHECK-NEXT:    [[TMP77:%.*]] = add i32 [[TMP72]], [[TMP76]]
; CHECK-NEXT:    [[TMP78:%.*]] = and i32 [[TMP77]], 65535
; CHECK-NEXT:    [[TMP79:%.*]] = trunc i32 [[TMP78]] to i16
; CHECK-NEXT:    [[TMP80:%.*]] = insertelement <4 x i16> [[TMP60]], i16 [[TMP79]], i64 3
; CHECK-NEXT:    store <4 x i16> [[TMP80]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v4i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s9, s6, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s9
; GFX6-NEXT:    s_lshr_b32 s6, s6, 16
; GFX6-NEXT:    s_and_b32 s8, s4, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v2, s6
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX6-NEXT:    s_lshr_b32 s4, s4, 16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v2
; GFX6-NEXT:    v_mul_f32_e32 v3, v1, v3
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_mad_f32 v1, -v3, v0, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, v4, v5
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    s_and_b32 s4, s7, 0xffff
; GFX6-NEXT:    v_cvt_u32_f32_e32 v6, v3
; GFX6-NEXT:    v_mad_f32 v3, -v1, v2, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s4
; GFX6-NEXT:    s_and_b32 s4, s5, 0xffff
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v6, vcc
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v6, v4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v2
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v1, vcc
; GFX6-NEXT:    v_mul_f32_e32 v1, v5, v6
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    s_lshr_b32 s4, s7, 16
; GFX6-NEXT:    v_mad_f32 v3, -v1, v4, v5
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s4
; GFX6-NEXT:    s_lshr_b32 s4, s5, 16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v5
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX6-NEXT:    v_mul_f32_e32 v3, v6, v7
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v3
; GFX6-NEXT:    v_mad_f32 v3, -v3, v5, v6
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v5
; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_or_b32_e32 v1, v1, v3
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v2
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v4i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v6, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s3, s6, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_and_b32 s2, s4, 0xffff
; GFX9-NEXT:    s_lshr_b32 s6, s6, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s6
; GFX9-NEXT:    s_lshr_b32 s4, s4, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s4
; GFX9-NEXT:    v_mul_f32_e32 v4, v2, v4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v1
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    s_and_b32 s2, s7, 0xffff
; GFX9-NEXT:    v_cvt_u32_f32_e32 v7, v4
; GFX9-NEXT:    v_mad_f32 v2, -v4, v0, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, s2
; GFX9-NEXT:    v_mul_f32_e32 v5, v3, v5
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
; GFX9-NEXT:    s_and_b32 s2, s5, 0xffff
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v7, vcc
; GFX9-NEXT:    v_trunc_f32_e32 v2, v5
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v7, v4
; GFX9-NEXT:    v_mad_f32 v3, -v2, v1, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
; GFX9-NEXT:    s_lshr_b32 s2, s7, 16
; GFX9-NEXT:    v_mul_f32_e32 v1, v5, v7
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mad_f32 v3, -v1, v4, v5
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    s_lshr_b32 s2, s5, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v7, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v8, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
; GFX9-NEXT:    v_mul_f32_e32 v3, v7, v8
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v3
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT:    v_mad_f32 v3, -v3, v5, v7
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT:    v_lshl_or_b32 v1, v3, 16, v1
; GFX9-NEXT:    v_lshl_or_b32 v0, v2, 16, v0
; GFX9-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv <4 x i16> %x, %y
  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @urem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: @urem_v4i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fneg fast float [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], [[TMP4]]
; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 [[TMP3]], [[TMP18]]
; CHECK-NEXT:    [[TMP20:%.*]] = and i32 [[TMP19]], 65535
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <4 x i16> undef, i16 [[TMP21]], i64 0
; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <4 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <4 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP25:%.*]] = zext i16 [[TMP23]] to i32
; CHECK-NEXT:    [[TMP26:%.*]] = zext i16 [[TMP24]] to i32
; CHECK-NEXT:    [[TMP27:%.*]] = uitofp i32 [[TMP25]] to float
; CHECK-NEXT:    [[TMP28:%.*]] = uitofp i32 [[TMP26]] to float
; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP30:%.*]] = fmul fast float [[TMP27]], [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.trunc.f32(float [[TMP30]])
; CHECK-NEXT:    [[TMP32:%.*]] = fneg fast float [[TMP31]]
; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP32]], float [[TMP28]], float [[TMP27]])
; CHECK-NEXT:    [[TMP34:%.*]] = fptoui float [[TMP31]] to i32
; CHECK-NEXT:    [[TMP35:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.fabs.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP37:%.*]] = fcmp fast oge float [[TMP35]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 1, i32 0
; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP34]], [[TMP38]]
; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP26]]
; CHECK-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP25]], [[TMP40]]
; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP41]], 65535
; CHECK-NEXT:    [[TMP43:%.*]] = trunc i32 [[TMP42]] to i16
; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <4 x i16> [[TMP22]], i16 [[TMP43]], i64 1
; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <4 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP46:%.*]] = extractelement <4 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP47:%.*]] = zext i16 [[TMP45]] to i32
; CHECK-NEXT:    [[TMP48:%.*]] = zext i16 [[TMP46]] to i32
; CHECK-NEXT:    [[TMP49:%.*]] = uitofp i32 [[TMP47]] to float
; CHECK-NEXT:    [[TMP50:%.*]] = uitofp i32 [[TMP48]] to float
; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP50]])
; CHECK-NEXT:    [[TMP52:%.*]] = fmul fast float [[TMP49]], [[TMP51]]
; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.trunc.f32(float [[TMP52]])
; CHECK-NEXT:    [[TMP54:%.*]] = fneg fast float [[TMP53]]
; CHECK-NEXT:    [[TMP55:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP54]], float [[TMP50]], float [[TMP49]])
; CHECK-NEXT:    [[TMP56:%.*]] = fptoui float [[TMP53]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = call fast float @llvm.fabs.f32(float [[TMP55]])
; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.fabs.f32(float [[TMP50]])
; CHECK-NEXT:    [[TMP59:%.*]] = fcmp fast oge float [[TMP57]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP59]], i32 1, i32 0
; CHECK-NEXT:    [[TMP61:%.*]] = add i32 [[TMP56]], [[TMP60]]
; CHECK-NEXT:    [[TMP62:%.*]] = mul i32 [[TMP61]], [[TMP48]]
; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 [[TMP47]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 65535
; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i16
; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <4 x i16> [[TMP44]], i16 [[TMP65]], i64 2
; CHECK-NEXT:    [[TMP67:%.*]] = extractelement <4 x i16> [[X]], i64 3
; CHECK-NEXT:    [[TMP68:%.*]] = extractelement <4 x i16> [[Y]], i64 3
; CHECK-NEXT:    [[TMP69:%.*]] = zext i16 [[TMP67]] to i32
; CHECK-NEXT:    [[TMP70:%.*]] = zext i16 [[TMP68]] to i32
; CHECK-NEXT:    [[TMP71:%.*]] = uitofp i32 [[TMP69]] to float
; CHECK-NEXT:    [[TMP72:%.*]] = uitofp i32 [[TMP70]] to float
; CHECK-NEXT:    [[TMP73:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP72]])
; CHECK-NEXT:    [[TMP74:%.*]] = fmul fast float [[TMP71]], [[TMP73]]
; CHECK-NEXT:    [[TMP75:%.*]] = call fast float @llvm.trunc.f32(float [[TMP74]])
; CHECK-NEXT:    [[TMP76:%.*]] = fneg fast float [[TMP75]]
; CHECK-NEXT:    [[TMP77:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP76]], float [[TMP72]], float [[TMP71]])
; CHECK-NEXT:    [[TMP78:%.*]] = fptoui float [[TMP75]] to i32
; CHECK-NEXT:    [[TMP79:%.*]] = call fast float @llvm.fabs.f32(float [[TMP77]])
; CHECK-NEXT:    [[TMP80:%.*]] = call fast float @llvm.fabs.f32(float [[TMP72]])
; CHECK-NEXT:    [[TMP81:%.*]] = fcmp fast oge float [[TMP79]], [[TMP80]]
; CHECK-NEXT:    [[TMP82:%.*]] = select i1 [[TMP81]], i32 1, i32 0
; CHECK-NEXT:    [[TMP83:%.*]] = add i32 [[TMP78]], [[TMP82]]
; CHECK-NEXT:    [[TMP84:%.*]] = mul i32 [[TMP83]], [[TMP70]]
; CHECK-NEXT:    [[TMP85:%.*]] = sub i32 [[TMP69]], [[TMP84]]
; CHECK-NEXT:    [[TMP86:%.*]] = and i32 [[TMP85]], 65535
; CHECK-NEXT:    [[TMP87:%.*]] = trunc i32 [[TMP86]] to i16
; CHECK-NEXT:    [[TMP88:%.*]] = insertelement <4 x i16> [[TMP66]], i16 [[TMP87]], i64 3
; CHECK-NEXT:    store <4 x i16> [[TMP88]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v4i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s8, s6, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX6-NEXT:    v_mov_b32_e32 v4, s6
; GFX6-NEXT:    v_alignbit_b32 v4, s7, v4, 16
; GFX6-NEXT:    s_and_b32 s8, s4, 0xffff
; GFX6-NEXT:    v_and_b32_e32 v5, 0xffff, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v2, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, v5
; GFX6-NEXT:    v_mov_b32_e32 v1, s4
; GFX6-NEXT:    v_alignbit_b32 v1, s5, v1, 16
; GFX6-NEXT:    v_and_b32_e32 v6, 0xffff, v1
; GFX6-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, v6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v5
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_mad_f32 v2, -v3, v0, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
; GFX6-NEXT:    v_mul_f32_e32 v2, v6, v7
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v2
; GFX6-NEXT:    v_mad_f32 v2, -v2, v5, v6
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v5
; GFX6-NEXT:    s_and_b32 s6, s7, 0xffff
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s6
; GFX6-NEXT:    s_and_b32 s6, s5, 0xffff
; GFX6-NEXT:    v_mul_lo_u32 v2, v2, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v3
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    s_lshr_b32 s4, s7, 16
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, v2, v1
; GFX6-NEXT:    v_mul_f32_e32 v1, v4, v5
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s4
; GFX6-NEXT:    s_lshr_b32 s6, s5, 16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s6
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v5
; GFX6-NEXT:    v_mad_f32 v4, -v1, v3, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, v3
; GFX6-NEXT:    v_mul_f32_e32 v3, v6, v7
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v3
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX6-NEXT:    v_mad_f32 v3, -v3, v5, v6
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v5
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s7
; GFX6-NEXT:    v_mul_lo_u32 v3, v3, s4
; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s5, v1
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s6, v3
; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT:    v_or_b32_e32 v1, v1, v3
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v2
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v4i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v6, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s3, s6, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_and_b32 s2, s4, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s2
; GFX9-NEXT:    s_lshr_b32 s6, s6, 16
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s6
; GFX9-NEXT:    s_lshr_b32 s4, s4, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s4
; GFX9-NEXT:    v_mul_f32_e32 v4, v2, v4
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    v_cvt_u32_f32_e32 v7, v4
; GFX9-NEXT:    v_mad_f32 v2, -v4, v0, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v7, vcc
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX9-NEXT:    s_and_b32 s3, s7, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, s3
; GFX9-NEXT:    v_mul_f32_e32 v5, v3, v5
; GFX9-NEXT:    v_trunc_f32_e32 v2, v5
; GFX9-NEXT:    s_and_b32 s8, s5, 0xffff
; GFX9-NEXT:    v_mad_f32 v3, -v2, v1, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s8
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v7, v4
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT:    v_mul_f32_e32 v2, v5, v7
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, s6
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    s_lshr_b32 s6, s7, 16
; GFX9-NEXT:    v_mad_f32 v3, -v2, v4, v5
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s6
; GFX9-NEXT:    s_lshr_b32 s5, s5, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v7, s5
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v8, v5
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
; GFX9-NEXT:    v_sub_u32_e32 v0, s2, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
; GFX9-NEXT:    v_mul_f32_e32 v3, v7, v8
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v3
; GFX9-NEXT:    v_mad_f32 v3, -v3, v5, v7
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v5
; GFX9-NEXT:    v_mul_lo_u32 v2, v2, s3
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT:    v_mul_lo_u32 v3, v3, s6
; GFX9-NEXT:    v_sub_u32_e32 v4, s4, v1
; GFX9-NEXT:    v_sub_u32_e32 v1, s8, v2
; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT:    v_sub_u32_e32 v2, s5, v3
; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT:    v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem <4 x i16> %x, %y
  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @sdiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: @sdiv_v4i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fneg fast float [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 16
; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 16
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <4 x i16> undef, i16 [[TMP23]], i64 0
; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <4 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <4 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP27:%.*]] = sext i16 [[TMP25]] to i32
; CHECK-NEXT:    [[TMP28:%.*]] = sext i16 [[TMP26]] to i32
; CHECK-NEXT:    [[TMP29:%.*]] = xor i32 [[TMP27]], [[TMP28]]
; CHECK-NEXT:    [[TMP30:%.*]] = ashr i32 [[TMP29]], 30
; CHECK-NEXT:    [[TMP31:%.*]] = or i32 [[TMP30]], 1
; CHECK-NEXT:    [[TMP32:%.*]] = sitofp i32 [[TMP27]] to float
; CHECK-NEXT:    [[TMP33:%.*]] = sitofp i32 [[TMP28]] to float
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP32]], [[TMP34]]
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.trunc.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fneg fast float [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP37]], float [[TMP33]], float [[TMP32]])
; CHECK-NEXT:    [[TMP39:%.*]] = fptosi float [[TMP36]] to i32
; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.fabs.f32(float [[TMP38]])
; CHECK-NEXT:    [[TMP41:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP42:%.*]] = fcmp fast oge float [[TMP40]], [[TMP41]]
; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 [[TMP31]], i32 0
; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP39]], [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = shl i32 [[TMP44]], 16
; CHECK-NEXT:    [[TMP46:%.*]] = ashr i32 [[TMP45]], 16
; CHECK-NEXT:    [[TMP47:%.*]] = trunc i32 [[TMP46]] to i16
; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <4 x i16> [[TMP24]], i16 [[TMP47]], i64 1
; CHECK-NEXT:    [[TMP49:%.*]] = extractelement <4 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP50:%.*]] = extractelement <4 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP51:%.*]] = sext i16 [[TMP49]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = sext i16 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP53:%.*]] = xor i32 [[TMP51]], [[TMP52]]
; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP53]], 30
; CHECK-NEXT:    [[TMP55:%.*]] = or i32 [[TMP54]], 1
; CHECK-NEXT:    [[TMP56:%.*]] = sitofp i32 [[TMP51]] to float
; CHECK-NEXT:    [[TMP57:%.*]] = sitofp i32 [[TMP52]] to float
; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP57]])
; CHECK-NEXT:    [[TMP59:%.*]] = fmul fast float [[TMP56]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = call fast float @llvm.trunc.f32(float [[TMP59]])
; CHECK-NEXT:    [[TMP61:%.*]] = fneg fast float [[TMP60]]
; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP61]], float [[TMP57]], float [[TMP56]])
; CHECK-NEXT:    [[TMP63:%.*]] = fptosi float [[TMP60]] to i32
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.fabs.f32(float [[TMP62]])
; CHECK-NEXT:    [[TMP65:%.*]] = call fast float @llvm.fabs.f32(float [[TMP57]])
; CHECK-NEXT:    [[TMP66:%.*]] = fcmp fast oge float [[TMP64]], [[TMP65]]
; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP66]], i32 [[TMP55]], i32 0
; CHECK-NEXT:    [[TMP68:%.*]] = add i32 [[TMP63]], [[TMP67]]
; CHECK-NEXT:    [[TMP69:%.*]] = shl i32 [[TMP68]], 16
; CHECK-NEXT:    [[TMP70:%.*]] = ashr i32 [[TMP69]], 16
; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i16
; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <4 x i16> [[TMP48]], i16 [[TMP71]], i64 2
; CHECK-NEXT:    [[TMP73:%.*]] = extractelement <4 x i16> [[X]], i64 3
; CHECK-NEXT:    [[TMP74:%.*]] = extractelement <4 x i16> [[Y]], i64 3
; CHECK-NEXT:    [[TMP75:%.*]] = sext i16 [[TMP73]] to i32
; CHECK-NEXT:    [[TMP76:%.*]] = sext i16 [[TMP74]] to i32
; CHECK-NEXT:    [[TMP77:%.*]] = xor i32 [[TMP75]], [[TMP76]]
; CHECK-NEXT:    [[TMP78:%.*]] = ashr i32 [[TMP77]], 30
; CHECK-NEXT:    [[TMP79:%.*]] = or i32 [[TMP78]], 1
; CHECK-NEXT:    [[TMP80:%.*]] = sitofp i32 [[TMP75]] to float
; CHECK-NEXT:    [[TMP81:%.*]] = sitofp i32 [[TMP76]] to float
; CHECK-NEXT:    [[TMP82:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP81]])
; CHECK-NEXT:    [[TMP83:%.*]] = fmul fast float [[TMP80]], [[TMP82]]
; CHECK-NEXT:    [[TMP84:%.*]] = call fast float @llvm.trunc.f32(float [[TMP83]])
; CHECK-NEXT:    [[TMP85:%.*]] = fneg fast float [[TMP84]]
; CHECK-NEXT:    [[TMP86:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP85]], float [[TMP81]], float [[TMP80]])
; CHECK-NEXT:    [[TMP87:%.*]] = fptosi float [[TMP84]] to i32
; CHECK-NEXT:    [[TMP88:%.*]] = call fast float @llvm.fabs.f32(float [[TMP86]])
; CHECK-NEXT:    [[TMP89:%.*]] = call fast float @llvm.fabs.f32(float [[TMP81]])
; CHECK-NEXT:    [[TMP90:%.*]] = fcmp fast oge float [[TMP88]], [[TMP89]]
; CHECK-NEXT:    [[TMP91:%.*]] = select i1 [[TMP90]], i32 [[TMP79]], i32 0
; CHECK-NEXT:    [[TMP92:%.*]] = add i32 [[TMP87]], [[TMP91]]
; CHECK-NEXT:    [[TMP93:%.*]] = shl i32 [[TMP92]], 16
; CHECK-NEXT:    [[TMP94:%.*]] = ashr i32 [[TMP93]], 16
; CHECK-NEXT:    [[TMP95:%.*]] = trunc i32 [[TMP94]] to i16
; CHECK-NEXT:    [[TMP96:%.*]] = insertelement <4 x i16> [[TMP72]], i16 [[TMP95]], i64 3
; CHECK-NEXT:    store <4 x i16> [[TMP96]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v4i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_sext_i32_i16 s8, s6
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s8
; GFX6-NEXT:    s_sext_i32_i16 s9, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s9
; GFX6-NEXT:    s_xor_b32 s8, s9, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s6, s6, 16
; GFX6-NEXT:    s_ashr_i32 s8, s8, 30
; GFX6-NEXT:    s_or_b32 s8, s8, 1
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s6
; GFX6-NEXT:    v_mov_b32_e32 v3, s8
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    s_ashr_i32 s4, s4, 16
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v1
; GFX6-NEXT:    s_xor_b32 s4, s4, s6
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_mad_f32 v2, -v3, v1, v2
; GFX6-NEXT:    v_mov_b32_e32 v4, s4
; GFX6-NEXT:    s_sext_i32_i16 s4, s7
; GFX6-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v1|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s4
; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
; GFX6-NEXT:    s_sext_i32_i16 s6, s5
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v2
; GFX6-NEXT:    s_xor_b32 s4, s6, s4
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v4, v1, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v1, -v4, v2, v1
; GFX6-NEXT:    v_mov_b32_e32 v5, s4
; GFX6-NEXT:    s_ashr_i32 s4, s7, 16
; GFX6-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v2|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s4
; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v5, vcc
; GFX6-NEXT:    s_ashr_i32 s5, s5, 16
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v2
; GFX6-NEXT:    s_xor_b32 s4, s5, s4
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX6-NEXT:    v_trunc_f32_e32 v5, v5
; GFX6-NEXT:    v_mad_f32 v4, -v5, v2, v4
; GFX6-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX6-NEXT:    v_mov_b32_e32 v6, s4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v2
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v4i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_sext_i32_i16 s0, s6
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s0
; GFX9-NEXT:    s_sext_i32_i16 s1, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s8, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v3, v1, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v1, -v3, v0, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s8, 0
; GFX9-NEXT:    s_ashr_i32 s1, s6, 16
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s1
; GFX9-NEXT:    s_ashr_i32 s4, s4, 16
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s4
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    v_add_u32_e32 v3, s0, v3
; GFX9-NEXT:    v_mul_f32_e32 v4, v1, v4
; GFX9-NEXT:    s_xor_b32 s0, s4, s1
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    v_mad_f32 v1, -v4, v0, v1
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX9-NEXT:    s_sext_i32_i16 s1, s7
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s1
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v4, s0, v4
; GFX9-NEXT:    s_sext_i32_i16 s0, s5
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v0
; GFX9-NEXT:    s_xor_b32 s0, s0, s1
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v5, v1, v5
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mad_f32 v1, -v5, v0, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    s_ashr_i32 s1, s7, 16
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s1
; GFX9-NEXT:    v_add_u32_e32 v1, s0, v5
; GFX9-NEXT:    s_ashr_i32 s0, s5, 16
; GFX9-NEXT:    v_cvt_f32_i32_e32 v5, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v0
; GFX9-NEXT:    s_xor_b32 s0, s0, s1
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v6, v5, v6
; GFX9-NEXT:    v_trunc_f32_e32 v6, v6
; GFX9-NEXT:    v_mad_f32 v5, -v6, v0, v5
; GFX9-NEXT:    v_cvt_i32_f32_e32 v6, v6
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v5|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v6
; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT:    v_lshl_or_b32 v1, v0, 16, v1
; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v3
; GFX9-NEXT:    v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <4 x i16> %x, %y
  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @srem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: @srem_v4i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fneg fast float [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], [[TMP4]]
; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP3]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = shl i32 [[TMP22]], 16
; CHECK-NEXT:    [[TMP24:%.*]] = ashr i32 [[TMP23]], 16
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <4 x i16> undef, i16 [[TMP25]], i64 0
; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <4 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <4 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP29:%.*]] = sext i16 [[TMP27]] to i32
; CHECK-NEXT:    [[TMP30:%.*]] = sext i16 [[TMP28]] to i32
; CHECK-NEXT:    [[TMP31:%.*]] = xor i32 [[TMP29]], [[TMP30]]
; CHECK-NEXT:    [[TMP32:%.*]] = ashr i32 [[TMP31]], 30
; CHECK-NEXT:    [[TMP33:%.*]] = or i32 [[TMP32]], 1
; CHECK-NEXT:    [[TMP34:%.*]] = sitofp i32 [[TMP29]] to float
; CHECK-NEXT:    [[TMP35:%.*]] = sitofp i32 [[TMP30]] to float
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP34]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.trunc.f32(float [[TMP37]])
; CHECK-NEXT:    [[TMP39:%.*]] = fneg fast float [[TMP38]]
; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP39]], float [[TMP35]], float [[TMP34]])
; CHECK-NEXT:    [[TMP41:%.*]] = fptosi float [[TMP38]] to i32
; CHECK-NEXT:    [[TMP42:%.*]] = call fast float @llvm.fabs.f32(float [[TMP40]])
; CHECK-NEXT:    [[TMP43:%.*]] = call fast float @llvm.fabs.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP44:%.*]] = fcmp fast oge float [[TMP42]], [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP44]], i32 [[TMP33]], i32 0
; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP41]], [[TMP45]]
; CHECK-NEXT:    [[TMP47:%.*]] = mul i32 [[TMP46]], [[TMP30]]
; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP29]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = shl i32 [[TMP48]], 16
; CHECK-NEXT:    [[TMP50:%.*]] = ashr i32 [[TMP49]], 16
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i32 [[TMP50]] to i16
; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <4 x i16> [[TMP26]], i16 [[TMP51]], i64 1
; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <4 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <4 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP55:%.*]] = sext i16 [[TMP53]] to i32
; CHECK-NEXT:    [[TMP56:%.*]] = sext i16 [[TMP54]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 30
; CHECK-NEXT:    [[TMP59:%.*]] = or i32 [[TMP58]], 1
; CHECK-NEXT:    [[TMP60:%.*]] = sitofp i32 [[TMP55]] to float
; CHECK-NEXT:    [[TMP61:%.*]] = sitofp i32 [[TMP56]] to float
; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP61]])
; CHECK-NEXT:    [[TMP63:%.*]] = fmul fast float [[TMP60]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.trunc.f32(float [[TMP63]])
; CHECK-NEXT:    [[TMP65:%.*]] = fneg fast float [[TMP64]]
; CHECK-NEXT:    [[TMP66:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP65]], float [[TMP61]], float [[TMP60]])
; CHECK-NEXT:    [[TMP67:%.*]] = fptosi float [[TMP64]] to i32
; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.fabs.f32(float [[TMP61]])
; CHECK-NEXT:    [[TMP70:%.*]] = fcmp fast oge float [[TMP68]], [[TMP69]]
; CHECK-NEXT:    [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP59]], i32 0
; CHECK-NEXT:    [[TMP72:%.*]] = add i32 [[TMP67]], [[TMP71]]
; CHECK-NEXT:    [[TMP73:%.*]] = mul i32 [[TMP72]], [[TMP56]]
; CHECK-NEXT:    [[TMP74:%.*]] = sub i32 [[TMP55]], [[TMP73]]
; CHECK-NEXT:    [[TMP75:%.*]] = shl i32 [[TMP74]], 16
; CHECK-NEXT:    [[TMP76:%.*]] = ashr i32 [[TMP75]], 16
; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i16
; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <4 x i16> [[TMP52]], i16 [[TMP77]], i64 2
; CHECK-NEXT:    [[TMP79:%.*]] = extractelement <4 x i16> [[X]], i64 3
; CHECK-NEXT:    [[TMP80:%.*]] = extractelement <4 x i16> [[Y]], i64 3
; CHECK-NEXT:    [[TMP81:%.*]] = sext i16 [[TMP79]] to i32
; CHECK-NEXT:    [[TMP82:%.*]] = sext i16 [[TMP80]] to i32
; CHECK-NEXT:    [[TMP83:%.*]] = xor i32 [[TMP81]], [[TMP82]]
; CHECK-NEXT:    [[TMP84:%.*]] = ashr i32 [[TMP83]], 30
; CHECK-NEXT:    [[TMP85:%.*]] = or i32 [[TMP84]], 1
; CHECK-NEXT:    [[TMP86:%.*]] = sitofp i32 [[TMP81]] to float
; CHECK-NEXT:    [[TMP87:%.*]] = sitofp i32 [[TMP82]] to float
; CHECK-NEXT:    [[TMP88:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP87]])
; CHECK-NEXT:    [[TMP89:%.*]] = fmul fast float [[TMP86]], [[TMP88]]
; CHECK-NEXT:    [[TMP90:%.*]] = call fast float @llvm.trunc.f32(float [[TMP89]])
; CHECK-NEXT:    [[TMP91:%.*]] = fneg fast float [[TMP90]]
; CHECK-NEXT:    [[TMP92:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP91]], float [[TMP87]], float [[TMP86]])
; CHECK-NEXT:    [[TMP93:%.*]] = fptosi float [[TMP90]] to i32
; CHECK-NEXT:    [[TMP94:%.*]] = call fast float @llvm.fabs.f32(float [[TMP92]])
; CHECK-NEXT:    [[TMP95:%.*]] = call fast float @llvm.fabs.f32(float [[TMP87]])
; CHECK-NEXT:    [[TMP96:%.*]] = fcmp fast oge float [[TMP94]], [[TMP95]]
; CHECK-NEXT:    [[TMP97:%.*]] = select i1 [[TMP96]], i32 [[TMP85]], i32 0
; CHECK-NEXT:    [[TMP98:%.*]] = add i32 [[TMP93]], [[TMP97]]
; CHECK-NEXT:    [[TMP99:%.*]] = mul i32 [[TMP98]], [[TMP82]]
; CHECK-NEXT:    [[TMP100:%.*]] = sub i32 [[TMP81]], [[TMP99]]
; CHECK-NEXT:    [[TMP101:%.*]] = shl i32 [[TMP100]], 16
; CHECK-NEXT:    [[TMP102:%.*]] = ashr i32 [[TMP101]], 16
; CHECK-NEXT:    [[TMP103:%.*]] = trunc i32 [[TMP102]] to i16
; CHECK-NEXT:    [[TMP104:%.*]] = insertelement <4 x i16> [[TMP78]], i16 [[TMP103]], i64 3
; CHECK-NEXT:    store <4 x i16> [[TMP104]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v4i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_sext_i32_i16 s8, s6
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s8
; GFX6-NEXT:    s_sext_i32_i16 s9, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s9
; GFX6-NEXT:    s_xor_b32 s8, s9, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s8, s8, 30
; GFX6-NEXT:    s_or_b32 s8, s8, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s8
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    v_mov_b32_e32 v1, s4
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_alignbit_b32 v2, s7, v2, 16
; GFX6-NEXT:    v_bfe_i32 v3, v2, 0, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, v3
; GFX6-NEXT:    v_alignbit_b32 v1, s5, v1, 16
; GFX6-NEXT:    v_bfe_i32 v5, v1, 0, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v6, v5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v4
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX6-NEXT:    v_xor_b32_e32 v3, v5, v3
; GFX6-NEXT:    v_ashrrev_i32_e32 v3, 30, v3
; GFX6-NEXT:    v_mul_f32_e32 v5, v6, v7
; GFX6-NEXT:    v_trunc_f32_e32 v5, v5
; GFX6-NEXT:    v_mad_f32 v6, -v5, v4, v6
; GFX6-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_or_b32_e32 v3, 1, v3
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, |v4|
; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
; GFX6-NEXT:    s_sext_i32_i16 s4, s7
; GFX6-NEXT:    v_mul_lo_u32 v2, v3, v2
; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, s4
; GFX6-NEXT:    s_sext_i32_i16 s6, s5
; GFX6-NEXT:    s_xor_b32 s4, s6, s4
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, v1, v2
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v3
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mov_b32_e32 v5, s4
; GFX6-NEXT:    v_mul_f32_e32 v4, v2, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v2, -v4, v3, v2
; GFX6-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX6-NEXT:    s_ashr_i32 s4, s7, 16
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v3|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, s4
; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_mul_lo_u32 v2, v2, s7
; GFX6-NEXT:    s_lshr_b32 s6, s7, 16
; GFX6-NEXT:    s_ashr_i32 s7, s5, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, s7
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v3
; GFX6-NEXT:    s_xor_b32 s4, s7, s4
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX6-NEXT:    v_trunc_f32_e32 v5, v5
; GFX6-NEXT:    v_mad_f32 v4, -v5, v3, v4
; GFX6-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX6-NEXT:    v_mov_b32_e32 v6, s4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v3|
; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_mul_lo_u32 v3, v3, s6
; GFX6-NEXT:    s_lshr_b32 s4, s5, 16
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s5, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s4, v3
; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
; GFX6-NEXT:    v_and_b32_e32 v2, 0xffff, v2
; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v4i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_sext_i32_i16 s8, s6
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s8
; GFX9-NEXT:    s_sext_i32_i16 s9, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s9
; GFX9-NEXT:    s_xor_b32 s0, s9, s8
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s10, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v3, v1, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v1, -v3, v0, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s10, 0
; GFX9-NEXT:    s_ashr_i32 s6, s6, 16
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s6
; GFX9-NEXT:    s_ashr_i32 s4, s4, 16
; GFX9-NEXT:    v_add_u32_e32 v1, s0, v3
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    s_xor_b32 s0, s4, s6
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, s8
; GFX9-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    v_mad_f32 v3, -v4, v0, v3
; GFX9-NEXT:    s_or_b32 s8, s0, 1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v3|, |v0|
; GFX9-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s8, 0
; GFX9-NEXT:    s_sext_i32_i16 s8, s7
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, s8
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v4
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX9-NEXT:    s_sext_i32_i16 s6, s5
; GFX9-NEXT:    v_cvt_f32_i32_e32 v4, s6
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v3
; GFX9-NEXT:    s_xor_b32 s0, s6, s8
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s10, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mad_f32 v4, -v5, v3, v4
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, |v3|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s10, 0
; GFX9-NEXT:    s_ashr_i32 s7, s7, 16
; GFX9-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX9-NEXT:    v_cvt_f32_i32_e32 v4, s7
; GFX9-NEXT:    s_ashr_i32 s5, s5, 16
; GFX9-NEXT:    v_sub_u32_e32 v0, s4, v0
; GFX9-NEXT:    v_add_u32_e32 v3, s0, v5
; GFX9-NEXT:    v_cvt_f32_i32_e32 v5, s5
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v4
; GFX9-NEXT:    s_xor_b32 s0, s5, s7
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    v_mul_lo_u32 v3, v3, s8
; GFX9-NEXT:    v_mul_f32_e32 v6, v5, v6
; GFX9-NEXT:    v_trunc_f32_e32 v6, v6
; GFX9-NEXT:    v_mad_f32 v5, -v6, v4, v5
; GFX9-NEXT:    v_cvt_i32_f32_e32 v6, v6
; GFX9-NEXT:    s_or_b32 s8, s0, 1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v5|, |v4|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s8, 0
; GFX9-NEXT:    v_add_u32_e32 v4, s0, v6
; GFX9-NEXT:    v_mul_lo_u32 v4, v4, s7
; GFX9-NEXT:    v_sub_u32_e32 v5, s9, v1
; GFX9-NEXT:    v_sub_u32_e32 v1, s6, v3
; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT:    v_sub_u32_e32 v3, s5, v4
; GFX9-NEXT:    v_lshl_or_b32 v1, v3, 16, v1
; GFX9-NEXT:    v_and_b32_e32 v3, 0xffff, v5
; GFX9-NEXT:    v_lshl_or_b32 v0, v0, 16, v3
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = srem <4 x i16> %x, %y
  store <4 x i16> %r, <4 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @udiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
; CHECK-LABEL: @udiv_i3(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = zext i3 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fneg fast float [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 7
; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i3
; CHECK-NEXT:    store i3 [[TMP17]], i3 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i3:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_u32 s2, s4, 0x30008
; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v0, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX6-NEXT:    s_and_b32 s4, s4, 7
; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v2, s4
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_f32_e32 v1, v2, v1
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v1
; GFX6-NEXT:    v_mad_f32 v1, -v1, v0, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i3:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_u32 s0, s4, 0x30008
; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v0, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX9-NEXT:    s_and_b32 s0, s4, 7
; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v3, s0
; GFX9-NEXT:    v_mul_f32_e32 v1, v3, v1
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v1
; GFX9-NEXT:    v_mad_f32 v1, -v1, v0, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v4, vcc
; GFX9-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = udiv i3 %x, %y
  store i3 %r, i3 addrspace(1)* %out
  ret void
}

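; Same expansion as udiv_i3, followed by mul/sub to form the remainder and an
; 'and ..., 7' mask. The ISA reads the divisor back out of the packed kernel
; argument with s_lshr_b32 ..., 8 before the v_mul_lo_u32/v_sub step.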
define amdgpu_kernel void @urem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
; CHECK-LABEL: @urem_i3(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = zext i3 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP5:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast float [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.trunc.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fneg fast float [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[TMP4]], float [[TMP3]])
; CHECK-NEXT:    [[TMP10:%.*]] = fptoui float [[TMP7]] to i32
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.fabs.f32(float [[TMP4]])
; CHECK-NEXT:    [[TMP13:%.*]] = fcmp fast oge float [[TMP11]], [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 1, i32 0
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], [[TMP2]]
; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 7
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i3
; CHECK-NEXT:    store i3 [[TMP19]], i3 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i3:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_u32 s2, s4, 0x30008
; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v0, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX6-NEXT:    s_and_b32 s3, s4, 7
; GFX6-NEXT:    v_cvt_f32_ubyte0_e32 v2, s3
; GFX6-NEXT:    s_lshr_b32 s2, s4, 8
; GFX6-NEXT:    v_mul_f32_e32 v1, v2, v1
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v1
; GFX6-NEXT:    v_mad_f32 v1, -v1, v0, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i3:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_u32 s3, s2, 0x30008
; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v0
; GFX9-NEXT:    s_and_b32 s4, s2, 7
; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v2, s4
; GFX9-NEXT:    s_lshr_b32 s3, s2, 8
; GFX9-NEXT:    v_mul_f32_e32 v1, v2, v1
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v1
; GFX9-NEXT:    v_mad_f32 v1, -v1, v0, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_sub_u32_e32 v0, s2, v0
; GFX9-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_byte v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem i3 %x, %y
  store i3 %r, i3 addrspace(1)* %out
  ret void
}

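; Signed variant: the operands are sign-extended, the quotient correction is
; or(ashr(xor(x, y), 30), 1) selected on |rem estimate| >= |denominator|, and
; the 3-bit result is re-sign-extended with a shl/ashr-by-29 pair at the IR
; level. GFX6 materializes the +/-1 in a VGPR for v_cndmask; GFX9 keeps the
; select scalar via s_and_b64 exec / s_cselect_b32.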
define amdgpu_kernel void @sdiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
; CHECK-LABEL: @sdiv_i3(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i3 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = sext i3 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fneg fast float [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 29
; CHECK-NEXT:    [[TMP20:%.*]] = ashr i32 [[TMP19]], 29
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i3
; CHECK-NEXT:    store i3 [[TMP21]], i3 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i3:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_i32 s5, s4, 0x30008
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s5
; GFX6-NEXT:    s_bfe_i32 s4, s4, 0x30000
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s4
; GFX6-NEXT:    s_xor_b32 s4, s4, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s4
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i3:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_i32 s0, s4, 0x30008
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s0
; GFX9-NEXT:    s_bfe_i32 s1, s4, 0x30000
; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v2, -v3, v0, v2
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v2|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v3
; GFX9-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX9-NEXT:    global_store_byte v1, v0, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i3 %x, %y
  store i3 %r, i3 addrspace(1)* %out
  ret void
}

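; As in sdiv_i3 but with a trailing mul/sub to form the remainder. The IR's
; shl/ashr re-sign-extension folds away in the ISA, since only the low 3 bits
; reach the byte store.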
define amdgpu_kernel void @srem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
; CHECK-LABEL: @srem_i3(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i3 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = sext i3 [[Y:%.*]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP3]], 30
; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast float [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.trunc.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fneg fast float [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float [[TMP7]], float [[TMP6]])
; CHECK-NEXT:    [[TMP13:%.*]] = fptosi float [[TMP10]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP12]])
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.fabs.f32(float [[TMP7]])
; CHECK-NEXT:    [[TMP16:%.*]] = fcmp fast oge float [[TMP14]], [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP13]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], [[TMP2]]
; CHECK-NEXT:    [[TMP20:%.*]] = sub i32 [[TMP1]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 29
; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 29
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i3
; CHECK-NEXT:    store i3 [[TMP23]], i3 addrspace(1)* [[OUT:%.*]], align 1
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i3:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_i32 s2, s4, 0x30008
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s2
; GFX6-NEXT:    s_bfe_i32 s5, s4, 0x30000
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s5
; GFX6-NEXT:    s_xor_b32 s2, s5, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s2, s2, 30
; GFX6-NEXT:    s_or_b32 s2, s2, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s2
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    s_lshr_b32 s3, s4, 8
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX6-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i3:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_i32 s2, s4, 0x30008
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s2
; GFX9-NEXT:    s_bfe_i32 s3, s4, 0x30000
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s3
; GFX9-NEXT:    s_xor_b32 s2, s3, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT:    s_ashr_i32 s2, s2, 30
; GFX9-NEXT:    s_lshr_b32 s5, s4, 8
; GFX9-NEXT:    s_or_b32 s6, s2, 1
; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], exec
; GFX9-NEXT:    s_cselect_b32 s2, s6, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s2, v2
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s5
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_sub_u32_e32 v0, s4, v0
; GFX9-NEXT:    v_and_b32_e32 v0, 7, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_byte v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = srem i3 %x, %y
  store i3 %r, i3 addrspace(1)* %out
  ret void
}

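; <3 x i16> is scalarized: each lane gets its own zext/rcp/correction sequence
; and the results are reassembled with insertelement. The stores split the
; vector into a dword for lanes 0-1 (packed with v_lshl_or_b32 on GFX9,
; lshl/or on GFX6) and a separate short at offset 4 for lane 2.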
define amdgpu_kernel void @udiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
; CHECK-LABEL: @udiv_v3i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fneg fast float [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <3 x i16> undef, i16 [[TMP19]], i64 0
; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <3 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <3 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP23:%.*]] = zext i16 [[TMP21]] to i32
; CHECK-NEXT:    [[TMP24:%.*]] = zext i16 [[TMP22]] to i32
; CHECK-NEXT:    [[TMP25:%.*]] = uitofp i32 [[TMP23]] to float
; CHECK-NEXT:    [[TMP26:%.*]] = uitofp i32 [[TMP24]] to float
; CHECK-NEXT:    [[TMP27:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP26]])
; CHECK-NEXT:    [[TMP28:%.*]] = fmul fast float [[TMP25]], [[TMP27]]
; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.trunc.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP30:%.*]] = fneg fast float [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float [[TMP26]], float [[TMP25]])
; CHECK-NEXT:    [[TMP32:%.*]] = fptoui float [[TMP29]] to i32
; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.fabs.f32(float [[TMP31]])
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.fabs.f32(float [[TMP26]])
; CHECK-NEXT:    [[TMP35:%.*]] = fcmp fast oge float [[TMP33]], [[TMP34]]
; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 1, i32 0
; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP32]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = and i32 [[TMP37]], 65535
; CHECK-NEXT:    [[TMP39:%.*]] = trunc i32 [[TMP38]] to i16
; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <3 x i16> [[TMP20]], i16 [[TMP39]], i64 1
; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <3 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <3 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP43:%.*]] = zext i16 [[TMP41]] to i32
; CHECK-NEXT:    [[TMP44:%.*]] = zext i16 [[TMP42]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = uitofp i32 [[TMP43]] to float
; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP44]] to float
; CHECK-NEXT:    [[TMP47:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP46]])
; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP45]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = call fast float @llvm.trunc.f32(float [[TMP48]])
; CHECK-NEXT:    [[TMP50:%.*]] = fneg fast float [[TMP49]]
; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP50]], float [[TMP46]], float [[TMP45]])
; CHECK-NEXT:    [[TMP52:%.*]] = fptoui float [[TMP49]] to i32
; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.fabs.f32(float [[TMP51]])
; CHECK-NEXT:    [[TMP54:%.*]] = call fast float @llvm.fabs.f32(float [[TMP46]])
; CHECK-NEXT:    [[TMP55:%.*]] = fcmp fast oge float [[TMP53]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP55]], i32 1, i32 0
; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP52]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 65535
; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i16
; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <3 x i16> [[TMP40]], i16 [[TMP59]], i64 2
; CHECK-NEXT:    store <3 x i16> [[TMP60]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v3i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s9, s6, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s9
; GFX6-NEXT:    s_lshr_b32 s6, s6, 16
; GFX6-NEXT:    s_and_b32 s8, s4, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v2, s6
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX6-NEXT:    s_lshr_b32 s4, s4, 16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v2
; GFX6-NEXT:    v_mul_f32_e32 v3, v1, v3
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_mad_f32 v1, -v3, v0, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, v4, v5
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    s_and_b32 s4, s7, 0xffff
; GFX6-NEXT:    v_cvt_u32_f32_e32 v6, v3
; GFX6-NEXT:    v_mad_f32 v3, -v1, v2, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s4
; GFX6-NEXT:    s_and_b32 s4, s5, 0xffff
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v6, vcc
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v6, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v2
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_mul_f32_e32 v2, v5, v6
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX6-NEXT:    v_mad_f32 v2, -v2, v4, v5
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v4
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
; GFX6-NEXT:    buffer_store_short v2, off, s[0:3], 0 offset:4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v3i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v6, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s3, s6, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_and_b32 s2, s4, 0xffff
; GFX9-NEXT:    s_lshr_b32 s6, s6, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s6
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    s_lshr_b32 s4, s4, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v1
; GFX9-NEXT:    v_mul_f32_e32 v4, v2, v4
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    s_and_b32 s2, s7, 0xffff
; GFX9-NEXT:    v_cvt_u32_f32_e32 v7, v4
; GFX9-NEXT:    v_mad_f32 v2, -v4, v0, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, s2
; GFX9-NEXT:    v_mul_f32_e32 v5, v3, v5
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
; GFX9-NEXT:    v_trunc_f32_e32 v2, v5
; GFX9-NEXT:    s_and_b32 s2, s5, 0xffff
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v7, vcc
; GFX9-NEXT:    v_mad_f32 v3, -v2, v1, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v7, v4
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT:    v_mul_f32_e32 v2, v5, v7
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v2
; GFX9-NEXT:    v_mad_f32 v2, -v2, v4, v5
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v4
; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
; GFX9-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT:    global_store_short v6, v2, s[0:1] offset:4
; GFX9-NEXT:    global_store_dword v6, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv <3 x i16> %x, %y
  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
  ret void
}

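; Per-lane urem: the quotient expansion plus mul/sub for each of the three
; lanes. GFX6 extracts the odd lanes from the packed arguments with
; v_alignbit_b32, while GFX9 uses s_lshr_b32; the store layout matches
; udiv_v3i16.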
define amdgpu_kernel void @urem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
; CHECK-LABEL: @urem_v3i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fneg fast float [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], [[TMP4]]
; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 [[TMP3]], [[TMP18]]
; CHECK-NEXT:    [[TMP20:%.*]] = and i32 [[TMP19]], 65535
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <3 x i16> undef, i16 [[TMP21]], i64 0
; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <3 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <3 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP25:%.*]] = zext i16 [[TMP23]] to i32
; CHECK-NEXT:    [[TMP26:%.*]] = zext i16 [[TMP24]] to i32
; CHECK-NEXT:    [[TMP27:%.*]] = uitofp i32 [[TMP25]] to float
; CHECK-NEXT:    [[TMP28:%.*]] = uitofp i32 [[TMP26]] to float
; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP30:%.*]] = fmul fast float [[TMP27]], [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.trunc.f32(float [[TMP30]])
; CHECK-NEXT:    [[TMP32:%.*]] = fneg fast float [[TMP31]]
; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP32]], float [[TMP28]], float [[TMP27]])
; CHECK-NEXT:    [[TMP34:%.*]] = fptoui float [[TMP31]] to i32
; CHECK-NEXT:    [[TMP35:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.fabs.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP37:%.*]] = fcmp fast oge float [[TMP35]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 1, i32 0
; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP34]], [[TMP38]]
; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP26]]
; CHECK-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP25]], [[TMP40]]
; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP41]], 65535
; CHECK-NEXT:    [[TMP43:%.*]] = trunc i32 [[TMP42]] to i16
; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <3 x i16> [[TMP22]], i16 [[TMP43]], i64 1
; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <3 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP46:%.*]] = extractelement <3 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP47:%.*]] = zext i16 [[TMP45]] to i32
; CHECK-NEXT:    [[TMP48:%.*]] = zext i16 [[TMP46]] to i32
; CHECK-NEXT:    [[TMP49:%.*]] = uitofp i32 [[TMP47]] to float
; CHECK-NEXT:    [[TMP50:%.*]] = uitofp i32 [[TMP48]] to float
; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP50]])
; CHECK-NEXT:    [[TMP52:%.*]] = fmul fast float [[TMP49]], [[TMP51]]
; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.trunc.f32(float [[TMP52]])
; CHECK-NEXT:    [[TMP54:%.*]] = fneg fast float [[TMP53]]
; CHECK-NEXT:    [[TMP55:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP54]], float [[TMP50]], float [[TMP49]])
; CHECK-NEXT:    [[TMP56:%.*]] = fptoui float [[TMP53]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = call fast float @llvm.fabs.f32(float [[TMP55]])
; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.fabs.f32(float [[TMP50]])
; CHECK-NEXT:    [[TMP59:%.*]] = fcmp fast oge float [[TMP57]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP59]], i32 1, i32 0
; CHECK-NEXT:    [[TMP61:%.*]] = add i32 [[TMP56]], [[TMP60]]
; CHECK-NEXT:    [[TMP62:%.*]] = mul i32 [[TMP61]], [[TMP48]]
; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 [[TMP47]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 65535
; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i16
; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <3 x i16> [[TMP44]], i16 [[TMP65]], i64 2
; CHECK-NEXT:    store <3 x i16> [[TMP66]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v3i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s8, s6, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s8
; GFX6-NEXT:    s_and_b32 s2, s4, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s2
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v1
; GFX6-NEXT:    v_alignbit_b32 v2, s7, v2, 16
; GFX6-NEXT:    v_and_b32_e32 v5, 0xffff, v2
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, v5
; GFX6-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v6, v4
; GFX6-NEXT:    v_mad_f32 v3, -v4, v1, v3
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v6, vcc
; GFX6-NEXT:    v_alignbit_b32 v0, s5, v0, 16
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s6
; GFX6-NEXT:    v_and_b32_e32 v3, 0xffff, v0
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, v3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v5
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v1
; GFX6-NEXT:    s_and_b32 s4, s7, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s4
; GFX6-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v3, -v4, v5, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    s_and_b32 s4, s5, 0xffff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v7, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v8, v6
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v5
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v3, v2
; GFX6-NEXT:    v_mul_f32_e32 v3, v7, v8
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v3
; GFX6-NEXT:    v_mad_f32 v3, -v3, v6, v7
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v6
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v3, v3, s7
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s5, v3
; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT:    v_or_b32_e32 v0, v1, v0
; GFX6-NEXT:    buffer_store_short v2, off, s[0:3], 0 offset:4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v3i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s3, s6, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_and_b32 s2, s4, 0xffff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s2
; GFX9-NEXT:    s_lshr_b32 s6, s6, 16
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s6
; GFX9-NEXT:    s_lshr_b32 s4, s4, 16
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s4
; GFX9-NEXT:    v_mul_f32_e32 v4, v2, v4
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v6, v4
; GFX9-NEXT:    v_mad_f32 v2, -v4, v0, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
; GFX9-NEXT:    v_mul_f32_e32 v5, v3, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v6, vcc
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s3
; GFX9-NEXT:    s_and_b32 s3, s7, 0xffff
; GFX9-NEXT:    v_mad_f32 v2, -v5, v1, v3
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s3
; GFX9-NEXT:    s_and_b32 s5, s5, 0xffff
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v5
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, s5
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v4, vcc
; GFX9-NEXT:    v_mul_f32_e32 v2, v5, v6
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v2
; GFX9-NEXT:    v_mad_f32 v2, -v2, v3, v5
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v3
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, s6
; GFX9-NEXT:    v_mul_lo_u32 v2, v2, s3
; GFX9-NEXT:    v_sub_u32_e32 v0, s2, v0
; GFX9-NEXT:    v_mov_b32_e32 v3, 0
; GFX9-NEXT:    v_sub_u32_e32 v1, s4, v1
; GFX9-NEXT:    v_sub_u32_e32 v2, s5, v2
; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_short v3, v2, s[0:1] offset:4
; GFX9-NEXT:    global_store_dword v3, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem <3 x i16> %x, %y
  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
  ret void
}

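; Signed per-lane division: each lane carries its own xor/ashr/or sign fixup
; and is re-sign-extended with shl/ashr by 16 at the IR level before being
; packed back into the vector.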
define amdgpu_kernel void @sdiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
; CHECK-LABEL: @sdiv_v3i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fneg fast float [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 16
; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 16
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <3 x i16> undef, i16 [[TMP23]], i64 0
; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <3 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <3 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP27:%.*]] = sext i16 [[TMP25]] to i32
; CHECK-NEXT:    [[TMP28:%.*]] = sext i16 [[TMP26]] to i32
; CHECK-NEXT:    [[TMP29:%.*]] = xor i32 [[TMP27]], [[TMP28]]
; CHECK-NEXT:    [[TMP30:%.*]] = ashr i32 [[TMP29]], 30
; CHECK-NEXT:    [[TMP31:%.*]] = or i32 [[TMP30]], 1
; CHECK-NEXT:    [[TMP32:%.*]] = sitofp i32 [[TMP27]] to float
; CHECK-NEXT:    [[TMP33:%.*]] = sitofp i32 [[TMP28]] to float
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP32]], [[TMP34]]
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.trunc.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fneg fast float [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP37]], float [[TMP33]], float [[TMP32]])
; CHECK-NEXT:    [[TMP39:%.*]] = fptosi float [[TMP36]] to i32
; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.fabs.f32(float [[TMP38]])
; CHECK-NEXT:    [[TMP41:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP42:%.*]] = fcmp fast oge float [[TMP40]], [[TMP41]]
; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 [[TMP31]], i32 0
; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP39]], [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = shl i32 [[TMP44]], 16
; CHECK-NEXT:    [[TMP46:%.*]] = ashr i32 [[TMP45]], 16
; CHECK-NEXT:    [[TMP47:%.*]] = trunc i32 [[TMP46]] to i16
; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <3 x i16> [[TMP24]], i16 [[TMP47]], i64 1
; CHECK-NEXT:    [[TMP49:%.*]] = extractelement <3 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP50:%.*]] = extractelement <3 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP51:%.*]] = sext i16 [[TMP49]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = sext i16 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP53:%.*]] = xor i32 [[TMP51]], [[TMP52]]
; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP53]], 30
; CHECK-NEXT:    [[TMP55:%.*]] = or i32 [[TMP54]], 1
; CHECK-NEXT:    [[TMP56:%.*]] = sitofp i32 [[TMP51]] to float
; CHECK-NEXT:    [[TMP57:%.*]] = sitofp i32 [[TMP52]] to float
; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP57]])
; CHECK-NEXT:    [[TMP59:%.*]] = fmul fast float [[TMP56]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = call fast float @llvm.trunc.f32(float [[TMP59]])
; CHECK-NEXT:    [[TMP61:%.*]] = fneg fast float [[TMP60]]
; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP61]], float [[TMP57]], float [[TMP56]])
; CHECK-NEXT:    [[TMP63:%.*]] = fptosi float [[TMP60]] to i32
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.fabs.f32(float [[TMP62]])
; CHECK-NEXT:    [[TMP65:%.*]] = call fast float @llvm.fabs.f32(float [[TMP57]])
; CHECK-NEXT:    [[TMP66:%.*]] = fcmp fast oge float [[TMP64]], [[TMP65]]
; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP66]], i32 [[TMP55]], i32 0
; CHECK-NEXT:    [[TMP68:%.*]] = add i32 [[TMP63]], [[TMP67]]
; CHECK-NEXT:    [[TMP69:%.*]] = shl i32 [[TMP68]], 16
; CHECK-NEXT:    [[TMP70:%.*]] = ashr i32 [[TMP69]], 16
; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i16
; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <3 x i16> [[TMP48]], i16 [[TMP71]], i64 2
; CHECK-NEXT:    store <3 x i16> [[TMP72]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v3i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_sext_i32_i16 s8, s6
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s8
; GFX6-NEXT:    s_sext_i32_i16 s9, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s9
; GFX6-NEXT:    s_xor_b32 s8, s9, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s6, s6, 16
; GFX6-NEXT:    s_ashr_i32 s8, s8, 30
; GFX6-NEXT:    s_or_b32 s8, s8, 1
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s6
; GFX6-NEXT:    v_mov_b32_e32 v3, s8
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    s_ashr_i32 s4, s4, 16
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v3, v1
; GFX6-NEXT:    s_xor_b32 s4, s4, s6
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_mad_f32 v2, -v3, v1, v2
; GFX6-NEXT:    v_mov_b32_e32 v4, s4
; GFX6-NEXT:    s_sext_i32_i16 s4, s7
; GFX6-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v1|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s4
; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
; GFX6-NEXT:    s_sext_i32_i16 s5, s5
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v2
; GFX6-NEXT:    s_xor_b32 s4, s5, s4
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v3, -v4, v2, v3
; GFX6-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX6-NEXT:    v_mov_b32_e32 v5, s4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
; GFX6-NEXT:    buffer_store_short v2, off, s[0:3], 0 offset:4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v3i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_sext_i32_i16 s0, s6
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s0
; GFX9-NEXT:    s_sext_i32_i16 s1, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s8, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v2, -v3, v0, v2
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v2|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s8, 0
; GFX9-NEXT:    s_ashr_i32 s1, s6, 16
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s1
; GFX9-NEXT:    s_ashr_i32 s4, s4, 16
; GFX9-NEXT:    v_add_u32_e32 v2, s0, v3
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v0
; GFX9-NEXT:    s_xor_b32 s0, s4, s1
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    v_mad_f32 v3, -v4, v0, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v3|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX9-NEXT:    s_sext_i32_i16 s1, s7
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s1
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v3, s0, v4
; GFX9-NEXT:    s_sext_i32_i16 s0, s5
; GFX9-NEXT:    v_cvt_f32_i32_e32 v4, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v0
; GFX9-NEXT:    s_xor_b32 s0, s0, s1
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_or_b32 s4, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mad_f32 v4, -v5, v0, v4
; GFX9-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, |v0|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s4, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s0, v5
; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff, v2
; GFX9-NEXT:    v_lshl_or_b32 v2, v3, 16, v2
; GFX9-NEXT:    global_store_short v1, v0, s[2:3] offset:4
; GFX9-NEXT:    global_store_dword v1, v2, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <3 x i16> %x, %y
  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
  ret void
}

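; Per-lane srem: the sdiv_v3i16 sequence followed by mul/sub on each lane. On
; GFX6 the middle lane's divisor comes from v_alignbit_b32/v_bfe_i32 rather
; than a scalar sext, so that lane's sign fixup is computed with VALU
; xor/ashr/or instead of SALU ops.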
define amdgpu_kernel void @srem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x, <3 x i16> %y) {
; CHECK-LABEL: @srem_v3i16(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i16> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i16> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fneg fast float [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], [[TMP4]]
; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP3]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = shl i32 [[TMP22]], 16
; CHECK-NEXT:    [[TMP24:%.*]] = ashr i32 [[TMP23]], 16
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <3 x i16> undef, i16 [[TMP25]], i64 0
; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <3 x i16> [[X]], i64 1
; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <3 x i16> [[Y]], i64 1
; CHECK-NEXT:    [[TMP29:%.*]] = sext i16 [[TMP27]] to i32
; CHECK-NEXT:    [[TMP30:%.*]] = sext i16 [[TMP28]] to i32
; CHECK-NEXT:    [[TMP31:%.*]] = xor i32 [[TMP29]], [[TMP30]]
; CHECK-NEXT:    [[TMP32:%.*]] = ashr i32 [[TMP31]], 30
; CHECK-NEXT:    [[TMP33:%.*]] = or i32 [[TMP32]], 1
; CHECK-NEXT:    [[TMP34:%.*]] = sitofp i32 [[TMP29]] to float
; CHECK-NEXT:    [[TMP35:%.*]] = sitofp i32 [[TMP30]] to float
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP34]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.trunc.f32(float [[TMP37]])
; CHECK-NEXT:    [[TMP39:%.*]] = fneg fast float [[TMP38]]
; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP39]], float [[TMP35]], float [[TMP34]])
; CHECK-NEXT:    [[TMP41:%.*]] = fptosi float [[TMP38]] to i32
; CHECK-NEXT:    [[TMP42:%.*]] = call fast float @llvm.fabs.f32(float [[TMP40]])
; CHECK-NEXT:    [[TMP43:%.*]] = call fast float @llvm.fabs.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP44:%.*]] = fcmp fast oge float [[TMP42]], [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP44]], i32 [[TMP33]], i32 0
; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP41]], [[TMP45]]
; CHECK-NEXT:    [[TMP47:%.*]] = mul i32 [[TMP46]], [[TMP30]]
; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP29]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = shl i32 [[TMP48]], 16
; CHECK-NEXT:    [[TMP50:%.*]] = ashr i32 [[TMP49]], 16
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i32 [[TMP50]] to i16
; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <3 x i16> [[TMP26]], i16 [[TMP51]], i64 1
; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <3 x i16> [[X]], i64 2
; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <3 x i16> [[Y]], i64 2
; CHECK-NEXT:    [[TMP55:%.*]] = sext i16 [[TMP53]] to i32
; CHECK-NEXT:    [[TMP56:%.*]] = sext i16 [[TMP54]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 30
; CHECK-NEXT:    [[TMP59:%.*]] = or i32 [[TMP58]], 1
; CHECK-NEXT:    [[TMP60:%.*]] = sitofp i32 [[TMP55]] to float
; CHECK-NEXT:    [[TMP61:%.*]] = sitofp i32 [[TMP56]] to float
; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP61]])
; CHECK-NEXT:    [[TMP63:%.*]] = fmul fast float [[TMP60]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.trunc.f32(float [[TMP63]])
; CHECK-NEXT:    [[TMP65:%.*]] = fneg fast float [[TMP64]]
; CHECK-NEXT:    [[TMP66:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP65]], float [[TMP61]], float [[TMP60]])
; CHECK-NEXT:    [[TMP67:%.*]] = fptosi float [[TMP64]] to i32
; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.fabs.f32(float [[TMP61]])
; CHECK-NEXT:    [[TMP70:%.*]] = fcmp fast oge float [[TMP68]], [[TMP69]]
; CHECK-NEXT:    [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP59]], i32 0
; CHECK-NEXT:    [[TMP72:%.*]] = add i32 [[TMP67]], [[TMP71]]
; CHECK-NEXT:    [[TMP73:%.*]] = mul i32 [[TMP72]], [[TMP56]]
; CHECK-NEXT:    [[TMP74:%.*]] = sub i32 [[TMP55]], [[TMP73]]
; CHECK-NEXT:    [[TMP75:%.*]] = shl i32 [[TMP74]], 16
; CHECK-NEXT:    [[TMP76:%.*]] = ashr i32 [[TMP75]], 16
; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i16
; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <3 x i16> [[TMP52]], i16 [[TMP77]], i64 2
; CHECK-NEXT:    store <3 x i16> [[TMP78]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v3i16:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_sext_i32_i16 s8, s6
; GFX6-NEXT:    v_cvt_f32_i32_e32 v0, s8
; GFX6-NEXT:    s_sext_i32_i16 s9, s4
; GFX6-NEXT:    v_cvt_f32_i32_e32 v1, s9
; GFX6-NEXT:    s_xor_b32 s8, s9, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX6-NEXT:    s_ashr_i32 s8, s8, 30
; GFX6-NEXT:    s_or_b32 s8, s8, 1
; GFX6-NEXT:    v_mov_b32_e32 v3, s8
; GFX6-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
; GFX6-NEXT:    v_mov_b32_e32 v1, s4
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_alignbit_b32 v2, s7, v2, 16
; GFX6-NEXT:    v_bfe_i32 v3, v2, 0, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, v3
; GFX6-NEXT:    v_alignbit_b32 v1, s5, v1, 16
; GFX6-NEXT:    v_bfe_i32 v5, v1, 0, 16
; GFX6-NEXT:    v_cvt_f32_i32_e32 v6, v5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v4
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX6-NEXT:    v_xor_b32_e32 v3, v5, v3
; GFX6-NEXT:    v_ashrrev_i32_e32 v3, 30, v3
; GFX6-NEXT:    v_mul_f32_e32 v5, v6, v7
; GFX6-NEXT:    v_trunc_f32_e32 v5, v5
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_mad_f32 v6, -v5, v4, v6
; GFX6-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX6-NEXT:    s_sext_i32_i16 s4, s7
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, |v4|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, s4
; GFX6-NEXT:    v_or_b32_e32 v3, 1, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    s_sext_i32_i16 s6, s5
; GFX6-NEXT:    v_mul_lo_u32 v2, v3, v2
; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, s6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v4
; GFX6-NEXT:    s_xor_b32 s4, s6, s4
; GFX6-NEXT:    s_ashr_i32 s4, s4, 30
; GFX6-NEXT:    s_or_b32 s4, s4, 1
; GFX6-NEXT:    v_mul_f32_e32 v5, v3, v5
; GFX6-NEXT:    v_trunc_f32_e32 v5, v5
; GFX6-NEXT:    v_mad_f32 v3, -v5, v4, v3
; GFX6-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX6-NEXT:    v_mov_b32_e32 v6, s4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v4|
; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v3, s7
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, v1, v2
; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s5, v3
; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
; GFX6-NEXT:    buffer_store_short v2, off, s[0:3], 0 offset:4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v3i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_sext_i32_i16 s8, s6
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s8
; GFX9-NEXT:    s_sext_i32_i16 s9, s4
; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, s9
; GFX9-NEXT:    s_xor_b32 s2, s9, s8
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
; GFX9-NEXT:    s_ashr_i32 s2, s2, 30
; GFX9-NEXT:    s_or_b32 s10, s2, 1
; GFX9-NEXT:    s_sext_i32_i16 s7, s7
; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v1|, |v0|
; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], exec
; GFX9-NEXT:    s_cselect_b32 s2, s10, 0
; GFX9-NEXT:    s_ashr_i32 s6, s6, 16
; GFX9-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_f32_i32_e32 v0, s6
; GFX9-NEXT:    s_ashr_i32 s4, s4, 16
; GFX9-NEXT:    s_sext_i32_i16 s5, s5
; GFX9-NEXT:    v_add_u32_e32 v1, s2, v2
; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v3, v0
; GFX9-NEXT:    s_xor_b32 s2, s4, s6
; GFX9-NEXT:    s_ashr_i32 s2, s2, 30
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, s8
; GFX9-NEXT:    v_mul_f32_e32 v3, v2, v3
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mad_f32 v2, -v3, v0, v2
; GFX9-NEXT:    s_or_b32 s8, s2, 1
; GFX9-NEXT:    v_cvt_i32_f32_e32 v3, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v2|, |v0|
; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, s7
; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], exec
; GFX9-NEXT:    s_cselect_b32 s2, s8, 0
; GFX9-NEXT:    v_add_u32_e32 v0, s2, v3
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, s5
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v4, v2
; GFX9-NEXT:    s_xor_b32 s2, s5, s7
; GFX9-NEXT:    s_ashr_i32 s2, s2, 30
; GFX9-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX9-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    v_mad_f32 v3, -v4, v2, v3
; GFX9-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX9-NEXT:    s_or_b32 s6, s2, 1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v3|, |v2|
; GFX9-NEXT:    s_and_b64 s[2:3], s[2:3], exec
; GFX9-NEXT:    s_cselect_b32 s2, s6, 0
; GFX9-NEXT:    v_add_u32_e32 v2, s2, v4
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_mul_lo_u32 v2, v2, s7
; GFX9-NEXT:    v_sub_u32_e32 v1, s9, v1
; GFX9-NEXT:    v_mov_b32_e32 v3, 0
; GFX9-NEXT:    v_sub_u32_e32 v0, s4, v0
; GFX9-NEXT:    v_sub_u32_e32 v2, s5, v2
; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT:    v_lshl_or_b32 v0, v0, 16, v1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_short v3, v2, s[0:1] offset:4
; GFX9-NEXT:    global_store_dword v3, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = srem <3 x i16> %x, %y
  store <3 x i16> %r, <3 x i16> addrspace(1)* %out
  ret void
}

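; i15 lanes: the same unsigned expansion with an 'and ..., 32767' mask per
; lane. The GFX6 code unpacks the 15-bit fields with v_alignbit_b32 and
; v_and 0x7fff, then repacks the three quotients (shifts by 15 and 30) into
; the 45-bit result before storing.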
define amdgpu_kernel void @udiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
; CHECK-LABEL: @udiv_v3i15(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i15 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = zext i15 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fneg fast float [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 32767
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i15
; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <3 x i15> undef, i15 [[TMP19]], i64 0
; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <3 x i15> [[X]], i64 1
; CHECK-NEXT:    [[TMP22:%.*]] = extractelement <3 x i15> [[Y]], i64 1
; CHECK-NEXT:    [[TMP23:%.*]] = zext i15 [[TMP21]] to i32
; CHECK-NEXT:    [[TMP24:%.*]] = zext i15 [[TMP22]] to i32
; CHECK-NEXT:    [[TMP25:%.*]] = uitofp i32 [[TMP23]] to float
; CHECK-NEXT:    [[TMP26:%.*]] = uitofp i32 [[TMP24]] to float
; CHECK-NEXT:    [[TMP27:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP26]])
; CHECK-NEXT:    [[TMP28:%.*]] = fmul fast float [[TMP25]], [[TMP27]]
; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.trunc.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP30:%.*]] = fneg fast float [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float [[TMP26]], float [[TMP25]])
; CHECK-NEXT:    [[TMP32:%.*]] = fptoui float [[TMP29]] to i32
; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.fabs.f32(float [[TMP31]])
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.fabs.f32(float [[TMP26]])
; CHECK-NEXT:    [[TMP35:%.*]] = fcmp fast oge float [[TMP33]], [[TMP34]]
; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP35]], i32 1, i32 0
; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP32]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = and i32 [[TMP37]], 32767
; CHECK-NEXT:    [[TMP39:%.*]] = trunc i32 [[TMP38]] to i15
; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <3 x i15> [[TMP20]], i15 [[TMP39]], i64 1
; CHECK-NEXT:    [[TMP41:%.*]] = extractelement <3 x i15> [[X]], i64 2
; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <3 x i15> [[Y]], i64 2
; CHECK-NEXT:    [[TMP43:%.*]] = zext i15 [[TMP41]] to i32
; CHECK-NEXT:    [[TMP44:%.*]] = zext i15 [[TMP42]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = uitofp i32 [[TMP43]] to float
; CHECK-NEXT:    [[TMP46:%.*]] = uitofp i32 [[TMP44]] to float
; CHECK-NEXT:    [[TMP47:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP46]])
; CHECK-NEXT:    [[TMP48:%.*]] = fmul fast float [[TMP45]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = call fast float @llvm.trunc.f32(float [[TMP48]])
; CHECK-NEXT:    [[TMP50:%.*]] = fneg fast float [[TMP49]]
; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP50]], float [[TMP46]], float [[TMP45]])
; CHECK-NEXT:    [[TMP52:%.*]] = fptoui float [[TMP49]] to i32
; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.fabs.f32(float [[TMP51]])
; CHECK-NEXT:    [[TMP54:%.*]] = call fast float @llvm.fabs.f32(float [[TMP46]])
; CHECK-NEXT:    [[TMP55:%.*]] = fcmp fast oge float [[TMP53]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP55]], i32 1, i32 0
; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP52]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 32767
; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i15
; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <3 x i15> [[TMP40]], i15 [[TMP59]], i64 2
; CHECK-NEXT:    store <3 x i15> [[TMP60]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v3i15:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mov_b32_e32 v0, s2
; GFX6-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX6-NEXT:    s_and_b32 s8, s0, 0x7fff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s8
; GFX6-NEXT:    s_and_b32 s3, s2, 0x7fff
; GFX6-NEXT:    v_mov_b32_e32 v2, s0
; GFX6-NEXT:    s_bfe_u32 s0, s0, 0xf000f
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s0
; GFX6-NEXT:    s_bfe_u32 s2, s2, 0xf000f
; GFX6-NEXT:    v_alignbit_b32 v2, s1, v2, 30
; GFX6-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v5
; GFX6-NEXT:    v_and_b32_e32 v2, 0x7fff, v2
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v3, -v4, v1, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
; GFX6-NEXT:    v_mul_f32_e32 v1, v6, v7
; GFX6-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mad_f32 v4, -v1, v5, v6
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v6, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, v5
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
; GFX6-NEXT:    v_mul_f32_e32 v1, v0, v6
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v5, v1
; GFX6-NEXT:    v_mad_f32 v0, -v1, v2, v0
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v2
; GFX6-NEXT:    v_and_b32_e32 v2, 0x7fff, v3
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, 0, v5, vcc
; GFX6-NEXT:    v_and_b32_e32 v3, 0x7fff, v4
; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], 30
; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 15, v3
; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
; GFX6-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_waitcnt expcnt(0)
; GFX6-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0 offset:4
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v3i15:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX9-NEXT:    s_and_b32 s6, s2, 0x7fff
; GFX9-NEXT:    s_and_b32 s3, s0, 0x7fff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
; GFX9-NEXT:    v_mov_b32_e32 v3, s0
; GFX9-NEXT:    s_bfe_u32 s0, s0, 0xf000f
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, s6
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v1
; GFX9-NEXT:    v_cvt_f32_u32_e32 v6, s0
; GFX9-NEXT:    s_bfe_u32 s2, s2, 0xf000f
; GFX9-NEXT:    v_alignbit_b32 v3, s1, v3, 30
; GFX9-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX9-NEXT:    v_cvt_f32_u32_e32 v7, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v8, v6
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v3
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mad_f32 v4, -v5, v1, v4
; GFX9-NEXT:    v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, v1
; GFX9-NEXT:    v_mul_f32_e32 v1, v7, v8
; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v5, vcc
; GFX9-NEXT:    v_mad_f32 v5, -v1, v6, v7
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v7, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v1, vcc
; GFX9-NEXT:    v_mul_f32_e32 v1, v0, v7
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v6, v1
; GFX9-NEXT:    v_mad_f32 v0, -v1, v3, v0
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v3
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v4
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v6, vcc
; GFX9-NEXT:    v_and_b32_e32 v4, 0x7fff, v5
; GFX9-NEXT:    v_lshlrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 15, v4
; GFX9-NEXT:    v_or_b32_e32 v3, v3, v4
; GFX9-NEXT:    v_or_b32_e32 v0, v3, v0
; GFX9-NEXT:    global_store_dword v2, v0, s[4:5]
; GFX9-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX9-NEXT:    global_store_short v2, v0, s[4:5] offset:4
; GFX9-NEXT:    s_endpgm
  %r = udiv <3 x i15> %x, %y
  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
  ret void
}

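; Unsigned remainder on <3 x i15>: each element computes the same f32 rcp-based quotient as udiv_v3i15, then multiplies back and subtracts to recover the remainder, masked to 15 bits.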
define amdgpu_kernel void @urem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
; CHECK-LABEL: @urem_v3i15(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i15 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = zext i15 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = uitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP7:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast float [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = call fast float @llvm.trunc.f32(float [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fneg fast float [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float [[TMP6]], float [[TMP5]])
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP9]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = call fast float @llvm.fabs.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.fabs.f32(float [[TMP6]])
; CHECK-NEXT:    [[TMP15:%.*]] = fcmp fast oge float [[TMP13]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 1, i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP12]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], [[TMP4]]
; CHECK-NEXT:    [[TMP19:%.*]] = sub i32 [[TMP3]], [[TMP18]]
; CHECK-NEXT:    [[TMP20:%.*]] = and i32 [[TMP19]], 32767
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i15
; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <3 x i15> undef, i15 [[TMP21]], i64 0
; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <3 x i15> [[X]], i64 1
; CHECK-NEXT:    [[TMP24:%.*]] = extractelement <3 x i15> [[Y]], i64 1
; CHECK-NEXT:    [[TMP25:%.*]] = zext i15 [[TMP23]] to i32
; CHECK-NEXT:    [[TMP26:%.*]] = zext i15 [[TMP24]] to i32
; CHECK-NEXT:    [[TMP27:%.*]] = uitofp i32 [[TMP25]] to float
; CHECK-NEXT:    [[TMP28:%.*]] = uitofp i32 [[TMP26]] to float
; CHECK-NEXT:    [[TMP29:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP30:%.*]] = fmul fast float [[TMP27]], [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = call fast float @llvm.trunc.f32(float [[TMP30]])
; CHECK-NEXT:    [[TMP32:%.*]] = fneg fast float [[TMP31]]
; CHECK-NEXT:    [[TMP33:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP32]], float [[TMP28]], float [[TMP27]])
; CHECK-NEXT:    [[TMP34:%.*]] = fptoui float [[TMP31]] to i32
; CHECK-NEXT:    [[TMP35:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.fabs.f32(float [[TMP28]])
; CHECK-NEXT:    [[TMP37:%.*]] = fcmp fast oge float [[TMP35]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP37]], i32 1, i32 0
; CHECK-NEXT:    [[TMP39:%.*]] = add i32 [[TMP34]], [[TMP38]]
; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP26]]
; CHECK-NEXT:    [[TMP41:%.*]] = sub i32 [[TMP25]], [[TMP40]]
; CHECK-NEXT:    [[TMP42:%.*]] = and i32 [[TMP41]], 32767
; CHECK-NEXT:    [[TMP43:%.*]] = trunc i32 [[TMP42]] to i15
; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <3 x i15> [[TMP22]], i15 [[TMP43]], i64 1
; CHECK-NEXT:    [[TMP45:%.*]] = extractelement <3 x i15> [[X]], i64 2
; CHECK-NEXT:    [[TMP46:%.*]] = extractelement <3 x i15> [[Y]], i64 2
; CHECK-NEXT:    [[TMP47:%.*]] = zext i15 [[TMP45]] to i32
; CHECK-NEXT:    [[TMP48:%.*]] = zext i15 [[TMP46]] to i32
; CHECK-NEXT:    [[TMP49:%.*]] = uitofp i32 [[TMP47]] to float
; CHECK-NEXT:    [[TMP50:%.*]] = uitofp i32 [[TMP48]] to float
; CHECK-NEXT:    [[TMP51:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP50]])
; CHECK-NEXT:    [[TMP52:%.*]] = fmul fast float [[TMP49]], [[TMP51]]
; CHECK-NEXT:    [[TMP53:%.*]] = call fast float @llvm.trunc.f32(float [[TMP52]])
; CHECK-NEXT:    [[TMP54:%.*]] = fneg fast float [[TMP53]]
; CHECK-NEXT:    [[TMP55:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP54]], float [[TMP50]], float [[TMP49]])
; CHECK-NEXT:    [[TMP56:%.*]] = fptoui float [[TMP53]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = call fast float @llvm.fabs.f32(float [[TMP55]])
; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.fabs.f32(float [[TMP50]])
; CHECK-NEXT:    [[TMP59:%.*]] = fcmp fast oge float [[TMP57]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP59]], i32 1, i32 0
; CHECK-NEXT:    [[TMP61:%.*]] = add i32 [[TMP56]], [[TMP60]]
; CHECK-NEXT:    [[TMP62:%.*]] = mul i32 [[TMP61]], [[TMP48]]
; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 [[TMP47]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 32767
; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i15
; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <3 x i15> [[TMP44]], i15 [[TMP65]], i64 2
; CHECK-NEXT:    store <3 x i15> [[TMP66]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v3i15:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s8, s2, 0x7fff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s8
; GFX6-NEXT:    s_and_b32 s9, s0, 0x7fff
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX6-NEXT:    v_mov_b32_e32 v2, s0
; GFX6-NEXT:    v_alignbit_b32 v2, s1, v2, 30
; GFX6-NEXT:    s_bfe_u32 s1, s0, 0xf000f
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s1
; GFX6-NEXT:    s_bfe_u32 s9, s2, 0xf000f
; GFX6-NEXT:    v_and_b32_e32 v2, 0x7fff, v2
; GFX6-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v3, -v4, v1, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v3, s9
; GFX6-NEXT:    v_mov_b32_e32 v0, s2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v5
; GFX6-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX6-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s2, v1
; GFX6-NEXT:    v_mul_f32_e32 v1, v3, v4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, v2
; GFX6-NEXT:    v_cvt_f32_u32_e32 v7, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mad_f32 v3, -v1, v5, v3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v8, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v5
; GFX6-NEXT:    s_lshr_b32 s0, s0, 15
; GFX6-NEXT:    v_mul_f32_e32 v3, v7, v8
; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v5, v3
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX6-NEXT:    v_mad_f32 v3, -v3, v4, v7
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s0
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v3, v2
; GFX6-NEXT:    s_lshr_b32 s3, s2, 15
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s3, v1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_and_b32_e32 v3, 0x7fff, v3
; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], 30
; GFX6-NEXT:    v_and_b32_e32 v2, 0x7fff, v6
; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 15, v3
; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
; GFX6-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_waitcnt expcnt(0)
; GFX6-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0 offset:4
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v3i15:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s6, s2, 0x7fff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v4, s6
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    s_and_b32 s7, s0, 0x7fff
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
; GFX9-NEXT:    s_bfe_u32 s6, s0, 0xf000f
; GFX9-NEXT:    v_cvt_f32_u32_e32 v6, s6
; GFX9-NEXT:    v_mov_b32_e32 v3, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v1
; GFX9-NEXT:    v_alignbit_b32 v3, s1, v3, 30
; GFX9-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX9-NEXT:    s_bfe_u32 s3, s2, 0xf000f
; GFX9-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mad_f32 v4, -v5, v1, v4
; GFX9-NEXT:    v_cvt_u32_f32_e32 v5, v5
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v3
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, v1
; GFX9-NEXT:    v_cvt_f32_u32_e32 v7, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v8, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
; GFX9-NEXT:    v_cvt_f32_u32_e32 v5, v3
; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
; GFX9-NEXT:    v_mul_f32_e32 v4, v7, v8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v8, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v9, v5
; GFX9-NEXT:    v_trunc_f32_e32 v4, v4
; GFX9-NEXT:    v_mad_f32 v7, -v4, v6, v7
; GFX9-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v7|, v6
; GFX9-NEXT:    v_mul_f32_e32 v6, v8, v9
; GFX9-NEXT:    v_trunc_f32_e32 v6, v6
; GFX9-NEXT:    v_cvt_u32_f32_e32 v7, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v4, vcc
; GFX9-NEXT:    v_mad_f32 v6, -v6, v5, v8
; GFX9-NEXT:    s_lshr_b32 s1, s0, 15
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, v5
; GFX9-NEXT:    v_mul_lo_u32 v4, v4, s1
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, s0
; GFX9-NEXT:    v_mul_lo_u32 v3, v5, v3
; GFX9-NEXT:    s_lshr_b32 s0, s2, 15
; GFX9-NEXT:    v_sub_u32_e32 v4, s0, v4
; GFX9-NEXT:    v_sub_u32_e32 v5, s2, v1
; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v3
; GFX9-NEXT:    v_and_b32_e32 v4, 0x7fff, v4
; GFX9-NEXT:    v_lshlrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v5
; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 15, v4
; GFX9-NEXT:    v_or_b32_e32 v3, v3, v4
; GFX9-NEXT:    v_or_b32_e32 v0, v3, v0
; GFX9-NEXT:    global_store_dword v2, v0, s[4:5]
; GFX9-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX9-NEXT:    global_store_short v2, v0, s[4:5] offset:4
; GFX9-NEXT:    s_endpgm
  %r = urem <3 x i15> %x, %y
  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
  ret void
}

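; Signed division on <3 x i15>: elements are sign-extended to i32, the quotient uses the f32 rcp-based sequence with a sign-derived +/-1 correction, and the result is sign-extended back via shl/ashr by 17.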
define amdgpu_kernel void @sdiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
; CHECK-LABEL: @sdiv_v3i15(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sext i15 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = sext i15 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fneg fast float [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 17
; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 17
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i15
; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <3 x i15> undef, i15 [[TMP23]], i64 0
; CHECK-NEXT:    [[TMP25:%.*]] = extractelement <3 x i15> [[X]], i64 1
; CHECK-NEXT:    [[TMP26:%.*]] = extractelement <3 x i15> [[Y]], i64 1
; CHECK-NEXT:    [[TMP27:%.*]] = sext i15 [[TMP25]] to i32
; CHECK-NEXT:    [[TMP28:%.*]] = sext i15 [[TMP26]] to i32
; CHECK-NEXT:    [[TMP29:%.*]] = xor i32 [[TMP27]], [[TMP28]]
; CHECK-NEXT:    [[TMP30:%.*]] = ashr i32 [[TMP29]], 30
; CHECK-NEXT:    [[TMP31:%.*]] = or i32 [[TMP30]], 1
; CHECK-NEXT:    [[TMP32:%.*]] = sitofp i32 [[TMP27]] to float
; CHECK-NEXT:    [[TMP33:%.*]] = sitofp i32 [[TMP28]] to float
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP32]], [[TMP34]]
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.trunc.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fneg fast float [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP37]], float [[TMP33]], float [[TMP32]])
; CHECK-NEXT:    [[TMP39:%.*]] = fptosi float [[TMP36]] to i32
; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.fabs.f32(float [[TMP38]])
; CHECK-NEXT:    [[TMP41:%.*]] = call fast float @llvm.fabs.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP42:%.*]] = fcmp fast oge float [[TMP40]], [[TMP41]]
; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i32 [[TMP31]], i32 0
; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP39]], [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = shl i32 [[TMP44]], 17
; CHECK-NEXT:    [[TMP46:%.*]] = ashr i32 [[TMP45]], 17
; CHECK-NEXT:    [[TMP47:%.*]] = trunc i32 [[TMP46]] to i15
; CHECK-NEXT:    [[TMP48:%.*]] = insertelement <3 x i15> [[TMP24]], i15 [[TMP47]], i64 1
; CHECK-NEXT:    [[TMP49:%.*]] = extractelement <3 x i15> [[X]], i64 2
; CHECK-NEXT:    [[TMP50:%.*]] = extractelement <3 x i15> [[Y]], i64 2
; CHECK-NEXT:    [[TMP51:%.*]] = sext i15 [[TMP49]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = sext i15 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP53:%.*]] = xor i32 [[TMP51]], [[TMP52]]
; CHECK-NEXT:    [[TMP54:%.*]] = ashr i32 [[TMP53]], 30
; CHECK-NEXT:    [[TMP55:%.*]] = or i32 [[TMP54]], 1
; CHECK-NEXT:    [[TMP56:%.*]] = sitofp i32 [[TMP51]] to float
; CHECK-NEXT:    [[TMP57:%.*]] = sitofp i32 [[TMP52]] to float
; CHECK-NEXT:    [[TMP58:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP57]])
; CHECK-NEXT:    [[TMP59:%.*]] = fmul fast float [[TMP56]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = call fast float @llvm.trunc.f32(float [[TMP59]])
; CHECK-NEXT:    [[TMP61:%.*]] = fneg fast float [[TMP60]]
; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP61]], float [[TMP57]], float [[TMP56]])
; CHECK-NEXT:    [[TMP63:%.*]] = fptosi float [[TMP60]] to i32
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.fabs.f32(float [[TMP62]])
; CHECK-NEXT:    [[TMP65:%.*]] = call fast float @llvm.fabs.f32(float [[TMP57]])
; CHECK-NEXT:    [[TMP66:%.*]] = fcmp fast oge float [[TMP64]], [[TMP65]]
; CHECK-NEXT:    [[TMP67:%.*]] = select i1 [[TMP66]], i32 [[TMP55]], i32 0
; CHECK-NEXT:    [[TMP68:%.*]] = add i32 [[TMP63]], [[TMP67]]
; CHECK-NEXT:    [[TMP69:%.*]] = shl i32 [[TMP68]], 17
; CHECK-NEXT:    [[TMP70:%.*]] = ashr i32 [[TMP69]], 17
; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i15
; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <3 x i15> [[TMP48]], i15 [[TMP71]], i64 2
; CHECK-NEXT:    store <3 x i15> [[TMP72]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v3i15:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mov_b32_e32 v0, s2
; GFX6-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX6-NEXT:    s_bfe_i32 s3, s0, 0xf0000
; GFX6-NEXT:    v_cvt_f32_i32_e32 v2, s3
; GFX6-NEXT:    v_mov_b32_e32 v1, s0
; GFX6-NEXT:    v_alignbit_b32 v1, s1, v1, 30
; GFX6-NEXT:    s_bfe_i32 s1, s2, 0xf0000
; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, s1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v4, v2
; GFX6-NEXT:    s_xor_b32 s1, s1, s3
; GFX6-NEXT:    s_bfe_i32 s0, s0, 0xf000f
; GFX6-NEXT:    s_ashr_i32 s1, s1, 30
; GFX6-NEXT:    v_mul_f32_e32 v4, v3, v4
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mad_f32 v3, -v4, v2, v3
; GFX6-NEXT:    v_cvt_i32_f32_e32 v4, v4
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v3, s0
; GFX6-NEXT:    s_or_b32 s1, s1, 1
; GFX6-NEXT:    v_mov_b32_e32 v5, s1
; GFX6-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
; GFX6-NEXT:    s_bfe_i32 s1, s2, 0xf000f
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, s1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v5, v3
; GFX6-NEXT:    s_xor_b32 s0, s1, s0
; GFX6-NEXT:    v_bfe_i32 v1, v1, 0, 15
; GFX6-NEXT:    s_ashr_i32 s0, s0, 30
; GFX6-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX6-NEXT:    v_trunc_f32_e32 v5, v5
; GFX6-NEXT:    v_mad_f32 v4, -v5, v3, v4
; GFX6-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v3|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, v1
; GFX6-NEXT:    s_or_b32 s0, s0, 1
; GFX6-NEXT:    v_mov_b32_e32 v6, s0
; GFX6-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 15
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
; GFX6-NEXT:    v_cvt_f32_i32_e32 v5, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v6, v4
; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v1
; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
; GFX6-NEXT:    v_or_b32_e32 v0, 1, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, v5, v6
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mad_f32 v5, -v1, v4, v5
; GFX6-NEXT:    v_cvt_i32_f32_e32 v1, v1
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, |v4|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GFX6-NEXT:    v_and_b32_e32 v3, 0x7fff, v3
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], 30
; GFX6-NEXT:    v_and_b32_e32 v2, 0x7fff, v2
; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 15, v3
; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
; GFX6-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_waitcnt expcnt(0)
; GFX6-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0 offset:4
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v3i15:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_i32 s1, s2, 0xf0000
; GFX9-NEXT:    s_bfe_i32 s0, s4, 0xf0000
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, s0
; GFX9-NEXT:    v_cvt_f32_i32_e32 v4, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v5, v3
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX9-NEXT:    s_or_b32 s3, s0, 1
; GFX9-NEXT:    v_mul_f32_e32 v5, v4, v5
; GFX9-NEXT:    v_trunc_f32_e32 v5, v5
; GFX9-NEXT:    v_mad_f32 v4, -v5, v3, v4
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, |v3|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    v_cvt_i32_f32_e32 v5, v5
; GFX9-NEXT:    s_cselect_b32 s0, s3, 0
; GFX9-NEXT:    s_bfe_i32 s1, s4, 0xf000f
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, s1
; GFX9-NEXT:    v_add_u32_e32 v4, s0, v5
; GFX9-NEXT:    s_bfe_i32 s0, s2, 0xf000f
; GFX9-NEXT:    v_cvt_f32_i32_e32 v5, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v3
; GFX9-NEXT:    v_mov_b32_e32 v1, s4
; GFX9-NEXT:    v_alignbit_b32 v1, s5, v1, 30
; GFX9-NEXT:    s_xor_b32 s0, s0, s1
; GFX9-NEXT:    v_mul_f32_e32 v6, v5, v6
; GFX9-NEXT:    v_trunc_f32_e32 v6, v6
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    v_mad_f32 v5, -v6, v3, v5
; GFX9-NEXT:    v_bfe_i32 v1, v1, 0, 15
; GFX9-NEXT:    s_or_b32 s2, s0, 1
; GFX9-NEXT:    v_cvt_i32_f32_e32 v6, v6
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v5|, |v3|
; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, v1
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s2, 0
; GFX9-NEXT:    v_bfe_i32 v0, v0, 0, 15
; GFX9-NEXT:    v_add_u32_e32 v5, s0, v6
; GFX9-NEXT:    v_cvt_f32_i32_e32 v6, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v7, v3
; GFX9-NEXT:    v_xor_b32_e32 v0, v0, v1
; GFX9-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
; GFX9-NEXT:    v_or_b32_e32 v0, 1, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, v6, v7
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_i32_f32_e32 v7, v1
; GFX9-NEXT:    v_mad_f32 v1, -v1, v3, v6
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v3|
; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GFX9-NEXT:    v_add_u32_e32 v0, v7, v0
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v4
; GFX9-NEXT:    v_and_b32_e32 v4, 0x7fff, v5
; GFX9-NEXT:    v_lshlrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 15, v4
; GFX9-NEXT:    v_or_b32_e32 v3, v3, v4
; GFX9-NEXT:    v_or_b32_e32 v0, v3, v0
; GFX9-NEXT:    global_store_dword v2, v0, s[6:7]
; GFX9-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX9-NEXT:    global_store_short v2, v0, s[6:7] offset:4
; GFX9-NEXT:    s_endpgm
  %r = sdiv <3 x i15> %x, %y
  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
  ret void
}

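; Signed remainder on <3 x i15>: the quotient is computed as in sdiv_v3i15, then multiplied back and subtracted; the remainder is sign-extended back via shl/ashr by 17.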
define amdgpu_kernel void @srem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x, <3 x i15> %y) {
; CHECK-LABEL: @srem_v3i15(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <3 x i15> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <3 x i15> [[Y:%.*]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sext i15 [[TMP1]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = sext i15 [[TMP2]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP5]], 30
; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], 1
; CHECK-NEXT:    [[TMP8:%.*]] = sitofp i32 [[TMP3]] to float
; CHECK-NEXT:    [[TMP9:%.*]] = sitofp i32 [[TMP4]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = call fast float @llvm.trunc.f32(float [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fneg fast float [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP13]], float [[TMP9]], float [[TMP8]])
; CHECK-NEXT:    [[TMP15:%.*]] = fptosi float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP16:%.*]] = call fast float @llvm.fabs.f32(float [[TMP14]])
; CHECK-NEXT:    [[TMP17:%.*]] = call fast float @llvm.fabs.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP18:%.*]] = fcmp fast oge float [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP15]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], [[TMP4]]
; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP3]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = shl i32 [[TMP22]], 17
; CHECK-NEXT:    [[TMP24:%.*]] = ashr i32 [[TMP23]], 17
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i32 [[TMP24]] to i15
; CHECK-NEXT:    [[TMP26:%.*]] = insertelement <3 x i15> undef, i15 [[TMP25]], i64 0
; CHECK-NEXT:    [[TMP27:%.*]] = extractelement <3 x i15> [[X]], i64 1
; CHECK-NEXT:    [[TMP28:%.*]] = extractelement <3 x i15> [[Y]], i64 1
; CHECK-NEXT:    [[TMP29:%.*]] = sext i15 [[TMP27]] to i32
; CHECK-NEXT:    [[TMP30:%.*]] = sext i15 [[TMP28]] to i32
; CHECK-NEXT:    [[TMP31:%.*]] = xor i32 [[TMP29]], [[TMP30]]
; CHECK-NEXT:    [[TMP32:%.*]] = ashr i32 [[TMP31]], 30
; CHECK-NEXT:    [[TMP33:%.*]] = or i32 [[TMP32]], 1
; CHECK-NEXT:    [[TMP34:%.*]] = sitofp i32 [[TMP29]] to float
; CHECK-NEXT:    [[TMP35:%.*]] = sitofp i32 [[TMP30]] to float
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP34]], [[TMP36]]
; CHECK-NEXT:    [[TMP38:%.*]] = call fast float @llvm.trunc.f32(float [[TMP37]])
; CHECK-NEXT:    [[TMP39:%.*]] = fneg fast float [[TMP38]]
; CHECK-NEXT:    [[TMP40:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP39]], float [[TMP35]], float [[TMP34]])
; CHECK-NEXT:    [[TMP41:%.*]] = fptosi float [[TMP38]] to i32
; CHECK-NEXT:    [[TMP42:%.*]] = call fast float @llvm.fabs.f32(float [[TMP40]])
; CHECK-NEXT:    [[TMP43:%.*]] = call fast float @llvm.fabs.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP44:%.*]] = fcmp fast oge float [[TMP42]], [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = select i1 [[TMP44]], i32 [[TMP33]], i32 0
; CHECK-NEXT:    [[TMP46:%.*]] = add i32 [[TMP41]], [[TMP45]]
; CHECK-NEXT:    [[TMP47:%.*]] = mul i32 [[TMP46]], [[TMP30]]
; CHECK-NEXT:    [[TMP48:%.*]] = sub i32 [[TMP29]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = shl i32 [[TMP48]], 17
; CHECK-NEXT:    [[TMP50:%.*]] = ashr i32 [[TMP49]], 17
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i32 [[TMP50]] to i15
; CHECK-NEXT:    [[TMP52:%.*]] = insertelement <3 x i15> [[TMP26]], i15 [[TMP51]], i64 1
; CHECK-NEXT:    [[TMP53:%.*]] = extractelement <3 x i15> [[X]], i64 2
; CHECK-NEXT:    [[TMP54:%.*]] = extractelement <3 x i15> [[Y]], i64 2
; CHECK-NEXT:    [[TMP55:%.*]] = sext i15 [[TMP53]] to i32
; CHECK-NEXT:    [[TMP56:%.*]] = sext i15 [[TMP54]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = xor i32 [[TMP55]], [[TMP56]]
; CHECK-NEXT:    [[TMP58:%.*]] = ashr i32 [[TMP57]], 30
; CHECK-NEXT:    [[TMP59:%.*]] = or i32 [[TMP58]], 1
; CHECK-NEXT:    [[TMP60:%.*]] = sitofp i32 [[TMP55]] to float
; CHECK-NEXT:    [[TMP61:%.*]] = sitofp i32 [[TMP56]] to float
; CHECK-NEXT:    [[TMP62:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP61]])
; CHECK-NEXT:    [[TMP63:%.*]] = fmul fast float [[TMP60]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = call fast float @llvm.trunc.f32(float [[TMP63]])
; CHECK-NEXT:    [[TMP65:%.*]] = fneg fast float [[TMP64]]
; CHECK-NEXT:    [[TMP66:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP65]], float [[TMP61]], float [[TMP60]])
; CHECK-NEXT:    [[TMP67:%.*]] = fptosi float [[TMP64]] to i32
; CHECK-NEXT:    [[TMP68:%.*]] = call fast float @llvm.fabs.f32(float [[TMP66]])
; CHECK-NEXT:    [[TMP69:%.*]] = call fast float @llvm.fabs.f32(float [[TMP61]])
; CHECK-NEXT:    [[TMP70:%.*]] = fcmp fast oge float [[TMP68]], [[TMP69]]
; CHECK-NEXT:    [[TMP71:%.*]] = select i1 [[TMP70]], i32 [[TMP59]], i32 0
; CHECK-NEXT:    [[TMP72:%.*]] = add i32 [[TMP67]], [[TMP71]]
; CHECK-NEXT:    [[TMP73:%.*]] = mul i32 [[TMP72]], [[TMP56]]
; CHECK-NEXT:    [[TMP74:%.*]] = sub i32 [[TMP55]], [[TMP73]]
; CHECK-NEXT:    [[TMP75:%.*]] = shl i32 [[TMP74]], 17
; CHECK-NEXT:    [[TMP76:%.*]] = ashr i32 [[TMP75]], 17
; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i15
; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <3 x i15> [[TMP52]], i15 [[TMP77]], i64 2
; CHECK-NEXT:    store <3 x i15> [[TMP78]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v3i15:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_bfe_i32 s9, s2, 0xf0000
; GFX6-NEXT:    v_cvt_f32_i32_e32 v5, s9
; GFX6-NEXT:    v_mov_b32_e32 v2, s0
; GFX6-NEXT:    v_alignbit_b32 v2, s1, v2, 30
; GFX6-NEXT:    s_bfe_i32 s1, s0, 0xf0000
; GFX6-NEXT:    v_cvt_f32_i32_e32 v4, s1
; GFX6-NEXT:    s_xor_b32 s1, s9, s1
; GFX6-NEXT:    s_ashr_i32 s1, s1, 30
; GFX6-NEXT:    s_or_b32 s1, s1, 1
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v6, v4
; GFX6-NEXT:    v_mov_b32_e32 v7, s1
; GFX6-NEXT:    s_lshr_b32 s8, s0, 15
; GFX6-NEXT:    s_bfe_i32 s1, s2, 0xf000f
; GFX6-NEXT:    v_mul_f32_e32 v6, v5, v6
; GFX6-NEXT:    v_trunc_f32_e32 v6, v6
; GFX6-NEXT:    v_mad_f32 v5, -v6, v4, v5
; GFX6-NEXT:    v_cvt_i32_f32_e32 v6, v6
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, |v4|
; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v7, vcc
; GFX6-NEXT:    v_and_b32_e32 v3, 0x7fff, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
; GFX6-NEXT:    v_mul_lo_u32 v4, v4, s0
; GFX6-NEXT:    s_bfe_i32 s0, s0, 0xf000f
; GFX6-NEXT:    v_cvt_f32_i32_e32 v5, s0
; GFX6-NEXT:    v_cvt_f32_i32_e32 v6, s1
; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s2, v4
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v7, v5
; GFX6-NEXT:    s_xor_b32 s0, s1, s0
; GFX6-NEXT:    v_bfe_i32 v2, v2, 0, 15
; GFX6-NEXT:    s_ashr_i32 s0, s0, 30
; GFX6-NEXT:    v_mul_f32_e32 v7, v6, v7
; GFX6-NEXT:    v_trunc_f32_e32 v7, v7
; GFX6-NEXT:    v_mad_f32 v6, -v7, v5, v6
; GFX6-NEXT:    v_cvt_i32_f32_e32 v7, v7
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, |v5|
; GFX6-NEXT:    v_cvt_f32_i32_e32 v6, v2
; GFX6-NEXT:    v_mov_b32_e32 v0, s2
; GFX6-NEXT:    s_or_b32 s0, s0, 1
; GFX6-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX6-NEXT:    v_mov_b32_e32 v8, s0
; GFX6-NEXT:    v_and_b32_e32 v1, 0x7fff, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v5, 0, v8, vcc
; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 15
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
; GFX6-NEXT:    v_cvt_f32_i32_e32 v7, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v8, v6
; GFX6-NEXT:    v_xor_b32_e32 v0, v0, v2
; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
; GFX6-NEXT:    v_or_b32_e32 v0, 1, v0
; GFX6-NEXT:    v_mul_f32_e32 v2, v7, v8
; GFX6-NEXT:    v_trunc_f32_e32 v2, v2
; GFX6-NEXT:    v_mad_f32 v7, -v2, v6, v7
; GFX6-NEXT:    v_cvt_i32_f32_e32 v2, v2
; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v7|, |v6|
; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GFX6-NEXT:    v_mul_lo_u32 v5, v5, s8
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, v3
; GFX6-NEXT:    s_lshr_b32 s3, s2, 15
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s3, v5
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_and_b32_e32 v2, 0x7fff, v2
; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], 30
; GFX6-NEXT:    v_and_b32_e32 v3, 0x7fff, v4
; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 15, v2
; GFX6-NEXT:    v_or_b32_e32 v2, v3, v2
; GFX6-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_waitcnt expcnt(0)
; GFX6-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX6-NEXT:    buffer_store_short v0, off, s[4:7], 0 offset:4
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v3i15:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x34
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_bfe_i32 s1, s2, 0xf0000
; GFX9-NEXT:    v_cvt_f32_i32_e32 v5, s1
; GFX9-NEXT:    s_bfe_i32 s0, s6, 0xf0000
; GFX9-NEXT:    v_cvt_f32_i32_e32 v4, s0
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s6
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v4
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    s_lshr_b32 s8, s2, 15
; GFX9-NEXT:    v_alignbit_b32 v0, s3, v0, 30
; GFX9-NEXT:    v_mul_f32_e32 v6, v5, v6
; GFX9-NEXT:    v_trunc_f32_e32 v6, v6
; GFX9-NEXT:    v_mad_f32 v5, -v6, v4, v5
; GFX9-NEXT:    v_cvt_i32_f32_e32 v6, v6
; GFX9-NEXT:    v_alignbit_b32 v1, s7, v1, 30
; GFX9-NEXT:    s_lshr_b32 s3, s6, 15
; GFX9-NEXT:    s_or_b32 s7, s0, 1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v5|, |v4|
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s7, 0
; GFX9-NEXT:    v_add_u32_e32 v4, s0, v6
; GFX9-NEXT:    s_bfe_i32 s0, s6, 0xf000f
; GFX9-NEXT:    v_cvt_f32_i32_e32 v5, s0
; GFX9-NEXT:    s_bfe_i32 s1, s2, 0xf000f
; GFX9-NEXT:    v_cvt_f32_i32_e32 v6, s1
; GFX9-NEXT:    s_xor_b32 s0, s1, s0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v7, v5
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v1
; GFX9-NEXT:    s_ashr_i32 s0, s0, 30
; GFX9-NEXT:    v_bfe_i32 v1, v1, 0, 15
; GFX9-NEXT:    v_mul_f32_e32 v7, v6, v7
; GFX9-NEXT:    v_trunc_f32_e32 v7, v7
; GFX9-NEXT:    v_mad_f32 v6, -v7, v5, v6
; GFX9-NEXT:    v_cvt_i32_f32_e32 v7, v7
; GFX9-NEXT:    v_mul_lo_u32 v4, v4, s6
; GFX9-NEXT:    s_or_b32 s6, s0, 1
; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v6|, |v5|
; GFX9-NEXT:    v_cvt_f32_i32_e32 v6, v1
; GFX9-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GFX9-NEXT:    s_cselect_b32 s0, s6, 0
; GFX9-NEXT:    v_add_u32_e32 v5, s0, v7
; GFX9-NEXT:    v_bfe_i32 v7, v0, 0, 15
; GFX9-NEXT:    v_cvt_f32_i32_e32 v8, v7
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v9, v6
; GFX9-NEXT:    v_xor_b32_e32 v1, v7, v1
; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 30, v1
; GFX9-NEXT:    v_or_b32_e32 v1, 1, v1
; GFX9-NEXT:    v_mul_f32_e32 v7, v8, v9
; GFX9-NEXT:    v_trunc_f32_e32 v7, v7
; GFX9-NEXT:    v_cvt_i32_f32_e32 v9, v7
; GFX9-NEXT:    v_mad_f32 v7, -v7, v6, v8
; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v7|, |v6|
; GFX9-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
; GFX9-NEXT:    v_mul_lo_u32 v5, v5, s3
; GFX9-NEXT:    v_add_u32_e32 v1, v9, v1
; GFX9-NEXT:    v_mul_lo_u32 v1, v1, v3
; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
; GFX9-NEXT:    v_sub_u32_e32 v3, s2, v4
; GFX9-NEXT:    v_sub_u32_e32 v4, s8, v5
; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_and_b32_e32 v4, 0x7fff, v4
; GFX9-NEXT:    v_lshlrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff, v3
; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 15, v4
; GFX9-NEXT:    v_or_b32_e32 v3, v3, v4
; GFX9-NEXT:    v_or_b32_e32 v0, v3, v0
; GFX9-NEXT:    global_store_dword v2, v0, s[4:5]
; GFX9-NEXT:    v_and_b32_e32 v0, 0x1fff, v1
; GFX9-NEXT:    global_store_short v2, v0, s[4:5] offset:4
; GFX9-NEXT:    s_endpgm
  %r = srem <3 x i15> %x, %y
  store <3 x i15> %r, <3 x i15> addrspace(1)* %out
  ret void
}

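; Division by the odd constant 1235195 is left as a plain udiv in IR; the backend lowers it to a magic-number multiply-high with 0xb2a50881 followed by the usual subtract/shift/add fixup.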
define amdgpu_kernel void @udiv_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @udiv_i32_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[X:%.*]], 1235195
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i32_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    v_mov_b32_e32 v0, 0xb2a50881
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v0
; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 20, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i32_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_hi_u32 s0, s4, 0xb2a50881
; GFX9-NEXT:    s_sub_i32 s1, s4, s0
; GFX9-NEXT:    s_lshr_b32 s1, s1, 1
; GFX9-NEXT:    s_add_i32 s1, s1, s0
; GFX9-NEXT:    s_lshr_b32 s0, s1, 20
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = udiv i32 %x, 1235195
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

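; Division by 4096 stays a udiv in IR and is lowered to a single logical right shift by 12.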
define amdgpu_kernel void @udiv_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @udiv_i32_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[X:%.*]], 4096
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshr_b32 s4, s4, 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b32 s0, s4, 12
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = udiv i32 %x, 4096
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

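; Division by a shifted power of two (4096 << y) stays a udiv in IR; the backend folds it into a right shift by (y + 12).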
define amdgpu_kernel void @udiv_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @udiv_i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_add_i32 s5, s5, 12
; GFX6-NEXT:    s_lshr_b32 s4, s4, s5
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_add_i32 s0, s3, 12
; GFX9-NEXT:    s_lshr_b32 s0, s2, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[4:5]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i32 4096, %y
  %r = udiv i32 %x, %shl.y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

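; Vector division by a splat of 4096 is scalarized in IR into two udivs by 4096 and lowered to per-lane shifts by 12.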
define amdgpu_kernel void @udiv_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x) {
; CHECK-LABEL: @udiv_v2i32_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = udiv i32 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = udiv i32 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v2i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshr_b32 s4, s4, 12
; GFX6-NEXT:    s_lshr_b32 s5, s5, 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v2i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b32 s0, s2, 12
; GFX9-NEXT:    s_lshr_b32 s1, s3, 12
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = udiv <2 x i32> %x, <i32 4096, i32 4096>
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

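; Mixed divisors: lane 0 divides by 4096 and becomes a shift, while lane 1 divides by 4095 and uses a magic-number multiply-high with 0x100101 plus shifts.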
define amdgpu_kernel void @udiv_v2i32_mixed_pow2k_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x) {
; CHECK-LABEL: @udiv_v2i32_mixed_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = udiv i32 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = udiv i32 [[TMP4]], 4095
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v2i32_mixed_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x100101
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
; GFX6-NEXT:    s_lshr_b32 s4, s4, 12
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s5, v0
; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 11, v0
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v2i32_mixed_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_hi_u32 s1, s3, 0x100101
; GFX9-NEXT:    s_lshr_b32 s0, s2, 12
; GFX9-NEXT:    s_sub_i32 s2, s3, s1
; GFX9-NEXT:    s_lshr_b32 s2, s2, 1
; GFX9-NEXT:    s_add_i32 s2, s2, s1
; GFX9-NEXT:    s_lshr_b32 s1, s2, 11
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = udiv <2 x i32> %x, <i32 4096, i32 4095>
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

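; With a non-constant shift amount the divisors are not known powers of two, so both lanes go through the full i32 rcp-based expansion in IR and the matching v_mul_hi sequence in codegen.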
define amdgpu_kernel void @udiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @udiv_v2i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i32> <i32 4096, i32 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP6:%.*]] = fptoui float [[TMP5]] to i32
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 0, [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP6]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = mul i32 [[TMP21]], [[TMP2]]
; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP1]], [[TMP22]]
; CHECK-NEXT:    [[TMP24:%.*]] = icmp uge i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP25:%.*]] = add i32 [[TMP21]], 1
; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP24]], i32 [[TMP25]], i32 [[TMP21]]
; CHECK-NEXT:    [[TMP27:%.*]] = sub i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP24]], i32 [[TMP27]], i32 [[TMP23]]
; CHECK-NEXT:    [[TMP29:%.*]] = icmp uge i32 [[TMP28]], [[TMP2]]
; CHECK-NEXT:    [[TMP30:%.*]] = add i32 [[TMP26]], 1
; CHECK-NEXT:    [[TMP31:%.*]] = select i1 [[TMP29]], i32 [[TMP30]], i32 [[TMP26]]
; CHECK-NEXT:    [[TMP32:%.*]] = insertelement <2 x i32> undef, i32 [[TMP31]], i64 0
; CHECK-NEXT:    [[TMP33:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP34:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP35:%.*]] = uitofp i32 [[TMP34]] to float
; CHECK-NEXT:    [[TMP36:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP35]])
; CHECK-NEXT:    [[TMP37:%.*]] = fmul fast float [[TMP36]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP38:%.*]] = fptoui float [[TMP37]] to i32
; CHECK-NEXT:    [[TMP39:%.*]] = sub i32 0, [[TMP34]]
; CHECK-NEXT:    [[TMP40:%.*]] = mul i32 [[TMP39]], [[TMP38]]
; CHECK-NEXT:    [[TMP41:%.*]] = zext i32 [[TMP38]] to i64
; CHECK-NEXT:    [[TMP42:%.*]] = zext i32 [[TMP40]] to i64
; CHECK-NEXT:    [[TMP43:%.*]] = mul i64 [[TMP41]], [[TMP42]]
; CHECK-NEXT:    [[TMP44:%.*]] = trunc i64 [[TMP43]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = lshr i64 [[TMP43]], 32
; CHECK-NEXT:    [[TMP46:%.*]] = trunc i64 [[TMP45]] to i32
; CHECK-NEXT:    [[TMP47:%.*]] = add i32 [[TMP38]], [[TMP46]]
; CHECK-NEXT:    [[TMP48:%.*]] = zext i32 [[TMP33]] to i64
; CHECK-NEXT:    [[TMP49:%.*]] = zext i32 [[TMP47]] to i64
; CHECK-NEXT:    [[TMP50:%.*]] = mul i64 [[TMP48]], [[TMP49]]
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i64 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = lshr i64 [[TMP50]], 32
; CHECK-NEXT:    [[TMP53:%.*]] = trunc i64 [[TMP52]] to i32
; CHECK-NEXT:    [[TMP54:%.*]] = mul i32 [[TMP53]], [[TMP34]]
; CHECK-NEXT:    [[TMP55:%.*]] = sub i32 [[TMP33]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = icmp uge i32 [[TMP55]], [[TMP34]]
; CHECK-NEXT:    [[TMP57:%.*]] = add i32 [[TMP53]], 1
; CHECK-NEXT:    [[TMP58:%.*]] = select i1 [[TMP56]], i32 [[TMP57]], i32 [[TMP53]]
; CHECK-NEXT:    [[TMP59:%.*]] = sub i32 [[TMP55]], [[TMP34]]
; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP56]], i32 [[TMP59]], i32 [[TMP55]]
; CHECK-NEXT:    [[TMP61:%.*]] = icmp uge i32 [[TMP60]], [[TMP34]]
; CHECK-NEXT:    [[TMP62:%.*]] = add i32 [[TMP58]], 1
; CHECK-NEXT:    [[TMP63:%.*]] = select i1 [[TMP61]], i32 [[TMP62]], i32 [[TMP58]]
; CHECK-NEXT:    [[TMP64:%.*]] = insertelement <2 x i32> [[TMP32]], i32 [[TMP63]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP64]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v2i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s11, 0xf000
; GFX6-NEXT:    s_mov_b32 s10, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s2, 0x1000, s6
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s2
; GFX6-NEXT:    s_lshl_b32 s3, 0x1000, s7
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s3
; GFX6-NEXT:    s_sub_i32 s0, 0, s2
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v0
; GFX6-NEXT:    s_sub_i32 s0, 0, s3
; GFX6-NEXT:    v_mul_lo_u32 v3, s0, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
; GFX6-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s4, v2
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s2, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s2, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s5, v4
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s3, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v2i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s6, 0x1000, s6
; GFX9-NEXT:    s_lshl_b32 s7, 0x1000, s7
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
; GFX9-NEXT:    s_sub_i32 s2, 0, s6
; GFX9-NEXT:    s_sub_i32 s3, 0, s7
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_lo_u32 v2, s2, v0
; GFX9-NEXT:    v_mul_lo_u32 v3, s3, v1
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX9-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    v_mul_lo_u32 v3, v0, s6
; GFX9-NEXT:    v_mul_lo_u32 v4, v1, s7
; GFX9-NEXT:    v_add_u32_e32 v5, 1, v0
; GFX9-NEXT:    v_add_u32_e32 v6, 1, v1
; GFX9-NEXT:    v_sub_u32_e32 v3, s4, v3
; GFX9-NEXT:    v_sub_u32_e32 v4, s5, v4
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v5, s6, v3
; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s7, v4
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[0:1]
; GFX9-NEXT:    v_subrev_u32_e32 v6, s7, v4
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX9-NEXT:    v_add_u32_e32 v5, 1, v0
; GFX9-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[0:1]
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v1
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s7, v4
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i32> <i32 4096, i32 4096>, %y
  %r = udiv <2 x i32> %x, %shl.y
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

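; Remainder by 1235195 stays a urem in IR; codegen computes the quotient with the 0xb2a50881 magic multiply, multiplies it by 0x12d8fb, and subtracts from x.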
define amdgpu_kernel void @urem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @urem_i32_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[X:%.*]], 1235195
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i32_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    v_mov_b32_e32 v0, 0xb2a50881
; GFX6-NEXT:    s_mov_b32 s2, 0x12d8fb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v0
; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 20, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i32_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_hi_u32 s0, s4, 0xb2a50881
; GFX9-NEXT:    s_sub_i32 s1, s4, s0
; GFX9-NEXT:    s_lshr_b32 s1, s1, 1
; GFX9-NEXT:    s_add_i32 s1, s1, s0
; GFX9-NEXT:    s_lshr_b32 s0, s1, 20
; GFX9-NEXT:    s_mul_i32 s0, s0, 0x12d8fb
; GFX9-NEXT:    s_sub_i32 s0, s4, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = urem i32 %x, 1235195
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

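; Power-of-two divisor: urem by 4096 folds to a single AND with 0xfff on both targets.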
define amdgpu_kernel void @urem_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @urem_i32_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[X:%.*]], 4096
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s4, s4, 0xfff
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s0, s4, 0xfff
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = urem i32 %x, 4096
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

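; Shifted power-of-two divisor: the IR pass keeps the urem by (4096 << %y), and the backend lowers it
; to x & ((4096 << %y) - 1) via s_lshl_b32 / s_add_i32 -1 / s_and_b32.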
define amdgpu_kernel void @urem_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @urem_i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s5, 0x1000, s5
; GFX6-NEXT:    s_add_i32 s5, s5, -1
; GFX6-NEXT:    s_and_b32 s4, s4, s5
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s0, 0x1000, s3
; GFX9-NEXT:    s_add_i32 s0, s0, -1
; GFX9-NEXT:    s_and_b32 s0, s2, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[4:5]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i32 4096, %y
  %r = urem i32 %x, %shl.y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

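; Vector power-of-two divisor: the IR pass scalarizes the <2 x i32> urem into two urems by 4096, and
; each lane again becomes an AND with 0xfff.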
define amdgpu_kernel void @urem_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x) {
; CHECK-LABEL: @urem_v2i32_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = urem i32 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = urem i32 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v2i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s4, s4, 0xfff
; GFX6-NEXT:    s_and_b32 s5, s5, 0xfff
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v2i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s0, s2, 0xfff
; GFX9-NEXT:    s_and_b32 s1, s3, 0xfff
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = urem <2 x i32> %x, <i32 4096, i32 4096>
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

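; Vector shifted-power-of-two divisor: the IR pass scalarizes the urem and expands each lane with the
; reciprocal-based sequence (llvm.amdgcn.rcp.f32 seed, 32x32->64 multiplies, two conditional
; subtracts); GFX9 moves the arithmetic into scalar registers via v_readfirstlane_b32.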
define amdgpu_kernel void @urem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @urem_v2i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i32> <i32 4096, i32 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = uitofp i32 [[TMP2]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP6:%.*]] = fptoui float [[TMP5]] to i32
; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 0, [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP6]], [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = mul i32 [[TMP21]], [[TMP2]]
; CHECK-NEXT:    [[TMP23:%.*]] = sub i32 [[TMP1]], [[TMP22]]
; CHECK-NEXT:    [[TMP24:%.*]] = icmp uge i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP25:%.*]] = sub i32 [[TMP23]], [[TMP2]]
; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP24]], i32 [[TMP25]], i32 [[TMP23]]
; CHECK-NEXT:    [[TMP27:%.*]] = icmp uge i32 [[TMP26]], [[TMP2]]
; CHECK-NEXT:    [[TMP28:%.*]] = sub i32 [[TMP26]], [[TMP2]]
; CHECK-NEXT:    [[TMP29:%.*]] = select i1 [[TMP27]], i32 [[TMP28]], i32 [[TMP26]]
; CHECK-NEXT:    [[TMP30:%.*]] = insertelement <2 x i32> undef, i32 [[TMP29]], i64 0
; CHECK-NEXT:    [[TMP31:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP32:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP33:%.*]] = uitofp i32 [[TMP32]] to float
; CHECK-NEXT:    [[TMP34:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP33]])
; CHECK-NEXT:    [[TMP35:%.*]] = fmul fast float [[TMP34]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP36:%.*]] = fptoui float [[TMP35]] to i32
; CHECK-NEXT:    [[TMP37:%.*]] = sub i32 0, [[TMP32]]
; CHECK-NEXT:    [[TMP38:%.*]] = mul i32 [[TMP37]], [[TMP36]]
; CHECK-NEXT:    [[TMP39:%.*]] = zext i32 [[TMP36]] to i64
; CHECK-NEXT:    [[TMP40:%.*]] = zext i32 [[TMP38]] to i64
; CHECK-NEXT:    [[TMP41:%.*]] = mul i64 [[TMP39]], [[TMP40]]
; CHECK-NEXT:    [[TMP42:%.*]] = trunc i64 [[TMP41]] to i32
; CHECK-NEXT:    [[TMP43:%.*]] = lshr i64 [[TMP41]], 32
; CHECK-NEXT:    [[TMP44:%.*]] = trunc i64 [[TMP43]] to i32
; CHECK-NEXT:    [[TMP45:%.*]] = add i32 [[TMP36]], [[TMP44]]
; CHECK-NEXT:    [[TMP46:%.*]] = zext i32 [[TMP31]] to i64
; CHECK-NEXT:    [[TMP47:%.*]] = zext i32 [[TMP45]] to i64
; CHECK-NEXT:    [[TMP48:%.*]] = mul i64 [[TMP46]], [[TMP47]]
; CHECK-NEXT:    [[TMP49:%.*]] = trunc i64 [[TMP48]] to i32
; CHECK-NEXT:    [[TMP50:%.*]] = lshr i64 [[TMP48]], 32
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i64 [[TMP50]] to i32
; CHECK-NEXT:    [[TMP52:%.*]] = mul i32 [[TMP51]], [[TMP32]]
; CHECK-NEXT:    [[TMP53:%.*]] = sub i32 [[TMP31]], [[TMP52]]
; CHECK-NEXT:    [[TMP54:%.*]] = icmp uge i32 [[TMP53]], [[TMP32]]
; CHECK-NEXT:    [[TMP55:%.*]] = sub i32 [[TMP53]], [[TMP32]]
; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP54]], i32 [[TMP55]], i32 [[TMP53]]
; CHECK-NEXT:    [[TMP57:%.*]] = icmp uge i32 [[TMP56]], [[TMP32]]
; CHECK-NEXT:    [[TMP58:%.*]] = sub i32 [[TMP56]], [[TMP32]]
; CHECK-NEXT:    [[TMP59:%.*]] = select i1 [[TMP57]], i32 [[TMP58]], i32 [[TMP56]]
; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <2 x i32> [[TMP30]], i32 [[TMP59]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP60]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v2i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s6, 0x1000, s6
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s6
; GFX6-NEXT:    s_lshl_b32 s7, 0x1000, s7
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s7
; GFX6-NEXT:    s_sub_i32 s2, 0, s6
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v0
; GFX6-NEXT:    s_sub_i32 s2, 0, s7
; GFX6-NEXT:    v_mul_lo_u32 v3, s2, v1
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s7
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s6, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s5, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s6, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s7, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s7, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s7, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s7, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v2i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s3, 0x1000, s6
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_lshl_b32 s2, 0x1000, s7
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s2
; GFX9-NEXT:    s_sub_i32 s6, 0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_readfirstlane_b32 s7, v0
; GFX9-NEXT:    s_mul_i32 s6, s6, s7
; GFX9-NEXT:    s_mul_hi_u32 s6, s7, s6
; GFX9-NEXT:    s_add_i32 s7, s7, s6
; GFX9-NEXT:    s_mul_hi_u32 s6, s4, s7
; GFX9-NEXT:    s_mul_i32 s6, s6, s3
; GFX9-NEXT:    s_sub_i32 s4, s4, s6
; GFX9-NEXT:    s_sub_i32 s6, s4, s3
; GFX9-NEXT:    s_cmp_ge_u32 s4, s3
; GFX9-NEXT:    s_cselect_b32 s4, s6, s4
; GFX9-NEXT:    s_sub_i32 s6, s4, s3
; GFX9-NEXT:    s_cmp_ge_u32 s4, s3
; GFX9-NEXT:    v_readfirstlane_b32 s8, v1
; GFX9-NEXT:    s_cselect_b32 s3, s6, s4
; GFX9-NEXT:    s_sub_i32 s4, 0, s2
; GFX9-NEXT:    s_mul_i32 s4, s4, s8
; GFX9-NEXT:    s_mul_hi_u32 s4, s8, s4
; GFX9-NEXT:    s_add_i32 s8, s8, s4
; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s8
; GFX9-NEXT:    s_mul_i32 s4, s4, s2
; GFX9-NEXT:    s_sub_i32 s4, s5, s4
; GFX9-NEXT:    s_sub_i32 s5, s4, s2
; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
; GFX9-NEXT:    s_cselect_b32 s4, s5, s4
; GFX9-NEXT:    s_sub_i32 s5, s4, s2
; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
; GFX9-NEXT:    s_cselect_b32 s2, s5, s4
; GFX9-NEXT:    v_mov_b32_e32 v0, s3
; GFX9-NEXT:    v_mov_b32_e32 v1, s2
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i32> <i32 4096, i32 4096>, %y
  %r = urem <2 x i32> %x, %shl.y
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

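; Signed odd-constant divisor: sdiv by 1235195 is kept in the IR and lowered with a signed magic
; multiply-high (0xd9528441), an add of the dividend, and shifts plus an add of the sign bit to round
; toward zero.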
define amdgpu_kernel void @sdiv_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @sdiv_i32_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = sdiv i32 [[X:%.*]], 1235195
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i32_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    v_mov_b32_e32 v0, 0xd9528441
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mul_hi_i32 v0, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 31, v0
; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 20, v0
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i32_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_hi_i32 s0, s4, 0xd9528441
; GFX9-NEXT:    s_add_i32 s0, s0, s4
; GFX9-NEXT:    s_lshr_b32 s1, s0, 31
; GFX9-NEXT:    s_ashr_i32 s0, s0, 20
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i32 %x, 1235195
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

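; Signed power-of-two divisor: sdiv by 4096 becomes an add of the sign-derived bias ((x >> 31) lshr 20)
; followed by an arithmetic shift right by 12.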
define amdgpu_kernel void @sdiv_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @sdiv_i32_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = sdiv i32 [[X:%.*]], 4096
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s5, s4, 31
; GFX6-NEXT:    s_lshr_b32 s5, s5, 20
; GFX6-NEXT:    s_add_i32 s4, s4, s5
; GFX6-NEXT:    s_ashr_i32 s4, s4, 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s4, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_add_i32 s4, s4, s0
; GFX9-NEXT:    s_ashr_i32 s0, s4, 12
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i32 %x, 4096
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

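; Signed shifted-power-of-two divisor: the IR pass keeps the sdiv by (4096 << %y); the backend takes
; absolute values of both operands, divides with the unsigned reciprocal sequence, and restores the
; result sign with xor/subtract.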
define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @sdiv_i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = sdiv i32 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s3, 0x1000, s3
; GFX6-NEXT:    s_ashr_i32 s8, s3, 31
; GFX6-NEXT:    s_add_i32 s3, s3, s8
; GFX6-NEXT:    s_xor_b32 s3, s3, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX6-NEXT:    s_sub_i32 s4, 0, s3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, s4, v0
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_ashr_i32 s0, s2, 31
; GFX6-NEXT:    s_add_i32 s1, s2, s0
; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    s_xor_b32 s2, s0, s8
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, v0, s3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s1, v1
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s3, v1
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s3, 0x1000, s3
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_add_i32 s3, s3, s4
; GFX9-NEXT:    s_xor_b32 s3, s3, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_sub_i32 s5, 0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_lo_u32 v1, s5, v0
; GFX9-NEXT:    s_ashr_i32 s5, s2, 31
; GFX9-NEXT:    s_add_i32 s2, s2, s5
; GFX9-NEXT:    s_xor_b32 s2, s2, s5
; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
; GFX9-NEXT:    v_mul_hi_u32 v0, s2, v0
; GFX9-NEXT:    v_mul_lo_u32 v1, v0, s3
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    v_sub_u32_e32 v1, s2, v1
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v3, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT:    v_add_u32_e32 v4, 1, v0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT:    s_xor_b32 s2, s5, s4
; GFX9-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX9-NEXT:    v_subrev_u32_e32 v0, s2, v0
; GFX9-NEXT:    global_store_dword v2, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i32 4096, %y
  %r = sdiv i32 %x, %shl.y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

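; Vector signed power-of-two divisor: the IR pass scalarizes the sdiv into two sdivs by 4096, each
; lowered as sign-bias plus an arithmetic shift right by 12.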
define amdgpu_kernel void @sdiv_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x) {
; CHECK-LABEL: @sdiv_v2i32_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = sdiv i32 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i32 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v2i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s6, s4, 31
; GFX6-NEXT:    s_ashr_i32 s7, s5, 31
; GFX6-NEXT:    s_lshr_b32 s6, s6, 20
; GFX6-NEXT:    s_add_i32 s4, s4, s6
; GFX6-NEXT:    s_lshr_b32 s6, s7, 20
; GFX6-NEXT:    s_add_i32 s5, s5, s6
; GFX6-NEXT:    s_ashr_i32 s4, s4, 12
; GFX6-NEXT:    s_ashr_i32 s5, s5, 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v2i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s2, 31
; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_lshr_b32 s1, s1, 20
; GFX9-NEXT:    s_add_i32 s0, s2, s0
; GFX9-NEXT:    s_add_i32 s1, s3, s1
; GFX9-NEXT:    s_ashr_i32 s0, s0, 12
; GFX9-NEXT:    s_ashr_i32 s1, s1, 12
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <2 x i32> %x, <i32 4096, i32 4096>
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

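; Mixed divisors <4096, 4095>: lane 0 uses the power-of-two shift lowering, while lane 1 (4095) uses a
; signed magic multiply-high with 0x80080081.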
define amdgpu_kernel void @ssdiv_v2i32_mixed_pow2k_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x) {
; CHECK-LABEL: @ssdiv_v2i32_mixed_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = sdiv i32 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i32 [[TMP4]], 4095
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: ssdiv_v2i32_mixed_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x80080081
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mul_hi_i32 v0, s5, v0
; GFX6-NEXT:    s_ashr_i32 s6, s4, 31
; GFX6-NEXT:    s_lshr_b32 s6, s6, 20
; GFX6-NEXT:    s_add_i32 s4, s4, s6
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s5, v0
; GFX6-NEXT:    s_ashr_i32 s4, s4, 12
; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 31, v0
; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 11, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v0
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: ssdiv_v2i32_mixed_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s2, 31
; GFX9-NEXT:    s_mul_hi_i32 s1, s3, 0x80080081
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_add_i32 s1, s1, s3
; GFX9-NEXT:    s_add_i32 s0, s2, s0
; GFX9-NEXT:    s_lshr_b32 s2, s1, 31
; GFX9-NEXT:    s_ashr_i32 s1, s1, 11
; GFX9-NEXT:    s_ashr_i32 s0, s0, 12
; GFX9-NEXT:    s_add_i32 s1, s1, s2
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <2 x i32> %x, <i32 4096, i32 4095>
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

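; Vector signed shifted-power-of-two divisor: the IR pass scalarizes and fully expands each lane,
; taking absolute values, dividing via the f32-reciprocal sequence, and restoring the sign with
; xor/subtract.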
define amdgpu_kernel void @sdiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @sdiv_v2i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i32> <i32 4096, i32 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP1]], 31
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP2]], 31
; CHECK-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP8:%.*]] = xor i32 [[TMP6]], [[TMP3]]
; CHECK-NEXT:    [[TMP9:%.*]] = xor i32 [[TMP7]], [[TMP4]]
; CHECK-NEXT:    [[TMP10:%.*]] = uitofp i32 [[TMP9]] to float
; CHECK-NEXT:    [[TMP11:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP10]])
; CHECK-NEXT:    [[TMP12:%.*]] = fmul fast float [[TMP11]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP13:%.*]] = fptoui float [[TMP12]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = sub i32 0, [[TMP9]]
; CHECK-NEXT:    [[TMP15:%.*]] = mul i32 [[TMP14]], [[TMP13]]
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP13]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP16]], [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
; CHECK-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP13]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[TMP22]] to i64
; CHECK-NEXT:    [[TMP25:%.*]] = mul i64 [[TMP23]], [[TMP24]]
; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[TMP25]] to i32
; CHECK-NEXT:    [[TMP27:%.*]] = lshr i64 [[TMP25]], 32
; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32
; CHECK-NEXT:    [[TMP29:%.*]] = mul i32 [[TMP28]], [[TMP9]]
; CHECK-NEXT:    [[TMP30:%.*]] = sub i32 [[TMP8]], [[TMP29]]
; CHECK-NEXT:    [[TMP31:%.*]] = icmp uge i32 [[TMP30]], [[TMP9]]
; CHECK-NEXT:    [[TMP32:%.*]] = add i32 [[TMP28]], 1
; CHECK-NEXT:    [[TMP33:%.*]] = select i1 [[TMP31]], i32 [[TMP32]], i32 [[TMP28]]
; CHECK-NEXT:    [[TMP34:%.*]] = sub i32 [[TMP30]], [[TMP9]]
; CHECK-NEXT:    [[TMP35:%.*]] = select i1 [[TMP31]], i32 [[TMP34]], i32 [[TMP30]]
; CHECK-NEXT:    [[TMP36:%.*]] = icmp uge i32 [[TMP35]], [[TMP9]]
; CHECK-NEXT:    [[TMP37:%.*]] = add i32 [[TMP33]], 1
; CHECK-NEXT:    [[TMP38:%.*]] = select i1 [[TMP36]], i32 [[TMP37]], i32 [[TMP33]]
; CHECK-NEXT:    [[TMP39:%.*]] = xor i32 [[TMP38]], [[TMP5]]
; CHECK-NEXT:    [[TMP40:%.*]] = sub i32 [[TMP39]], [[TMP5]]
; CHECK-NEXT:    [[TMP41:%.*]] = insertelement <2 x i32> undef, i32 [[TMP40]], i64 0
; CHECK-NEXT:    [[TMP42:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP43:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP44:%.*]] = ashr i32 [[TMP42]], 31
; CHECK-NEXT:    [[TMP45:%.*]] = ashr i32 [[TMP43]], 31
; CHECK-NEXT:    [[TMP46:%.*]] = xor i32 [[TMP44]], [[TMP45]]
; CHECK-NEXT:    [[TMP47:%.*]] = add i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT:    [[TMP48:%.*]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT:    [[TMP49:%.*]] = xor i32 [[TMP47]], [[TMP44]]
; CHECK-NEXT:    [[TMP50:%.*]] = xor i32 [[TMP48]], [[TMP45]]
; CHECK-NEXT:    [[TMP51:%.*]] = uitofp i32 [[TMP50]] to float
; CHECK-NEXT:    [[TMP52:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP51]])
; CHECK-NEXT:    [[TMP53:%.*]] = fmul fast float [[TMP52]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP54:%.*]] = fptoui float [[TMP53]] to i32
; CHECK-NEXT:    [[TMP55:%.*]] = sub i32 0, [[TMP50]]
; CHECK-NEXT:    [[TMP56:%.*]] = mul i32 [[TMP55]], [[TMP54]]
; CHECK-NEXT:    [[TMP57:%.*]] = zext i32 [[TMP54]] to i64
; CHECK-NEXT:    [[TMP58:%.*]] = zext i32 [[TMP56]] to i64
; CHECK-NEXT:    [[TMP59:%.*]] = mul i64 [[TMP57]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = trunc i64 [[TMP59]] to i32
; CHECK-NEXT:    [[TMP61:%.*]] = lshr i64 [[TMP59]], 32
; CHECK-NEXT:    [[TMP62:%.*]] = trunc i64 [[TMP61]] to i32
; CHECK-NEXT:    [[TMP63:%.*]] = add i32 [[TMP54]], [[TMP62]]
; CHECK-NEXT:    [[TMP64:%.*]] = zext i32 [[TMP49]] to i64
; CHECK-NEXT:    [[TMP65:%.*]] = zext i32 [[TMP63]] to i64
; CHECK-NEXT:    [[TMP66:%.*]] = mul i64 [[TMP64]], [[TMP65]]
; CHECK-NEXT:    [[TMP67:%.*]] = trunc i64 [[TMP66]] to i32
; CHECK-NEXT:    [[TMP68:%.*]] = lshr i64 [[TMP66]], 32
; CHECK-NEXT:    [[TMP69:%.*]] = trunc i64 [[TMP68]] to i32
; CHECK-NEXT:    [[TMP70:%.*]] = mul i32 [[TMP69]], [[TMP50]]
; CHECK-NEXT:    [[TMP71:%.*]] = sub i32 [[TMP49]], [[TMP70]]
; CHECK-NEXT:    [[TMP72:%.*]] = icmp uge i32 [[TMP71]], [[TMP50]]
; CHECK-NEXT:    [[TMP73:%.*]] = add i32 [[TMP69]], 1
; CHECK-NEXT:    [[TMP74:%.*]] = select i1 [[TMP72]], i32 [[TMP73]], i32 [[TMP69]]
; CHECK-NEXT:    [[TMP75:%.*]] = sub i32 [[TMP71]], [[TMP50]]
; CHECK-NEXT:    [[TMP76:%.*]] = select i1 [[TMP72]], i32 [[TMP75]], i32 [[TMP71]]
; CHECK-NEXT:    [[TMP77:%.*]] = icmp uge i32 [[TMP76]], [[TMP50]]
; CHECK-NEXT:    [[TMP78:%.*]] = add i32 [[TMP74]], 1
; CHECK-NEXT:    [[TMP79:%.*]] = select i1 [[TMP77]], i32 [[TMP78]], i32 [[TMP74]]
; CHECK-NEXT:    [[TMP80:%.*]] = xor i32 [[TMP79]], [[TMP46]]
; CHECK-NEXT:    [[TMP81:%.*]] = sub i32 [[TMP80]], [[TMP46]]
; CHECK-NEXT:    [[TMP82:%.*]] = insertelement <2 x i32> [[TMP41]], i32 [[TMP81]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP82]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v2i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s0, 0x1000, s10
; GFX6-NEXT:    s_ashr_i32 s1, s0, 31
; GFX6-NEXT:    s_add_i32 s0, s0, s1
; GFX6-NEXT:    s_xor_b32 s2, s0, s1
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s2
; GFX6-NEXT:    s_lshl_b32 s0, 0x1000, s11
; GFX6-NEXT:    s_ashr_i32 s3, s0, 31
; GFX6-NEXT:    s_add_i32 s0, s0, s3
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    s_sub_i32 s11, 0, s2
; GFX6-NEXT:    s_xor_b32 s10, s0, s3
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s10
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_ashr_i32 s0, s8, 31
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    s_add_i32 s8, s8, s0
; GFX6-NEXT:    v_mul_lo_u32 v2, s11, v0
; GFX6-NEXT:    s_xor_b32 s8, s8, s0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    s_xor_b32 s11, s0, s1
; GFX6-NEXT:    s_sub_i32 s0, 0, s10
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
; GFX6-NEXT:    v_mul_hi_u32 v2, v1, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s8, v3
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s2, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
; GFX6-NEXT:    s_ashr_i32 s0, s9, 31
; GFX6-NEXT:    s_add_i32 s1, s9, s0
; GFX6-NEXT:    s_xor_b32 s1, s1, s0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
; GFX6-NEXT:    v_mul_hi_u32 v1, s1, v1
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s2, v3
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s10
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX6-NEXT:    s_xor_b32 s2, s0, s3
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s1, v2
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v2
; GFX6-NEXT:    v_xor_b32_e32 v0, s11, v0
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s10, v2
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s11, v0
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s10, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX6-NEXT:    v_xor_b32_e32 v1, s2, v1
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s2, v1
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v2i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s0, 0x1000, s6
; GFX9-NEXT:    s_ashr_i32 s1, s0, 31
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_xor_b32 s0, s0, s1
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s0
; GFX9-NEXT:    s_lshl_b32 s6, 0x1000, s7
; GFX9-NEXT:    s_ashr_i32 s8, s6, 31
; GFX9-NEXT:    s_add_i32 s6, s6, s8
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_xor_b32 s6, s6, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s6
; GFX9-NEXT:    s_sub_i32 s10, 0, s0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX9-NEXT:    s_ashr_i32 s7, s4, 31
; GFX9-NEXT:    s_add_i32 s4, s4, s7
; GFX9-NEXT:    v_mul_lo_u32 v3, s10, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    s_sub_i32 s10, 0, s6
; GFX9-NEXT:    v_mul_hi_u32 v3, v0, v3
; GFX9-NEXT:    s_xor_b32 s4, s4, s7
; GFX9-NEXT:    v_mul_lo_u32 v4, s10, v1
; GFX9-NEXT:    s_ashr_i32 s9, s5, 31
; GFX9-NEXT:    v_add_u32_e32 v0, v0, v3
; GFX9-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v4
; GFX9-NEXT:    s_add_i32 s5, s5, s9
; GFX9-NEXT:    s_xor_b32 s5, s5, s9
; GFX9-NEXT:    v_mul_lo_u32 v4, v0, s0
; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v1, s5, v1
; GFX9-NEXT:    v_add_u32_e32 v3, 1, v0
; GFX9-NEXT:    v_sub_u32_e32 v4, s4, v4
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s0, v4
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v3, s0, v4
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s0, v3
; GFX9-NEXT:    v_mul_lo_u32 v3, v1, s6
; GFX9-NEXT:    v_add_u32_e32 v4, 1, v0
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT:    v_add_u32_e32 v4, 1, v1
; GFX9-NEXT:    v_sub_u32_e32 v3, s5, v3
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT:    v_subrev_u32_e32 v4, s6, v3
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX9-NEXT:    v_add_u32_e32 v4, 1, v1
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v3
; GFX9-NEXT:    s_xor_b32 s1, s7, s1
; GFX9-NEXT:    s_xor_b32 s0, s9, s8
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX9-NEXT:    v_xor_b32_e32 v0, s1, v0
; GFX9-NEXT:    v_xor_b32_e32 v1, s0, v1
; GFX9-NEXT:    v_subrev_u32_e32 v0, s1, v0
; GFX9-NEXT:    v_subrev_u32_e32 v1, s0, v1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i32> <i32 4096, i32 4096>, %y
  %r = sdiv <2 x i32> %x, %shl.y
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

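; Signed odd-constant remainder: srem by 1235195 is lowered by computing the magic-multiply quotient
; (0xd9528441) and subtracting quotient * 0x12d8fb from the dividend.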
define amdgpu_kernel void @srem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @srem_i32_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = srem i32 [[X:%.*]], 1235195
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i32_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    v_mov_b32_e32 v0, 0xd9528441
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    v_mul_hi_i32 v0, s4, v0
; GFX6-NEXT:    v_readfirstlane_b32 s5, v0
; GFX6-NEXT:    s_add_i32 s5, s5, s4
; GFX6-NEXT:    s_lshr_b32 s6, s5, 31
; GFX6-NEXT:    s_ashr_i32 s5, s5, 20
; GFX6-NEXT:    s_add_i32 s5, s5, s6
; GFX6-NEXT:    s_mul_i32 s5, s5, 0x12d8fb
; GFX6-NEXT:    s_sub_i32 s4, s4, s5
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i32_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_hi_i32 s0, s4, 0xd9528441
; GFX9-NEXT:    s_add_i32 s0, s0, s4
; GFX9-NEXT:    s_lshr_b32 s1, s0, 31
; GFX9-NEXT:    s_ashr_i32 s0, s0, 20
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, 0x12d8fb
; GFX9-NEXT:    s_sub_i32 s0, s4, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = srem i32 %x, 1235195
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

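; Signed power-of-two remainder: srem by 4096 becomes sign-bias, mask with 0xfffff000, and a subtract
; from the original dividend.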
define amdgpu_kernel void @srem_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
; CHECK-LABEL: @srem_i32_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = srem i32 [[X:%.*]], 4096
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s5, s4, 31
; GFX6-NEXT:    s_lshr_b32 s5, s5, 20
; GFX6-NEXT:    s_add_i32 s5, s4, s5
; GFX6-NEXT:    s_and_b32 s5, s5, 0xfffff000
; GFX6-NEXT:    s_sub_i32 s4, s4, s5
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s4, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_add_i32 s0, s4, s0
; GFX9-NEXT:    s_and_b32 s0, s0, 0xfffff000
; GFX9-NEXT:    s_sub_i32 s0, s4, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = srem i32 %x, 4096
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

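; Signed shifted-power-of-two remainder: the IR pass keeps the srem by (4096 << %y); the backend
; divides |x| by |4096 << %y| with the reciprocal sequence and reapplies the dividend's sign to the
; remainder, with GFX9 keeping the computation in scalar registers.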
define amdgpu_kernel void @srem_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %x, i32 %y) {
; CHECK-LABEL: @srem_i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = srem i32 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s3, 0x1000, s3
; GFX6-NEXT:    s_ashr_i32 s4, s3, 31
; GFX6-NEXT:    s_add_i32 s3, s3, s4
; GFX6-NEXT:    s_xor_b32 s4, s3, s4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s4
; GFX6-NEXT:    s_sub_i32 s3, 0, s4
; GFX6-NEXT:    s_ashr_i32 s5, s2, 31
; GFX6-NEXT:    s_add_i32 s2, s2, s5
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    s_xor_b32 s6, s2, s5
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v0
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_mul_hi_u32 v0, s6, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s4
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s4, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s4, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s5, v0
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v0
; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s3, 0x1000, s3
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_add_i32 s3, s3, s4
; GFX9-NEXT:    s_xor_b32 s3, s3, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_sub_i32 s5, 0, s3
; GFX9-NEXT:    s_ashr_i32 s4, s2, 31
; GFX9-NEXT:    s_add_i32 s2, s2, s4
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_xor_b32 s2, s2, s4
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s6, v0
; GFX9-NEXT:    s_mul_i32 s5, s5, s6
; GFX9-NEXT:    s_mul_hi_u32 s5, s6, s5
; GFX9-NEXT:    s_add_i32 s6, s6, s5
; GFX9-NEXT:    s_mul_hi_u32 s5, s2, s6
; GFX9-NEXT:    s_mul_i32 s5, s5, s3
; GFX9-NEXT:    s_sub_i32 s2, s2, s5
; GFX9-NEXT:    s_sub_i32 s5, s2, s3
; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
; GFX9-NEXT:    s_cselect_b32 s2, s5, s2
; GFX9-NEXT:    s_sub_i32 s5, s2, s3
; GFX9-NEXT:    s_cmp_ge_u32 s2, s3
; GFX9-NEXT:    s_cselect_b32 s2, s5, s2
; GFX9-NEXT:    s_xor_b32 s2, s2, s4
; GFX9-NEXT:    s_sub_i32 s2, s2, s4
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    global_store_dword v1, v0, s[0:1]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i32 4096, %y
  %r = srem i32 %x, %shl.y
  store i32 %r, i32 addrspace(1)* %out
  ret void
}

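; Vector signed power-of-two remainder: scalarized into two srems by 4096, each lowered as sign-bias,
; mask with 0xfffff000, subtract.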
define amdgpu_kernel void @srem_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x) {
; CHECK-LABEL: @srem_v2i32_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = srem i32 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> undef, i32 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = srem i32 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v2i32_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s6, s4, 31
; GFX6-NEXT:    s_lshr_b32 s6, s6, 20
; GFX6-NEXT:    s_add_i32 s6, s4, s6
; GFX6-NEXT:    s_ashr_i32 s7, s5, 31
; GFX6-NEXT:    s_and_b32 s6, s6, 0xfffff000
; GFX6-NEXT:    s_sub_i32 s4, s4, s6
; GFX6-NEXT:    s_lshr_b32 s6, s7, 20
; GFX6-NEXT:    s_add_i32 s6, s5, s6
; GFX6-NEXT:    s_and_b32 s6, s6, 0xfffff000
; GFX6-NEXT:    s_sub_i32 s5, s5, s6
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v2i32_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s2, 31
; GFX9-NEXT:    s_ashr_i32 s1, s3, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_lshr_b32 s1, s1, 20
; GFX9-NEXT:    s_add_i32 s0, s2, s0
; GFX9-NEXT:    s_add_i32 s1, s3, s1
; GFX9-NEXT:    s_and_b32 s0, s0, 0xfffff000
; GFX9-NEXT:    s_and_b32 s1, s1, 0xfffff000
; GFX9-NEXT:    s_sub_i32 s0, s2, s0
; GFX9-NEXT:    s_sub_i32 s1, s3, s1
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = srem <2 x i32> %x, <i32 4096, i32 4096>
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

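; Vector signed shifted-power-of-two remainder: the IR pass expands each lane into the reciprocal-based
; remainder on absolute values, with the dividend's sign reapplied via xor/subtract at the end.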
define amdgpu_kernel void @srem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_v2i32_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i32> <i32 4096, i32 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = ashr i32 [[TMP1]], 31
; CHECK-NEXT:    [[TMP4:%.*]] = ashr i32 [[TMP2]], 31
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[TMP8:%.*]] = xor i32 [[TMP6]], [[TMP4]]
; CHECK-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
; CHECK-NEXT:    [[TMP10:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP9]])
; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast float [[TMP10]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP12:%.*]] = fptoui float [[TMP11]] to i32
; CHECK-NEXT:    [[TMP13:%.*]] = sub i32 0, [[TMP8]]
; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], [[TMP12]]
; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP12]] to i64
; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP14]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP15]], [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
; CHECK-NEXT:    [[TMP19:%.*]] = lshr i64 [[TMP17]], 32
; CHECK-NEXT:    [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
; CHECK-NEXT:    [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT:    [[TMP23:%.*]] = zext i32 [[TMP21]] to i64
; CHECK-NEXT:    [[TMP24:%.*]] = mul i64 [[TMP22]], [[TMP23]]
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i64 [[TMP24]] to i32
; CHECK-NEXT:    [[TMP26:%.*]] = lshr i64 [[TMP24]], 32
; CHECK-NEXT:    [[TMP27:%.*]] = trunc i64 [[TMP26]] to i32
; CHECK-NEXT:    [[TMP28:%.*]] = mul i32 [[TMP27]], [[TMP8]]
; CHECK-NEXT:    [[TMP29:%.*]] = sub i32 [[TMP7]], [[TMP28]]
; CHECK-NEXT:    [[TMP30:%.*]] = icmp uge i32 [[TMP29]], [[TMP8]]
; CHECK-NEXT:    [[TMP31:%.*]] = sub i32 [[TMP29]], [[TMP8]]
; CHECK-NEXT:    [[TMP32:%.*]] = select i1 [[TMP30]], i32 [[TMP31]], i32 [[TMP29]]
; CHECK-NEXT:    [[TMP33:%.*]] = icmp uge i32 [[TMP32]], [[TMP8]]
; CHECK-NEXT:    [[TMP34:%.*]] = sub i32 [[TMP32]], [[TMP8]]
; CHECK-NEXT:    [[TMP35:%.*]] = select i1 [[TMP33]], i32 [[TMP34]], i32 [[TMP32]]
; CHECK-NEXT:    [[TMP36:%.*]] = xor i32 [[TMP35]], [[TMP3]]
; CHECK-NEXT:    [[TMP37:%.*]] = sub i32 [[TMP36]], [[TMP3]]
; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <2 x i32> undef, i32 [[TMP37]], i64 0
; CHECK-NEXT:    [[TMP39:%.*]] = extractelement <2 x i32> [[X]], i64 1
; CHECK-NEXT:    [[TMP40:%.*]] = extractelement <2 x i32> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP41:%.*]] = ashr i32 [[TMP39]], 31
; CHECK-NEXT:    [[TMP42:%.*]] = ashr i32 [[TMP40]], 31
; CHECK-NEXT:    [[TMP43:%.*]] = add i32 [[TMP39]], [[TMP41]]
; CHECK-NEXT:    [[TMP44:%.*]] = add i32 [[TMP40]], [[TMP42]]
; CHECK-NEXT:    [[TMP45:%.*]] = xor i32 [[TMP43]], [[TMP41]]
; CHECK-NEXT:    [[TMP46:%.*]] = xor i32 [[TMP44]], [[TMP42]]
; CHECK-NEXT:    [[TMP47:%.*]] = uitofp i32 [[TMP46]] to float
; CHECK-NEXT:    [[TMP48:%.*]] = call fast float @llvm.amdgcn.rcp.f32(float [[TMP47]])
; CHECK-NEXT:    [[TMP49:%.*]] = fmul fast float [[TMP48]], 0x41EFFFFFC0000000
; CHECK-NEXT:    [[TMP50:%.*]] = fptoui float [[TMP49]] to i32
; CHECK-NEXT:    [[TMP51:%.*]] = sub i32 0, [[TMP46]]
; CHECK-NEXT:    [[TMP52:%.*]] = mul i32 [[TMP51]], [[TMP50]]
; CHECK-NEXT:    [[TMP53:%.*]] = zext i32 [[TMP50]] to i64
; CHECK-NEXT:    [[TMP54:%.*]] = zext i32 [[TMP52]] to i64
; CHECK-NEXT:    [[TMP55:%.*]] = mul i64 [[TMP53]], [[TMP54]]
; CHECK-NEXT:    [[TMP56:%.*]] = trunc i64 [[TMP55]] to i32
; CHECK-NEXT:    [[TMP57:%.*]] = lshr i64 [[TMP55]], 32
; CHECK-NEXT:    [[TMP58:%.*]] = trunc i64 [[TMP57]] to i32
; CHECK-NEXT:    [[TMP59:%.*]] = add i32 [[TMP50]], [[TMP58]]
; CHECK-NEXT:    [[TMP60:%.*]] = zext i32 [[TMP45]] to i64
; CHECK-NEXT:    [[TMP61:%.*]] = zext i32 [[TMP59]] to i64
; CHECK-NEXT:    [[TMP62:%.*]] = mul i64 [[TMP60]], [[TMP61]]
; CHECK-NEXT:    [[TMP63:%.*]] = trunc i64 [[TMP62]] to i32
; CHECK-NEXT:    [[TMP64:%.*]] = lshr i64 [[TMP62]], 32
; CHECK-NEXT:    [[TMP65:%.*]] = trunc i64 [[TMP64]] to i32
; CHECK-NEXT:    [[TMP66:%.*]] = mul i32 [[TMP65]], [[TMP46]]
; CHECK-NEXT:    [[TMP67:%.*]] = sub i32 [[TMP45]], [[TMP66]]
; CHECK-NEXT:    [[TMP68:%.*]] = icmp uge i32 [[TMP67]], [[TMP46]]
; CHECK-NEXT:    [[TMP69:%.*]] = sub i32 [[TMP67]], [[TMP46]]
; CHECK-NEXT:    [[TMP70:%.*]] = select i1 [[TMP68]], i32 [[TMP69]], i32 [[TMP67]]
; CHECK-NEXT:    [[TMP71:%.*]] = icmp uge i32 [[TMP70]], [[TMP46]]
; CHECK-NEXT:    [[TMP72:%.*]] = sub i32 [[TMP70]], [[TMP46]]
; CHECK-NEXT:    [[TMP73:%.*]] = select i1 [[TMP71]], i32 [[TMP72]], i32 [[TMP70]]
; CHECK-NEXT:    [[TMP74:%.*]] = xor i32 [[TMP73]], [[TMP41]]
; CHECK-NEXT:    [[TMP75:%.*]] = sub i32 [[TMP74]], [[TMP41]]
; CHECK-NEXT:    [[TMP76:%.*]] = insertelement <2 x i32> [[TMP38]], i32 [[TMP75]], i64 1
; CHECK-NEXT:    store <2 x i32> [[TMP76]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v2i32_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xb
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b32 s2, 0x1000, s6
; GFX6-NEXT:    s_ashr_i32 s3, s2, 31
; GFX6-NEXT:    s_add_i32 s2, s2, s3
; GFX6-NEXT:    s_xor_b32 s6, s2, s3
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s6
; GFX6-NEXT:    s_lshl_b32 s7, 0x1000, s7
; GFX6-NEXT:    s_ashr_i32 s8, s7, 31
; GFX6-NEXT:    s_add_i32 s7, s7, s8
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT:    s_xor_b32 s7, s7, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s7
; GFX6-NEXT:    s_sub_i32 s9, 0, s6
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_rcp_iflag_f32_e32 v1, v1
; GFX6-NEXT:    s_ashr_i32 s8, s4, 31
; GFX6-NEXT:    s_add_i32 s4, s4, s8
; GFX6-NEXT:    v_mul_lo_u32 v2, s9, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX6-NEXT:    s_xor_b32 s4, s4, s8
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, v2
; GFX6-NEXT:    s_sub_i32 s9, 0, s7
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v2, s9, v1
; GFX6-NEXT:    s_ashr_i32 s9, s5, 31
; GFX6-NEXT:    s_add_i32 s5, s5, s9
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s6
; GFX6-NEXT:    v_mul_hi_u32 v2, v1, v2
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s6, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    s_xor_b32 s4, s5, s9
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v1
; GFX6-NEXT:    v_subrev_i32_e32 v3, vcc, s6, v0
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s7
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s8, v0
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v1
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s7, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s7, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s7, v1
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s7, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    v_xor_b32_e32 v1, s9, v1
; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s9, v1
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v2i32_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b32 s2, 0x1000, s6
; GFX9-NEXT:    s_ashr_i32 s3, s2, 31
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_xor_b32 s2, s2, s3
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
; GFX9-NEXT:    s_lshl_b32 s3, 0x1000, s7
; GFX9-NEXT:    s_sub_i32 s7, 0, s2
; GFX9-NEXT:    s_ashr_i32 s6, s4, 31
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_add_i32 s4, s4, s6
; GFX9-NEXT:    s_xor_b32 s4, s4, s6
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s8, v0
; GFX9-NEXT:    s_mul_i32 s7, s7, s8
; GFX9-NEXT:    s_mul_hi_u32 s7, s8, s7
; GFX9-NEXT:    s_add_i32 s8, s8, s7
; GFX9-NEXT:    s_mul_hi_u32 s7, s4, s8
; GFX9-NEXT:    s_mul_i32 s7, s7, s2
; GFX9-NEXT:    s_sub_i32 s4, s4, s7
; GFX9-NEXT:    s_sub_i32 s7, s4, s2
; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
; GFX9-NEXT:    s_cselect_b32 s4, s7, s4
; GFX9-NEXT:    s_sub_i32 s7, s4, s2
; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
; GFX9-NEXT:    s_cselect_b32 s2, s7, s4
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_add_i32 s3, s3, s4
; GFX9-NEXT:    s_xor_b32 s3, s3, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
; GFX9-NEXT:    s_xor_b32 s2, s2, s6
; GFX9-NEXT:    s_sub_i32 s2, s2, s6
; GFX9-NEXT:    s_sub_i32 s6, 0, s3
; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT:    s_ashr_i32 s4, s5, 31
; GFX9-NEXT:    s_add_i32 s5, s5, s4
; GFX9-NEXT:    s_xor_b32 s5, s5, s4
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s7, v0
; GFX9-NEXT:    s_mul_i32 s6, s6, s7
; GFX9-NEXT:    s_mul_hi_u32 s6, s7, s6
; GFX9-NEXT:    s_add_i32 s7, s7, s6
; GFX9-NEXT:    s_mul_hi_u32 s6, s5, s7
; GFX9-NEXT:    s_mul_i32 s6, s6, s3
; GFX9-NEXT:    s_sub_i32 s5, s5, s6
; GFX9-NEXT:    s_sub_i32 s6, s5, s3
; GFX9-NEXT:    s_cmp_ge_u32 s5, s3
; GFX9-NEXT:    s_cselect_b32 s5, s6, s5
; GFX9-NEXT:    s_sub_i32 s6, s5, s3
; GFX9-NEXT:    s_cmp_ge_u32 s5, s3
; GFX9-NEXT:    s_cselect_b32 s3, s6, s5
; GFX9-NEXT:    s_xor_b32 s3, s3, s4
; GFX9-NEXT:    s_sub_i32 s3, s3, s4
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i32> <i32 4096, i32 4096>, %y
  %r = srem <2 x i32> %x, %shl.y
  store <2 x i32> %r, <2 x i32> addrspace(1)* %out
  ret void
}

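; 64-bit odd-constant divisor: udiv i64 by 1235195949943 is kept in the IR; the backend emits the full
; 64-bit expansion, seeding a reciprocal estimate from v_rcp_f32, refining it with multiplies by the
; negated divisor (0xfffffee0:0x68958c89), and correcting the final quotient with compare/select steps.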
define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @udiv_i64_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = udiv i64 [[X:%.*]], 1235195949943
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i64_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x4f176a73
; GFX6-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX6-NEXT:    v_madmk_f32 v0, v1, 0x438f8000, v0
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_movk_i32 s4, 0xfee0
; GFX6-NEXT:    s_mov_b32 s5, 0x68958c89
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    s_movk_i32 s8, 0x11f
; GFX6-NEXT:    s_mov_b32 s9, 0x976a7377
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s4
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s5
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s5
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, s5
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v5
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v5
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v5
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s4
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s5
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s5
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s5
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v3, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s3, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s3, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s8
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s9
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s9
; GFX6-NEXT:    v_mov_b32_e32 v5, 0x11f
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s9
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s3, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s2, v3
; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s9, v3
; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
; GFX6-NEXT:    s_movk_i32 s2, 0x11e
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[0:1], s2, v4
; GFX6-NEXT:    s_mov_b32 s9, 0x976a7376
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[0:1], s9, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
; GFX6-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v6, s3
; GFX6-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s2, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i64_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_mov_b32_e32 v0, 0x4f176a73
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x438f8000, v0
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    s_mul_i32 s1, s0, 0xfffffee0
; GFX9-NEXT:    s_mul_hi_u32 s2, s0, 0x68958c89
; GFX9-NEXT:    s_add_i32 s1, s2, s1
; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
; GFX9-NEXT:    s_mul_i32 s3, s2, 0x68958c89
; GFX9-NEXT:    s_add_i32 s1, s1, s3
; GFX9-NEXT:    s_mul_i32 s9, s0, 0x68958c89
; GFX9-NEXT:    s_mul_hi_u32 s3, s0, s1
; GFX9-NEXT:    s_mul_i32 s8, s0, s1
; GFX9-NEXT:    s_mul_hi_u32 s0, s0, s9
; GFX9-NEXT:    s_add_u32 s0, s0, s8
; GFX9-NEXT:    s_addc_u32 s3, 0, s3
; GFX9-NEXT:    s_mul_hi_u32 s10, s2, s9
; GFX9-NEXT:    s_mul_i32 s9, s2, s9
; GFX9-NEXT:    s_add_u32 s0, s0, s9
; GFX9-NEXT:    s_mul_hi_u32 s8, s2, s1
; GFX9-NEXT:    s_addc_u32 s0, s3, s10
; GFX9-NEXT:    s_addc_u32 s3, s8, 0
; GFX9-NEXT:    s_mul_i32 s1, s2, s1
; GFX9-NEXT:    s_add_u32 s0, s0, s1
; GFX9-NEXT:    s_addc_u32 s1, 0, s3
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s0, s2, s1
; GFX9-NEXT:    v_readfirstlane_b32 s2, v0
; GFX9-NEXT:    s_mul_i32 s3, s2, 0xfffffee0
; GFX9-NEXT:    s_mul_hi_u32 s8, s2, 0x68958c89
; GFX9-NEXT:    s_mul_i32 s1, s0, 0x68958c89
; GFX9-NEXT:    s_add_i32 s3, s8, s3
; GFX9-NEXT:    s_add_i32 s3, s3, s1
; GFX9-NEXT:    s_mul_i32 s9, s2, 0x68958c89
; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
; GFX9-NEXT:    s_mul_i32 s8, s2, s3
; GFX9-NEXT:    s_mul_hi_u32 s2, s2, s9
; GFX9-NEXT:    s_add_u32 s2, s2, s8
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_hi_u32 s10, s0, s9
; GFX9-NEXT:    s_mul_i32 s9, s0, s9
; GFX9-NEXT:    s_add_u32 s2, s2, s9
; GFX9-NEXT:    s_mul_hi_u32 s8, s0, s3
; GFX9-NEXT:    s_addc_u32 s1, s1, s10
; GFX9-NEXT:    s_addc_u32 s2, s8, 0
; GFX9-NEXT:    s_mul_i32 s3, s0, s3
; GFX9-NEXT:    s_add_u32 s1, s1, s3
; GFX9-NEXT:    s_addc_u32 s2, 0, s2
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s1, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s0, s0, s2
; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_i32 s2, s6, s0
; GFX9-NEXT:    s_mul_hi_u32 s8, s6, s3
; GFX9-NEXT:    s_mul_hi_u32 s1, s6, s0
; GFX9-NEXT:    s_add_u32 s2, s8, s2
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_hi_u32 s9, s7, s3
; GFX9-NEXT:    s_mul_i32 s3, s7, s3
; GFX9-NEXT:    s_add_u32 s2, s2, s3
; GFX9-NEXT:    s_mul_hi_u32 s8, s7, s0
; GFX9-NEXT:    s_addc_u32 s1, s1, s9
; GFX9-NEXT:    s_addc_u32 s2, s8, 0
; GFX9-NEXT:    s_mul_i32 s0, s7, s0
; GFX9-NEXT:    s_add_u32 s3, s1, s0
; GFX9-NEXT:    s_addc_u32 s2, 0, s2
; GFX9-NEXT:    s_mul_i32 s0, s3, 0x11f
; GFX9-NEXT:    s_mul_hi_u32 s8, s3, 0x976a7377
; GFX9-NEXT:    s_add_i32 s0, s8, s0
; GFX9-NEXT:    s_mul_i32 s8, s2, 0x976a7377
; GFX9-NEXT:    s_mul_i32 s9, s3, 0x976a7377
; GFX9-NEXT:    s_add_i32 s8, s0, s8
; GFX9-NEXT:    v_mov_b32_e32 v0, s9
; GFX9-NEXT:    s_sub_i32 s0, s7, s8
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT:    s_mov_b32 s1, 0x976a7377
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s6, s0, 0x11f
; GFX9-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s1, v0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s6, s6, 0
; GFX9-NEXT:    s_cmpk_gt_u32 s6, 0x11e
; GFX9-NEXT:    s_mov_b32 s10, 0x976a7376
; GFX9-NEXT:    s_cselect_b32 s9, -1, 0
; GFX9-NEXT:    v_cmp_lt_u32_e64 s[0:1], s10, v1
; GFX9-NEXT:    s_cmpk_eq_i32 s6, 0x11f
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v3, s9
; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT:    s_add_u32 s6, s3, 2
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX9-NEXT:    s_addc_u32 s0, s2, 0
; GFX9-NEXT:    s_add_u32 s9, s3, 1
; GFX9-NEXT:    s_addc_u32 s1, s2, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s7, s7, s8
; GFX9-NEXT:    s_cmpk_gt_u32 s7, 0x11e
; GFX9-NEXT:    v_mov_b32_e32 v3, s1
; GFX9-NEXT:    v_mov_b32_e32 v4, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
; GFX9-NEXT:    s_cselect_b32 s8, -1, 0
; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s10, v0
; GFX9-NEXT:    s_cmpk_eq_i32 s7, 0x11f
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, v4, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v3, s8
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
; GFX9-NEXT:    v_mov_b32_e32 v3, s2
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v0, s9
; GFX9-NEXT:    v_mov_b32_e32 v3, s6
; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v3, s3
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = udiv i64 %x, 1235195949943
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

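; Power-of-two denominators need no expansion: the quotient is a 64-bit right
; shift, and when the denominator is 4096 shifted by a uniform amount the shift
; counts are simply added (s_add_i32 ... 12 followed by s_lshr_b64).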
define amdgpu_kernel void @udiv_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @udiv_i64_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = udiv i64 [[X:%.*]], 4096
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    s_lshr_b64 s[0:1], s[2:3], 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s0
; GFX6-NEXT:    v_mov_b32_e32 v1, s1
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b64 s[2:3], s[2:3], 12
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = udiv i64 %x, 4096
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @udiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %x, i64 %y) {
; CHECK-LABEL: @udiv_i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = udiv i64 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX6-NEXT:    s_load_dword s8, s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s0, s4
; GFX6-NEXT:    s_add_i32 s8, s8, 12
; GFX6-NEXT:    s_mov_b32 s1, s5
; GFX6-NEXT:    s_lshr_b64 s[4:5], s[6:7], s8
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_add_i32 s2, s2, 12
; GFX9-NEXT:    s_lshr_b64 s[0:1], s[6:7], s2
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i64 4096, %y
  %r = udiv i64 %x, %shl.y
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

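; The <2 x i64> cases are scalarized: the IR checks expect an extractelement /
; udiv / insertelement chain per lane, and the ISA checks repeat the scalar
; lowering for each element.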
define amdgpu_kernel void @udiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
; CHECK-LABEL: @udiv_v2i64_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = udiv i64 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = udiv i64 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v2i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshr_b64 s[4:5], s[4:5], 12
; GFX6-NEXT:    s_lshr_b64 s[6:7], s[6:7], 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_mov_b32_e32 v3, s7
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v2i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b64 s[0:1], s[4:5], 12
; GFX9-NEXT:    s_lshr_b64 s[4:5], s[6:7], 12
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_mov_b32_e32 v3, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = udiv <2 x i64> %x, <i64 4096, i64 4096>
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
; CHECK-LABEL: @udiv_v2i64_mixed_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = udiv i64 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = udiv i64 [[TMP4]], 4095
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v2i64_mixed_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x4f800000
; GFX6-NEXT:    v_madak_f32 v0, 0, v0, 0x457ff000
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_movk_i32 s6, 0xf001
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshr_b64 s[8:9], s[0:1], 12
; GFX6-NEXT:    s_movk_i32 s0, 0xfff
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s6
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s6
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s6
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v2, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, v3
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v3, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s6
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, s6
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, s6
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v2, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, v4
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s3, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s3, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s0
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, s0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
; GFX6-NEXT:    v_mul_lo_u32 v8, v0, s0
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mov_b32_e32 v5, s3
; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s2, v8
; GFX6-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s0, v8
; GFX6-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
; GFX6-NEXT:    s_movk_i32 s0, 0xffe
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
; GFX6-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v8
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v3, v1, v3, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v6, v2, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v0, s8
; GFX6-NEXT:    v_mov_b32_e32 v1, s9
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v2i64_mixed_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_mov_b32_e32 v0, 0x4f800000
; GFX9-NEXT:    v_madak_f32 v0, 0, v0, 0x457ff000
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    s_movk_i32 s2, 0xf001
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s2
; GFX9-NEXT:    v_mul_lo_u32 v4, v1, s2
; GFX9-NEXT:    v_mul_lo_u32 v3, v0, s2
; GFX9-NEXT:    v_sub_u32_e32 v2, v2, v0
; GFX9-NEXT:    v_add_u32_e32 v2, v2, v4
; GFX9-NEXT:    v_mul_hi_u32 v5, v0, v3
; GFX9-NEXT:    v_mul_lo_u32 v4, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v7, v0, v2
; GFX9-NEXT:    v_mul_lo_u32 v6, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX9-NEXT:    v_mul_hi_u32 v8, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v5, v4
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v3, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v8, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s2
; GFX9-NEXT:    v_mul_lo_u32 v3, v1, s2
; GFX9-NEXT:    v_mul_lo_u32 v5, v0, s2
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
; GFX9-NEXT:    v_sub_u32_e32 v2, v2, v0
; GFX9-NEXT:    v_add_u32_e32 v2, v2, v3
; GFX9-NEXT:    v_mul_lo_u32 v3, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v6, v0, v5
; GFX9-NEXT:    v_mul_hi_u32 v7, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v8, v1, v2
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v6, v3
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v7, vcc
; GFX9-NEXT:    v_mul_lo_u32 v7, v1, v5
; GFX9-NEXT:    v_mul_hi_u32 v5, v1, v5
; GFX9-NEXT:    s_movk_i32 s0, 0xfff
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshr_b64 s[4:5], s[4:5], 12
; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v7
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v5, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v8, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v5, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, s6, v1
; GFX9-NEXT:    v_mul_hi_u32 v3, s6, v0
; GFX9-NEXT:    v_mul_hi_u32 v5, s6, v1
; GFX9-NEXT:    v_mul_hi_u32 v6, s7, v1
; GFX9-NEXT:    v_mul_lo_u32 v1, s7, v1
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v5, vcc
; GFX9-NEXT:    v_mul_lo_u32 v5, s7, v0
; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v6, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 2, v0
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, s0
; GFX9-NEXT:    v_mul_hi_u32 v6, v0, s0
; GFX9-NEXT:    v_mul_lo_u32 v9, v0, s0
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, 1, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, 0, v1, vcc
; GFX9-NEXT:    v_add_u32_e32 v5, v6, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, s7
; GFX9-NEXT:    v_sub_co_u32_e32 v9, vcc, s6, v9
; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
; GFX9-NEXT:    v_subrev_co_u32_e32 v6, vcc, s0, v9
; GFX9-NEXT:    v_subbrev_co_u32_e32 v10, vcc, 0, v5, vcc
; GFX9-NEXT:    s_movk_i32 s0, 0xffe
; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v6
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v10
; GFX9-NEXT:    v_cndmask_b32_e32 v6, -1, v6, vcc
; GFX9-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v9
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v5, -1, v6, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v8, v3, vcc
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v1, v3, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v7, v2, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v0, s4
; GFX9-NEXT:    v_mov_b32_e32 v1, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = udiv <2 x i64> %x, <i64 4096, i64 4095>
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @udiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: @udiv_v2i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = udiv i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP7:%.*]] = udiv i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: udiv_v2i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_add_i32 s8, s8, 12
; GFX6-NEXT:    s_add_i32 s9, s10, 12
; GFX6-NEXT:    s_lshr_b64 s[4:5], s[4:5], s8
; GFX6-NEXT:    s_lshr_b64 s[6:7], s[6:7], s9
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_mov_b32_e32 v3, s7
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: udiv_v2i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_add_i32 s2, s8, 12
; GFX9-NEXT:    s_add_i32 s8, s10, 12
; GFX9-NEXT:    s_lshr_b64 s[2:3], s[4:5], s2
; GFX9-NEXT:    s_lshr_b64 s[4:5], s[6:7], s8
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_mov_b32_e32 v3, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
  %r = udiv <2 x i64> %x, %shl.y
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

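; For urem, the power-of-two cases reduce to a mask with denominator-1 (an AND
; with 0xfff, or with the shifted mask), while the odd-constant case reuses the
; multiply-based expansion and keeps the remainder rather than the quotient.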
define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @urem_i64_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = urem i64 [[X:%.*]], 1235195393993
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i64_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x4f1761f8
; GFX6-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX6-NEXT:    v_madmk_f32 v0, v1, 0x438f8000, v0
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_movk_i32 s2, 0xfee0
; GFX6-NEXT:    s_mov_b32 s3, 0x689e0837
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s8, s4
; GFX6-NEXT:    s_movk_i32 s4, 0x11f
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s2
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s3
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s3
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, s3
; GFX6-NEXT:    s_mov_b32 s12, 0x9761f7c9
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v4, v0, v5
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v5
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v5
; GFX6-NEXT:    s_mov_b32 s9, s5
; GFX6-NEXT:    s_movk_i32 s5, 0x11e
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s2
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s3
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s3
; GFX6-NEXT:    s_mov_b32 s11, 0xf000
; GFX6-NEXT:    s_mov_b32 s10, -1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v3, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s6, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s6, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s6, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s7, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s7, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s7, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v0, s4
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s12
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s12
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s12
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s7, v1
; GFX6-NEXT:    v_mov_b32_e32 v3, 0x11f
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
; GFX6-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[2:3], s5, v5
; GFX6-NEXT:    s_mov_b32 s6, 0x9761f7c8
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[2:3], s6, v4
; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s4, v5
; GFX6-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
; GFX6-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v5, s7
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s5, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s6, v0
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s4, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i64_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_mov_b32_e32 v0, 0x4f1761f8
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x438f8000, v0
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    s_mov_b32 s12, 0x9761f7c8
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_readfirstlane_b32 s0, v0
; GFX9-NEXT:    s_mul_i32 s1, s0, 0xfffffee0
; GFX9-NEXT:    s_mul_hi_u32 s2, s0, 0x689e0837
; GFX9-NEXT:    s_add_i32 s1, s2, s1
; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
; GFX9-NEXT:    s_mul_i32 s3, s2, 0x689e0837
; GFX9-NEXT:    s_add_i32 s1, s1, s3
; GFX9-NEXT:    s_mul_i32 s9, s0, 0x689e0837
; GFX9-NEXT:    s_mul_hi_u32 s3, s0, s1
; GFX9-NEXT:    s_mul_i32 s8, s0, s1
; GFX9-NEXT:    s_mul_hi_u32 s0, s0, s9
; GFX9-NEXT:    s_add_u32 s0, s0, s8
; GFX9-NEXT:    s_addc_u32 s3, 0, s3
; GFX9-NEXT:    s_mul_hi_u32 s10, s2, s9
; GFX9-NEXT:    s_mul_i32 s9, s2, s9
; GFX9-NEXT:    s_add_u32 s0, s0, s9
; GFX9-NEXT:    s_mul_hi_u32 s8, s2, s1
; GFX9-NEXT:    s_addc_u32 s0, s3, s10
; GFX9-NEXT:    s_addc_u32 s3, s8, 0
; GFX9-NEXT:    s_mul_i32 s1, s2, s1
; GFX9-NEXT:    s_add_u32 s0, s0, s1
; GFX9-NEXT:    s_addc_u32 s1, 0, s3
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s0, s2, s1
; GFX9-NEXT:    v_readfirstlane_b32 s2, v0
; GFX9-NEXT:    s_mul_i32 s3, s2, 0xfffffee0
; GFX9-NEXT:    s_mul_hi_u32 s8, s2, 0x689e0837
; GFX9-NEXT:    s_mul_i32 s1, s0, 0x689e0837
; GFX9-NEXT:    s_add_i32 s3, s8, s3
; GFX9-NEXT:    s_add_i32 s3, s3, s1
; GFX9-NEXT:    s_mul_i32 s9, s2, 0x689e0837
; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
; GFX9-NEXT:    s_mul_i32 s8, s2, s3
; GFX9-NEXT:    s_mul_hi_u32 s2, s2, s9
; GFX9-NEXT:    s_add_u32 s2, s2, s8
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_hi_u32 s10, s0, s9
; GFX9-NEXT:    s_mul_i32 s9, s0, s9
; GFX9-NEXT:    s_add_u32 s2, s2, s9
; GFX9-NEXT:    s_mul_hi_u32 s8, s0, s3
; GFX9-NEXT:    s_addc_u32 s1, s1, s10
; GFX9-NEXT:    s_addc_u32 s2, s8, 0
; GFX9-NEXT:    s_mul_i32 s3, s0, s3
; GFX9-NEXT:    s_add_u32 s1, s1, s3
; GFX9-NEXT:    s_addc_u32 s2, 0, s2
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s1, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s0, s0, s2
; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_mul_i32 s2, s6, s0
; GFX9-NEXT:    s_mul_hi_u32 s8, s6, s3
; GFX9-NEXT:    s_mul_hi_u32 s1, s6, s0
; GFX9-NEXT:    s_add_u32 s2, s8, s2
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_hi_u32 s9, s7, s3
; GFX9-NEXT:    s_mul_i32 s3, s7, s3
; GFX9-NEXT:    s_add_u32 s2, s2, s3
; GFX9-NEXT:    s_mul_hi_u32 s8, s7, s0
; GFX9-NEXT:    s_addc_u32 s1, s1, s9
; GFX9-NEXT:    s_addc_u32 s2, s8, 0
; GFX9-NEXT:    s_mul_i32 s0, s7, s0
; GFX9-NEXT:    s_add_u32 s0, s1, s0
; GFX9-NEXT:    s_addc_u32 s1, 0, s2
; GFX9-NEXT:    s_mul_i32 s2, s0, 0x11f
; GFX9-NEXT:    s_mul_hi_u32 s3, s0, 0x9761f7c9
; GFX9-NEXT:    s_add_i32 s2, s3, s2
; GFX9-NEXT:    s_mul_i32 s1, s1, 0x9761f7c9
; GFX9-NEXT:    s_mul_i32 s0, s0, 0x9761f7c9
; GFX9-NEXT:    s_add_i32 s9, s2, s1
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    s_sub_i32 s1, s7, s9
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT:    s_mov_b32 s8, 0x9761f7c9
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s6, s1, 0x11f
; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s8, v0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s10, s6, 0
; GFX9-NEXT:    s_cmpk_gt_u32 s10, 0x11e
; GFX9-NEXT:    s_cselect_b32 s11, -1, 0
; GFX9-NEXT:    v_cmp_lt_u32_e64 s[2:3], s12, v3
; GFX9-NEXT:    s_cmpk_eq_i32 s10, 0x11f
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[2:3]
; GFX9-NEXT:    v_mov_b32_e32 v4, s11
; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v4, v1, s[2:3]
; GFX9-NEXT:    s_subb_u32 s2, s6, 0x11f
; GFX9-NEXT:    v_subrev_co_u32_e64 v4, s[0:1], s8, v3
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s0, s2, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s2, s7, s9
; GFX9-NEXT:    s_cmpk_gt_u32 s2, 0x11e
; GFX9-NEXT:    v_mov_b32_e32 v5, s10
; GFX9-NEXT:    v_mov_b32_e32 v6, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
; GFX9-NEXT:    s_cselect_b32 s3, -1, 0
; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s12, v0
; GFX9-NEXT:    s_cmpk_eq_i32 s2, 0x11f
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v5, v6, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v6, s3
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
; GFX9-NEXT:    v_mov_b32_e32 v6, s2
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v6, v1, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = urem i64 %x, 1235195393993
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @urem_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @urem_i64_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = urem i64 [[X:%.*]], 4096
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    v_mov_b32_e32 v1, 0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    s_and_b32 s0, s2, 0xfff
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    v_mov_b32_e32 v0, s0
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s2, s2, 0xfff
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    global_store_dwordx2 v1, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = urem i64 %x, 4096
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @urem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %x, i64 %y) {
; CHECK-LABEL: @urem_i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = urem i64 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX6-NEXT:    s_load_dword s8, s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s0, s4
; GFX6-NEXT:    s_mov_b32 s1, s5
; GFX6-NEXT:    s_mov_b64 s[4:5], 0x1000
; GFX6-NEXT:    s_lshl_b64 s[4:5], s[4:5], s8
; GFX6-NEXT:    s_add_u32 s4, s4, -1
; GFX6-NEXT:    s_addc_u32 s5, s5, -1
; GFX6-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    s_mov_b64 s[0:1], 0x1000
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b64 s[0:1], s[0:1], s2
; GFX9-NEXT:    s_add_u32 s0, s0, -1
; GFX9-NEXT:    s_addc_u32 s1, s1, -1
; GFX9-NEXT:    s_and_b64 s[0:1], s[6:7], s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i64 4096, %y
  %r = urem i64 %x, %shl.y
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @urem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
; CHECK-LABEL: @urem_v2i64_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = urem i64 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = urem i64 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v2i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    v_mov_b32_e32 v1, 0
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_and_b32 s4, s4, 0xfff
; GFX6-NEXT:    s_and_b32 s5, s6, 0xfff
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v2, s5
; GFX6-NEXT:    v_mov_b32_e32 v3, v1
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v2i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v1, 0
; GFX9-NEXT:    v_mov_b32_e32 v3, v1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_and_b32 s0, s4, 0xfff
; GFX9-NEXT:    s_and_b32 s1, s6, 0xfff
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v2, s1
; GFX9-NEXT:    global_store_dwordx4 v1, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = urem <2 x i64> %x, <i64 4096, i64 4096>
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @urem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: @urem_v2i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = urem i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP7:%.*]] = urem i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: urem_v2i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b64 s[12:13], 0x1000
; GFX6-NEXT:    s_mov_b32 s11, 0xf000
; GFX6-NEXT:    s_mov_b32 s10, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b64 s[6:7], s[12:13], s6
; GFX6-NEXT:    s_lshl_b64 s[4:5], s[12:13], s4
; GFX6-NEXT:    s_add_u32 s4, s4, -1
; GFX6-NEXT:    s_addc_u32 s5, s5, -1
; GFX6-NEXT:    s_and_b64 s[0:1], s[0:1], s[4:5]
; GFX6-NEXT:    s_add_u32 s4, s6, -1
; GFX6-NEXT:    s_addc_u32 s5, s7, -1
; GFX6-NEXT:    s_and_b64 s[2:3], s[2:3], s[4:5]
; GFX6-NEXT:    v_mov_b32_e32 v0, s0
; GFX6-NEXT:    v_mov_b32_e32 v1, s1
; GFX6-NEXT:    v_mov_b32_e32 v2, s2
; GFX6-NEXT:    v_mov_b32_e32 v3, s3
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: urem_v2i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b64 s[10:11], s[2:3], s10
; GFX9-NEXT:    s_lshl_b64 s[2:3], s[2:3], s8
; GFX9-NEXT:    s_add_u32 s2, s2, -1
; GFX9-NEXT:    s_addc_u32 s3, s3, -1
; GFX9-NEXT:    s_and_b64 s[2:3], s[4:5], s[2:3]
; GFX9-NEXT:    s_add_u32 s4, s10, -1
; GFX9-NEXT:    s_addc_u32 s5, s11, -1
; GFX9-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_mov_b32_e32 v3, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
  %r = urem <2 x i64> %x, %shl.y
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

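; The signed 64-bit cases first take absolute values of the operands (add/xor
; with the sign mask), perform the unsigned expansion, and then restore the sign
; of the result with a final xor/subtract, as the checks below show.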
define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @sdiv_i64_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = sdiv i64 [[X:%.*]], 1235195
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i64_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x4f800000
; GFX6-NEXT:    v_madak_f32 v0, 0, v0, 0x4996c7d8
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_mov_b32 s5, 0xffed2705
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s8, s3, 31
; GFX6-NEXT:    s_add_u32 s2, s2, s8
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s5
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s5
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, s5
; GFX6-NEXT:    s_mov_b32 s9, s8
; GFX6-NEXT:    s_addc_u32 s3, s3, s8
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[8:9]
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s5
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s5
; GFX6-NEXT:    s_mov_b32 s0, 0x12d8fb
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s5
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s3, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s3, v0
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s0
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, s0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
; GFX6-NEXT:    v_mul_lo_u32 v8, v0, s0
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mov_b32_e32 v5, s3
; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s2, v8
; GFX6-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s0, v8
; GFX6-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
; GFX6-NEXT:    s_mov_b32 s0, 0x12d8fa
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
; GFX6-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v8
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX6-NEXT:    v_xor_b32_e32 v0, s8, v0
; GFX6-NEXT:    v_xor_b32_e32 v1, s8, v1
; GFX6-NEXT:    v_mov_b32_e32 v2, s8
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i64_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_mov_b32_e32 v0, 0x4f800000
; GFX9-NEXT:    v_madak_f32 v0, 0, v0, 0x4996c7d8
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    s_mov_b32 s2, 0xffed2705
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, s2
; GFX9-NEXT:    v_mul_hi_u32 v3, v0, s2
; GFX9-NEXT:    v_mul_lo_u32 v4, v0, s2
; GFX9-NEXT:    v_add_u32_e32 v2, v3, v2
; GFX9-NEXT:    v_sub_u32_e32 v2, v2, v0
; GFX9-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX9-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v7, v0, v2
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v8, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v7, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v8, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, s2
; GFX9-NEXT:    v_mul_hi_u32 v3, v0, s2
; GFX9-NEXT:    v_mul_lo_u32 v4, v0, s2
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s2, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s2
; GFX9-NEXT:    v_add_u32_e32 v2, v3, v2
; GFX9-NEXT:    v_sub_u32_e32 v2, v2, v0
; GFX9-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v7, v0, v4
; GFX9-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v5, v1, v4
; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v7, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, 0, v8, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v6, v4
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v7, v5, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    s_mov_b32 s3, s2
; GFX9-NEXT:    s_addc_u32 s1, s7, s2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
; GFX9-NEXT:    v_mul_lo_u32 v2, s0, v1
; GFX9-NEXT:    v_mul_hi_u32 v3, s0, v0
; GFX9-NEXT:    v_mul_hi_u32 v5, s0, v1
; GFX9-NEXT:    v_mul_hi_u32 v6, s1, v1
; GFX9-NEXT:    v_mul_lo_u32 v1, s1, v1
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v5, vcc
; GFX9-NEXT:    v_mul_lo_u32 v5, s1, v0
; GFX9-NEXT:    v_mul_hi_u32 v0, s1, v0
; GFX9-NEXT:    s_mov_b32 s3, 0x12d8fb
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v6, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 2, v0
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, s3
; GFX9-NEXT:    v_mul_hi_u32 v6, v0, s3
; GFX9-NEXT:    v_mul_lo_u32 v9, v0, s3
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, 1, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, 0, v1, vcc
; GFX9-NEXT:    v_add_u32_e32 v5, v6, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, s1
; GFX9-NEXT:    v_sub_co_u32_e32 v9, vcc, s0, v9
; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
; GFX9-NEXT:    v_subrev_co_u32_e32 v6, vcc, s3, v9
; GFX9-NEXT:    v_subbrev_co_u32_e32 v10, vcc, 0, v5, vcc
; GFX9-NEXT:    s_mov_b32 s0, 0x12d8fa
; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v6
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v10
; GFX9-NEXT:    v_cndmask_b32_e32 v6, -1, v6, vcc
; GFX9-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v9
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v5, -1, v6, s[0:1]
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v7, v2, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v8, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX9-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX9-NEXT:    v_xor_b32_e32 v1, s2, v1
; GFX9-NEXT:    v_mov_b32_e32 v2, s2
; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s2, v0
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
; GFX9-NEXT:    global_store_dwordx2 v4, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i64 %x, 1235195
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @sdiv_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @sdiv_i64_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = sdiv i64 [[X:%.*]], 4096
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    s_ashr_i32 s0, s3, 31
; GFX6-NEXT:    s_lshr_b32 s0, s0, 20
; GFX6-NEXT:    s_add_u32 s0, s2, s0
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    s_addc_u32 s1, s3, 0
; GFX6-NEXT:    s_ashr_i64 s[0:1], s[0:1], 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s0
; GFX6-NEXT:    v_mov_b32_e32 v1, s1
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_lshr_b32 s4, s4, 20
; GFX9-NEXT:    s_add_u32 s2, s2, s4
; GFX9-NEXT:    s_addc_u32 s3, s3, 0
; GFX9-NEXT:    s_ashr_i64 s[2:3], s[2:3], 12
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = sdiv i64 %x, 4096
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

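; The denominator 4096 << %y is not known at compile time, so the full
; reciprocal-based 64-bit division expansion is emitted.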
define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %x, i64 %y) {
; CHECK-LABEL: @sdiv_i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = sdiv i64 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xd
; GFX6-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
; GFX6-NEXT:    s_ashr_i32 s8, s3, 31
; GFX6-NEXT:    s_add_u32 s2, s2, s8
; GFX6-NEXT:    s_mov_b32 s9, s8
; GFX6-NEXT:    s_addc_u32 s3, s3, s8
; GFX6-NEXT:    s_xor_b64 s[10:11], s[2:3], s[8:9]
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s10
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s11
; GFX6-NEXT:    s_sub_u32 s4, 0, s10
; GFX6-NEXT:    s_subb_u32 s5, 0, s11
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s12, s3, 31
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_add_u32 s2, s2, s12
; GFX6-NEXT:    s_mov_b32 s13, s12
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v5, s5, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s4, v0
; GFX6-NEXT:    s_addc_u32 s3, s3, s12
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[12:13]
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v0
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s3, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s3, v0
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s10, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s11, v0
; GFX6-NEXT:    v_mov_b32_e32 v5, s11
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, s10, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s3, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s2, v3
; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s10, v3
; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s11, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
; GFX6-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v6, s3
; GFX6-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s11, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s10, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s11, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    s_xor_b64 s[0:1], s[12:13], s[8:9]
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s0, v0
; GFX6-NEXT:    v_xor_b32_e32 v1, s1, v1
; GFX6-NEXT:    v_mov_b32_e32 v2, s1
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x34
; GFX9-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b64 s[4:5], s[2:3], s4
; GFX9-NEXT:    s_ashr_i32 s2, s5, 31
; GFX9-NEXT:    s_add_u32 s4, s4, s2
; GFX9-NEXT:    s_mov_b32 s3, s2
; GFX9-NEXT:    s_addc_u32 s5, s5, s2
; GFX9-NEXT:    s_xor_b64 s[8:9], s[4:5], s[2:3]
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    s_sub_u32 s0, 0, s8
; GFX9-NEXT:    s_subb_u32 s1, 0, s9
; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v1
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
; GFX9-NEXT:    v_readfirstlane_b32 s11, v1
; GFX9-NEXT:    s_mul_i32 s12, s0, s10
; GFX9-NEXT:    s_mul_hi_u32 s14, s0, s11
; GFX9-NEXT:    s_mul_i32 s13, s1, s11
; GFX9-NEXT:    s_add_i32 s12, s14, s12
; GFX9-NEXT:    s_add_i32 s12, s12, s13
; GFX9-NEXT:    s_mul_i32 s15, s0, s11
; GFX9-NEXT:    s_mul_hi_u32 s13, s11, s12
; GFX9-NEXT:    s_mul_i32 s14, s11, s12
; GFX9-NEXT:    s_mul_hi_u32 s11, s11, s15
; GFX9-NEXT:    s_add_u32 s11, s11, s14
; GFX9-NEXT:    s_addc_u32 s13, 0, s13
; GFX9-NEXT:    s_mul_hi_u32 s16, s10, s15
; GFX9-NEXT:    s_mul_i32 s15, s10, s15
; GFX9-NEXT:    s_add_u32 s11, s11, s15
; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s12
; GFX9-NEXT:    s_addc_u32 s11, s13, s16
; GFX9-NEXT:    s_addc_u32 s13, s14, 0
; GFX9-NEXT:    s_mul_i32 s12, s10, s12
; GFX9-NEXT:    s_add_u32 s11, s11, s12
; GFX9-NEXT:    s_addc_u32 s12, 0, s13
; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s11, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s10, s10, s12
; GFX9-NEXT:    v_readfirstlane_b32 s12, v1
; GFX9-NEXT:    s_mul_i32 s11, s0, s10
; GFX9-NEXT:    s_mul_hi_u32 s13, s0, s12
; GFX9-NEXT:    s_add_i32 s11, s13, s11
; GFX9-NEXT:    s_mul_i32 s1, s1, s12
; GFX9-NEXT:    s_add_i32 s11, s11, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, s12
; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s0
; GFX9-NEXT:    s_mul_i32 s14, s10, s0
; GFX9-NEXT:    s_mul_i32 s16, s12, s11
; GFX9-NEXT:    s_mul_hi_u32 s0, s12, s0
; GFX9-NEXT:    s_mul_hi_u32 s15, s12, s11
; GFX9-NEXT:    s_add_u32 s0, s0, s16
; GFX9-NEXT:    s_addc_u32 s12, 0, s15
; GFX9-NEXT:    s_add_u32 s0, s0, s14
; GFX9-NEXT:    s_mul_hi_u32 s1, s10, s11
; GFX9-NEXT:    s_addc_u32 s0, s12, s13
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    s_mul_i32 s11, s10, s11
; GFX9-NEXT:    s_add_u32 s0, s0, s11
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s0, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s12, s10, s1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s10, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s10
; GFX9-NEXT:    s_mov_b32 s11, s10
; GFX9-NEXT:    s_addc_u32 s1, s7, s10
; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[10:11]
; GFX9-NEXT:    v_readfirstlane_b32 s13, v1
; GFX9-NEXT:    s_mul_i32 s1, s6, s12
; GFX9-NEXT:    s_mul_hi_u32 s14, s6, s13
; GFX9-NEXT:    s_mul_hi_u32 s0, s6, s12
; GFX9-NEXT:    s_add_u32 s1, s14, s1
; GFX9-NEXT:    s_addc_u32 s0, 0, s0
; GFX9-NEXT:    s_mul_hi_u32 s15, s7, s13
; GFX9-NEXT:    s_mul_i32 s13, s7, s13
; GFX9-NEXT:    s_add_u32 s1, s1, s13
; GFX9-NEXT:    s_mul_hi_u32 s14, s7, s12
; GFX9-NEXT:    s_addc_u32 s0, s0, s15
; GFX9-NEXT:    s_addc_u32 s1, s14, 0
; GFX9-NEXT:    s_mul_i32 s12, s7, s12
; GFX9-NEXT:    s_add_u32 s12, s0, s12
; GFX9-NEXT:    s_addc_u32 s13, 0, s1
; GFX9-NEXT:    s_mul_i32 s0, s8, s13
; GFX9-NEXT:    s_mul_hi_u32 s1, s8, s12
; GFX9-NEXT:    s_add_i32 s0, s1, s0
; GFX9-NEXT:    s_mul_i32 s1, s9, s12
; GFX9-NEXT:    s_add_i32 s14, s0, s1
; GFX9-NEXT:    s_mul_i32 s1, s8, s12
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    s_sub_i32 s0, s7, s14
; GFX9-NEXT:    v_sub_co_u32_e32 v1, vcc, s6, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s6, s0, s9
; GFX9-NEXT:    v_subrev_co_u32_e64 v2, s[0:1], s8, v1
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s6, s6, 0
; GFX9-NEXT:    s_cmp_ge_u32 s6, s9
; GFX9-NEXT:    s_cselect_b32 s15, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v2
; GFX9-NEXT:    s_cmp_eq_u32 s6, s9
; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v3, s15
; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT:    s_add_u32 s6, s12, 2
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[0:1]
; GFX9-NEXT:    s_addc_u32 s0, s13, 0
; GFX9-NEXT:    s_add_u32 s15, s12, 1
; GFX9-NEXT:    s_addc_u32 s1, s13, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s7, s7, s14
; GFX9-NEXT:    s_cmp_ge_u32 s7, s9
; GFX9-NEXT:    v_mov_b32_e32 v3, s1
; GFX9-NEXT:    v_mov_b32_e32 v4, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
; GFX9-NEXT:    s_cselect_b32 s14, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s8, v1
; GFX9-NEXT:    s_cmp_eq_u32 s7, s9
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v3, v4, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v3, s14
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v3, s13
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v2, vcc
; GFX9-NEXT:    v_mov_b32_e32 v2, s15
; GFX9-NEXT:    v_mov_b32_e32 v3, s6
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v3, s12
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
; GFX9-NEXT:    s_xor_b64 s[0:1], s[10:11], s[2:3]
; GFX9-NEXT:    v_xor_b32_e32 v2, s0, v2
; GFX9-NEXT:    v_xor_b32_e32 v3, s1, v1
; GFX9-NEXT:    v_mov_b32_e32 v4, s1
; GFX9-NEXT:    v_subrev_co_u32_e32 v1, vcc, s0, v2
; GFX9-NEXT:    v_subb_co_u32_e32 v2, vcc, v3, v4, vcc
; GFX9-NEXT:    global_store_dwordx2 v0, v[1:2], s[4:5]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i64 4096, %y
  %r = sdiv i64 %x, %shl.y
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

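; Both vector elements are divided by 4096, so each lane lowers to the
; shift-based power-of-two sequence.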
define amdgpu_kernel void @sdiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
; CHECK-LABEL: @sdiv_v2i64_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = sdiv i64 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i64 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v2i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s8, s5, 31
; GFX6-NEXT:    s_lshr_b32 s8, s8, 20
; GFX6-NEXT:    s_add_u32 s4, s4, s8
; GFX6-NEXT:    s_addc_u32 s5, s5, 0
; GFX6-NEXT:    s_ashr_i32 s8, s7, 31
; GFX6-NEXT:    s_ashr_i64 s[4:5], s[4:5], 12
; GFX6-NEXT:    s_lshr_b32 s8, s8, 20
; GFX6-NEXT:    s_add_u32 s6, s6, s8
; GFX6-NEXT:    s_addc_u32 s7, s7, 0
; GFX6-NEXT:    s_ashr_i64 s[6:7], s[6:7], 12
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_mov_b32_e32 v3, s7
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v2i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s5, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_add_u32 s0, s4, s0
; GFX9-NEXT:    s_addc_u32 s1, s5, 0
; GFX9-NEXT:    s_ashr_i32 s4, s7, 31
; GFX9-NEXT:    s_ashr_i64 s[0:1], s[0:1], 12
; GFX9-NEXT:    s_lshr_b32 s4, s4, 20
; GFX9-NEXT:    s_add_u32 s4, s6, s4
; GFX9-NEXT:    s_addc_u32 s5, s7, 0
; GFX9-NEXT:    s_ashr_i64 s[4:5], s[4:5], 12
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_mov_b32_e32 v3, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <2 x i64> %x, <i64 4096, i64 4096>
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

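; Element 0 divides by 4096 (a power of two) and element 1 by 4095, so the
; shift-based lowering and the reciprocal-based expansion appear side by side.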
define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
; CHECK-LABEL: @ssdiv_v2i64_mixed_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = sdiv i64 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i64 [[TMP4]], 4095
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: ssdiv_v2i64_mixed_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x457ff000
; GFX6-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX6-NEXT:    v_mac_f32_e32 v0, 0, v1
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_movk_i32 s6, 0xf001
; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s8, s1, 31
; GFX6-NEXT:    s_lshr_b32 s8, s8, 20
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s6
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, s6
; GFX6-NEXT:    s_add_u32 s0, s0, s8
; GFX6-NEXT:    s_addc_u32 s1, s1, 0
; GFX6-NEXT:    s_ashr_i64 s[8:9], s[0:1], 12
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s6
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v2, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
; GFX6-NEXT:    s_add_u32 s0, s2, s10
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v3, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s6
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s6
; GFX6-NEXT:    s_mov_b32 s11, s10
; GFX6-NEXT:    s_addc_u32 s1, s3, s10
; GFX6-NEXT:    s_xor_b64 s[0:1], s[0:1], s[10:11]
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s6
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s0, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s0, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s1, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s1, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s1, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
; GFX6-NEXT:    s_movk_i32 s2, 0xfff
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, v1, s2
; GFX6-NEXT:    v_mul_hi_u32 v5, v0, s2
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
; GFX6-NEXT:    v_mul_lo_u32 v8, v0, s2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mov_b32_e32 v5, s1
; GFX6-NEXT:    v_sub_i32_e32 v8, vcc, s0, v8
; GFX6-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v5, vcc, s2, v8
; GFX6-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
; GFX6-NEXT:    s_movk_i32 s0, 0xffe
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
; GFX6-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v8
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX6-NEXT:    v_xor_b32_e32 v0, s10, v0
; GFX6-NEXT:    v_xor_b32_e32 v1, s10, v1
; GFX6-NEXT:    v_mov_b32_e32 v3, s10
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s10, v0
; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mov_b32_e32 v0, s8
; GFX6-NEXT:    v_mov_b32_e32 v1, s9
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: ssdiv_v2i64_mixed_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_mov_b32_e32 v0, 0x457ff000
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX9-NEXT:    v_mac_f32_e32 v0, 0, v1
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    s_movk_i32 s8, 0xf001
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s5, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    v_mul_hi_u32 v2, v0, s8
; GFX9-NEXT:    v_mul_lo_u32 v3, v1, s8
; GFX9-NEXT:    v_mul_lo_u32 v4, v0, s8
; GFX9-NEXT:    s_add_u32 s0, s4, s0
; GFX9-NEXT:    s_addc_u32 s1, s5, 0
; GFX9-NEXT:    v_add_u32_e32 v2, v2, v3
; GFX9-NEXT:    v_sub_u32_e32 v2, v2, v0
; GFX9-NEXT:    v_mul_lo_u32 v3, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v5, v0, v4
; GFX9-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v5, v3
; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v6, vcc
; GFX9-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX9-NEXT:    s_ashr_i64 s[4:5], s[0:1], 12
; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v4, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v7, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, s8
; GFX9-NEXT:    v_mul_hi_u32 v3, v0, s8
; GFX9-NEXT:    v_mul_lo_u32 v4, v0, s8
; GFX9-NEXT:    s_ashr_i32 s8, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s8
; GFX9-NEXT:    v_add_u32_e32 v2, v3, v2
; GFX9-NEXT:    v_sub_u32_e32 v2, v2, v0
; GFX9-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v7, v0, v4
; GFX9-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX9-NEXT:    v_mul_hi_u32 v5, v1, v4
; GFX9-NEXT:    v_mul_lo_u32 v4, v1, v4
; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v7, v6
; GFX9-NEXT:    v_addc_co_u32_e32 v7, vcc, 0, v8, vcc
; GFX9-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v6, v4
; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v7, v5, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT:    s_mov_b32 s9, s8
; GFX9-NEXT:    s_addc_u32 s1, s7, s8
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], s[8:9]
; GFX9-NEXT:    v_mul_lo_u32 v2, s0, v1
; GFX9-NEXT:    v_mul_hi_u32 v3, s0, v0
; GFX9-NEXT:    v_mul_hi_u32 v5, s0, v1
; GFX9-NEXT:    v_mul_hi_u32 v6, s1, v1
; GFX9-NEXT:    v_mul_lo_u32 v1, s1, v1
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v5, vcc
; GFX9-NEXT:    v_mul_lo_u32 v5, s1, v0
; GFX9-NEXT:    v_mul_hi_u32 v0, s1, v0
; GFX9-NEXT:    s_movk_i32 s6, 0xfff
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v5
; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v6, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, 2, v0
; GFX9-NEXT:    v_mul_lo_u32 v5, v1, s6
; GFX9-NEXT:    v_mul_hi_u32 v6, v0, s6
; GFX9-NEXT:    v_mul_lo_u32 v9, v0, s6
; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
; GFX9-NEXT:    v_add_co_u32_e32 v7, vcc, 1, v0
; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, 0, v1, vcc
; GFX9-NEXT:    v_add_u32_e32 v5, v6, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, s1
; GFX9-NEXT:    v_sub_co_u32_e32 v9, vcc, s0, v9
; GFX9-NEXT:    v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
; GFX9-NEXT:    v_subrev_co_u32_e32 v6, vcc, s6, v9
; GFX9-NEXT:    v_subbrev_co_u32_e32 v10, vcc, 0, v5, vcc
; GFX9-NEXT:    s_movk_i32 s0, 0xffe
; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v6
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v10
; GFX9-NEXT:    v_cndmask_b32_e32 v6, -1, v6, vcc
; GFX9-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v9
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v5, -1, v6, s[0:1]
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v7, v2, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v8, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX9-NEXT:    v_xor_b32_e32 v0, s8, v0
; GFX9-NEXT:    v_xor_b32_e32 v1, s8, v1
; GFX9-NEXT:    v_mov_b32_e32 v3, s8
; GFX9-NEXT:    v_subrev_co_u32_e32 v2, vcc, s8, v0
; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
; GFX9-NEXT:    v_mov_b32_e32 v0, s4
; GFX9-NEXT:    v_mov_b32_e32 v1, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = sdiv <2 x i64> %x, <i64 4096, i64 4095>
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

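; Vector form of the shifted power-of-two denominator: the shift amount is not
; a compile-time constant, so both lanes require the full division expansion.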
define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: @sdiv_v2i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = sdiv i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP7:%.*]] = sdiv i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: sdiv_v2i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b64 s[12:13], 0x1000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b64 s[8:9], s[12:13], s8
; GFX6-NEXT:    s_lshl_b64 s[2:3], s[12:13], s10
; GFX6-NEXT:    s_ashr_i32 s14, s9, 31
; GFX6-NEXT:    s_add_u32 s8, s8, s14
; GFX6-NEXT:    s_mov_b32 s15, s14
; GFX6-NEXT:    s_addc_u32 s9, s9, s14
; GFX6-NEXT:    s_xor_b64 s[12:13], s[8:9], s[14:15]
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s12
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s13
; GFX6-NEXT:    s_sub_u32 s10, 0, s12
; GFX6-NEXT:    s_subb_u32 s11, 0, s13
; GFX6-NEXT:    s_ashr_i32 s16, s5, 31
; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
; GFX6-NEXT:    s_add_u32 s0, s4, s16
; GFX6-NEXT:    s_mov_b32 s17, s16
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_addc_u32 s1, s5, s16
; GFX6-NEXT:    s_xor_b64 s[4:5], s[0:1], s[16:17]
; GFX6-NEXT:    v_mul_lo_u32 v2, s10, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v0
; GFX6-NEXT:    v_mul_lo_u32 v5, s11, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s10, v0
; GFX6-NEXT:    s_xor_b64 s[14:15], s[16:17], s[14:15]
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s10, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s11, v0
; GFX6-NEXT:    s_mov_b32 s11, 0xf000
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, s10, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s5, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s5, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
; GFX6-NEXT:    s_mov_b32 s10, -1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s12, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s13, v0
; GFX6-NEXT:    v_mov_b32_e32 v5, s13
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, s12, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s5, v2
; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s4, v3
; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s12, v3
; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s13, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
; GFX6-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
; GFX6-NEXT:    s_ashr_i32 s4, s3, 31
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX6-NEXT:    s_add_u32 s2, s2, s4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v6, s5
; GFX6-NEXT:    s_mov_b32 s5, s4
; GFX6-NEXT:    s_addc_u32 s3, s3, s4
; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
; GFX6-NEXT:    v_cvt_f32_u32_e32 v8, s2
; GFX6-NEXT:    v_cvt_f32_u32_e32 v9, s3
; GFX6-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s13, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s12, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v2
; GFX6-NEXT:    v_mac_f32_e32 v8, 0x4f800000, v9
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
; GFX6-NEXT:    v_rcp_f32_e32 v3, v8
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
; GFX6-NEXT:    v_mul_f32_e32 v3, 0x5f7ffffc, v3
; GFX6-NEXT:    v_mul_f32_e32 v4, 0x2f800000, v3
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mac_f32_e32 v3, 0xcf800000, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    s_sub_u32 s0, 0, s2
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    v_mul_hi_u32 v2, s0, v3
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v4
; GFX6-NEXT:    s_subb_u32 s1, 0, s3
; GFX6-NEXT:    v_mul_lo_u32 v6, s1, v3
; GFX6-NEXT:    s_ashr_i32 s12, s7, 31
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v3, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v3, v5
; GFX6-NEXT:    v_mul_hi_u32 v8, v3, v2
; GFX6-NEXT:    v_mul_hi_u32 v9, v4, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v4, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v8, v4, v5
; GFX6-NEXT:    v_mul_hi_u32 v5, v4, v5
; GFX6-NEXT:    s_mov_b32 s13, s12
; GFX6-NEXT:    v_xor_b32_e32 v0, s14, v0
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v6, v8
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v9, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s0, v3
; GFX6-NEXT:    v_mul_hi_u32 v5, s0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, s1, v2
; GFX6-NEXT:    v_xor_b32_e32 v1, s15, v1
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
; GFX6-NEXT:    v_mul_lo_u32 v8, v2, v4
; GFX6-NEXT:    v_mul_hi_u32 v9, v2, v5
; GFX6-NEXT:    v_mul_hi_u32 v10, v2, v4
; GFX6-NEXT:    v_mul_hi_u32 v7, v3, v5
; GFX6-NEXT:    v_mul_lo_u32 v5, v3, v5
; GFX6-NEXT:    v_mul_hi_u32 v6, v3, v4
; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
; GFX6-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, v3, v4
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v9, v7, vcc
; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    s_add_u32 s0, s6, s12
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    s_addc_u32 s1, s7, s12
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v5, vcc
; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[12:13]
; GFX6-NEXT:    v_mul_lo_u32 v4, s6, v3
; GFX6-NEXT:    v_mul_hi_u32 v5, s6, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, s6, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, s7, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, s7, v3
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
; GFX6-NEXT:    v_mul_lo_u32 v7, s7, v2
; GFX6-NEXT:    v_mul_hi_u32 v2, s7, v2
; GFX6-NEXT:    v_mov_b32_e32 v6, s15
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v2, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s2, v3
; GFX6-NEXT:    v_mul_hi_u32 v5, s2, v2
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s14, v0
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, s3, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, s2, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s7, v4
; GFX6-NEXT:    v_mov_b32_e32 v7, s3
; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, s6, v5
; GFX6-NEXT:    v_subb_u32_e64 v6, s[0:1], v6, v7, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v7, s[0:1], s2, v5
; GFX6-NEXT:    v_subbrev_u32_e64 v6, s[0:1], 0, v6, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v7
; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v6, v8, v7, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v2
; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v3, s[0:1]
; GFX6-NEXT:    v_add_i32_e64 v9, s[0:1], 1, v2
; GFX6-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v6, v10, v8, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v8, s7
; GFX6-NEXT:    v_subb_u32_e32 v4, vcc, v8, v4, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s2, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v4
; GFX6-NEXT:    v_cndmask_b32_e32 v4, v8, v5, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v9, v7, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT:    s_xor_b64 s[0:1], s[12:13], s[4:5]
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
; GFX6-NEXT:    v_xor_b32_e32 v2, s0, v2
; GFX6-NEXT:    v_xor_b32_e32 v3, s1, v3
; GFX6-NEXT:    v_mov_b32_e32 v4, s1
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s0, v2
; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: sdiv_v2i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b64 s[10:11], s[2:3], s10
; GFX9-NEXT:    s_lshl_b64 s[2:3], s[2:3], s8
; GFX9-NEXT:    s_ashr_i32 s8, s3, 31
; GFX9-NEXT:    s_add_u32 s2, s2, s8
; GFX9-NEXT:    s_mov_b32 s9, s8
; GFX9-NEXT:    s_addc_u32 s3, s3, s8
; GFX9-NEXT:    s_xor_b64 s[12:13], s[2:3], s[8:9]
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s12
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s13
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    s_sub_u32 s0, 0, s12
; GFX9-NEXT:    s_subb_u32 s1, 0, s13
; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s14, v1
; GFX9-NEXT:    v_readfirstlane_b32 s15, v0
; GFX9-NEXT:    s_mul_i32 s16, s0, s14
; GFX9-NEXT:    s_mul_hi_u32 s18, s0, s15
; GFX9-NEXT:    s_mul_i32 s17, s1, s15
; GFX9-NEXT:    s_add_i32 s16, s18, s16
; GFX9-NEXT:    s_add_i32 s16, s16, s17
; GFX9-NEXT:    s_mul_i32 s19, s0, s15
; GFX9-NEXT:    s_mul_hi_u32 s17, s15, s16
; GFX9-NEXT:    s_mul_i32 s18, s15, s16
; GFX9-NEXT:    s_mul_hi_u32 s15, s15, s19
; GFX9-NEXT:    s_add_u32 s15, s15, s18
; GFX9-NEXT:    s_addc_u32 s17, 0, s17
; GFX9-NEXT:    s_mul_hi_u32 s20, s14, s19
; GFX9-NEXT:    s_mul_i32 s19, s14, s19
; GFX9-NEXT:    s_add_u32 s15, s15, s19
; GFX9-NEXT:    s_mul_hi_u32 s18, s14, s16
; GFX9-NEXT:    s_addc_u32 s15, s17, s20
; GFX9-NEXT:    s_addc_u32 s17, s18, 0
; GFX9-NEXT:    s_mul_i32 s16, s14, s16
; GFX9-NEXT:    s_add_u32 s15, s15, s16
; GFX9-NEXT:    s_addc_u32 s16, 0, s17
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s15, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s14, s14, s16
; GFX9-NEXT:    v_readfirstlane_b32 s16, v0
; GFX9-NEXT:    s_mul_i32 s15, s0, s14
; GFX9-NEXT:    s_mul_hi_u32 s17, s0, s16
; GFX9-NEXT:    s_add_i32 s15, s17, s15
; GFX9-NEXT:    s_mul_i32 s1, s1, s16
; GFX9-NEXT:    s_add_i32 s15, s15, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, s16
; GFX9-NEXT:    s_mul_hi_u32 s17, s14, s0
; GFX9-NEXT:    s_mul_i32 s18, s14, s0
; GFX9-NEXT:    s_mul_i32 s20, s16, s15
; GFX9-NEXT:    s_mul_hi_u32 s0, s16, s0
; GFX9-NEXT:    s_mul_hi_u32 s19, s16, s15
; GFX9-NEXT:    s_add_u32 s0, s0, s20
; GFX9-NEXT:    s_addc_u32 s16, 0, s19
; GFX9-NEXT:    s_add_u32 s0, s0, s18
; GFX9-NEXT:    s_mul_hi_u32 s1, s14, s15
; GFX9-NEXT:    s_addc_u32 s0, s16, s17
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    s_mul_i32 s15, s14, s15
; GFX9-NEXT:    s_add_u32 s0, s0, s15
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s16, s14, s1
; GFX9-NEXT:    s_ashr_i32 s14, s5, 31
; GFX9-NEXT:    s_add_u32 s0, s4, s14
; GFX9-NEXT:    s_mov_b32 s15, s14
; GFX9-NEXT:    s_addc_u32 s1, s5, s14
; GFX9-NEXT:    s_xor_b64 s[4:5], s[0:1], s[14:15]
; GFX9-NEXT:    v_readfirstlane_b32 s17, v0
; GFX9-NEXT:    s_mul_i32 s1, s4, s16
; GFX9-NEXT:    s_mul_hi_u32 s18, s4, s17
; GFX9-NEXT:    s_mul_hi_u32 s0, s4, s16
; GFX9-NEXT:    s_add_u32 s1, s18, s1
; GFX9-NEXT:    s_addc_u32 s0, 0, s0
; GFX9-NEXT:    s_mul_hi_u32 s19, s5, s17
; GFX9-NEXT:    s_mul_i32 s17, s5, s17
; GFX9-NEXT:    s_add_u32 s1, s1, s17
; GFX9-NEXT:    s_mul_hi_u32 s18, s5, s16
; GFX9-NEXT:    s_addc_u32 s0, s0, s19
; GFX9-NEXT:    s_addc_u32 s1, s18, 0
; GFX9-NEXT:    s_mul_i32 s16, s5, s16
; GFX9-NEXT:    s_add_u32 s16, s0, s16
; GFX9-NEXT:    s_addc_u32 s17, 0, s1
; GFX9-NEXT:    s_mul_i32 s0, s12, s17
; GFX9-NEXT:    s_mul_hi_u32 s1, s12, s16
; GFX9-NEXT:    s_add_i32 s0, s1, s0
; GFX9-NEXT:    s_mul_i32 s1, s13, s16
; GFX9-NEXT:    s_add_i32 s18, s0, s1
; GFX9-NEXT:    s_mul_i32 s1, s12, s16
; GFX9-NEXT:    v_mov_b32_e32 v0, s1
; GFX9-NEXT:    s_sub_i32 s0, s5, s18
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s4, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s4, s0, s13
; GFX9-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s12, v0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s4, s4, 0
; GFX9-NEXT:    s_cmp_ge_u32 s4, s13
; GFX9-NEXT:    s_cselect_b32 s19, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v1
; GFX9-NEXT:    s_cmp_eq_u32 s4, s13
; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v2, s19
; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT:    s_add_u32 s4, s16, 2
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v2, v1, s[0:1]
; GFX9-NEXT:    s_addc_u32 s0, s17, 0
; GFX9-NEXT:    s_add_u32 s19, s16, 1
; GFX9-NEXT:    s_addc_u32 s1, s17, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s5, s5, s18
; GFX9-NEXT:    s_cmp_ge_u32 s5, s13
; GFX9-NEXT:    v_mov_b32_e32 v2, s1
; GFX9-NEXT:    v_mov_b32_e32 v3, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
; GFX9-NEXT:    s_cselect_b32 s18, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
; GFX9-NEXT:    s_cmp_eq_u32 s5, s13
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v2, s18
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
; GFX9-NEXT:    v_mov_b32_e32 v2, s17
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v1, s19
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s[0:1]
; GFX9-NEXT:    s_xor_b64 s[0:1], s[14:15], s[8:9]
; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
; GFX9-NEXT:    s_add_u32 s8, s10, s4
; GFX9-NEXT:    s_mov_b32 s5, s4
; GFX9-NEXT:    s_addc_u32 s9, s11, s4
; GFX9-NEXT:    v_mov_b32_e32 v2, s16
; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[4:5]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s9
; GFX9-NEXT:    v_xor_b32_e32 v1, s0, v1
; GFX9-NEXT:    v_xor_b32_e32 v5, s1, v0
; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s0, v1
; GFX9-NEXT:    v_mac_f32_e32 v2, 0x4f800000, v3
; GFX9-NEXT:    v_rcp_f32_e32 v2, v2
; GFX9-NEXT:    s_sub_u32 s0, 0, s8
; GFX9-NEXT:    v_mov_b32_e32 v6, s1
; GFX9-NEXT:    s_subb_u32 s1, 0, s9
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v2
; GFX9-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mac_f32_e32 v2, 0xcf800000, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v5, v6, vcc
; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
; GFX9-NEXT:    v_readfirstlane_b32 s13, v3
; GFX9-NEXT:    s_mul_hi_u32 s12, s0, s10
; GFX9-NEXT:    s_mul_i32 s14, s0, s13
; GFX9-NEXT:    s_mul_i32 s11, s1, s10
; GFX9-NEXT:    s_add_i32 s12, s12, s14
; GFX9-NEXT:    s_add_i32 s12, s12, s11
; GFX9-NEXT:    s_mul_i32 s15, s0, s10
; GFX9-NEXT:    s_mul_hi_u32 s11, s10, s12
; GFX9-NEXT:    s_mul_i32 s14, s10, s12
; GFX9-NEXT:    s_mul_hi_u32 s10, s10, s15
; GFX9-NEXT:    s_add_u32 s10, s10, s14
; GFX9-NEXT:    s_addc_u32 s11, 0, s11
; GFX9-NEXT:    s_mul_hi_u32 s16, s13, s15
; GFX9-NEXT:    s_mul_i32 s15, s13, s15
; GFX9-NEXT:    s_add_u32 s10, s10, s15
; GFX9-NEXT:    s_mul_hi_u32 s14, s13, s12
; GFX9-NEXT:    s_addc_u32 s10, s11, s16
; GFX9-NEXT:    s_addc_u32 s11, s14, 0
; GFX9-NEXT:    s_mul_i32 s12, s13, s12
; GFX9-NEXT:    s_add_u32 s10, s10, s12
; GFX9-NEXT:    s_addc_u32 s11, 0, s11
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s10, v2
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s10, s13, s11
; GFX9-NEXT:    v_readfirstlane_b32 s12, v2
; GFX9-NEXT:    s_mul_i32 s11, s0, s10
; GFX9-NEXT:    s_mul_hi_u32 s13, s0, s12
; GFX9-NEXT:    s_add_i32 s11, s13, s11
; GFX9-NEXT:    s_mul_i32 s1, s1, s12
; GFX9-NEXT:    s_add_i32 s11, s11, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, s12
; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s0
; GFX9-NEXT:    s_mul_i32 s14, s10, s0
; GFX9-NEXT:    s_mul_i32 s16, s12, s11
; GFX9-NEXT:    s_mul_hi_u32 s0, s12, s0
; GFX9-NEXT:    s_mul_hi_u32 s15, s12, s11
; GFX9-NEXT:    s_add_u32 s0, s0, s16
; GFX9-NEXT:    s_addc_u32 s12, 0, s15
; GFX9-NEXT:    s_add_u32 s0, s0, s14
; GFX9-NEXT:    s_mul_hi_u32 s1, s10, s11
; GFX9-NEXT:    s_addc_u32 s0, s12, s13
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    s_mul_i32 s11, s10, s11
; GFX9-NEXT:    s_add_u32 s0, s0, s11
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s12, s10, s1
; GFX9-NEXT:    s_ashr_i32 s10, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s10
; GFX9-NEXT:    s_mov_b32 s11, s10
; GFX9-NEXT:    s_addc_u32 s1, s7, s10
; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[10:11]
; GFX9-NEXT:    v_readfirstlane_b32 s13, v2
; GFX9-NEXT:    s_mul_i32 s1, s6, s12
; GFX9-NEXT:    s_mul_hi_u32 s14, s6, s13
; GFX9-NEXT:    s_mul_hi_u32 s0, s6, s12
; GFX9-NEXT:    s_add_u32 s1, s14, s1
; GFX9-NEXT:    s_addc_u32 s0, 0, s0
; GFX9-NEXT:    s_mul_hi_u32 s15, s7, s13
; GFX9-NEXT:    s_mul_i32 s13, s7, s13
; GFX9-NEXT:    s_add_u32 s1, s1, s13
; GFX9-NEXT:    s_mul_hi_u32 s14, s7, s12
; GFX9-NEXT:    s_addc_u32 s0, s0, s15
; GFX9-NEXT:    s_addc_u32 s1, s14, 0
; GFX9-NEXT:    s_mul_i32 s12, s7, s12
; GFX9-NEXT:    s_add_u32 s12, s0, s12
; GFX9-NEXT:    s_addc_u32 s13, 0, s1
; GFX9-NEXT:    s_mul_i32 s0, s8, s13
; GFX9-NEXT:    s_mul_hi_u32 s1, s8, s12
; GFX9-NEXT:    s_add_i32 s0, s1, s0
; GFX9-NEXT:    s_mul_i32 s1, s9, s12
; GFX9-NEXT:    s_add_i32 s14, s0, s1
; GFX9-NEXT:    s_mul_i32 s1, s8, s12
; GFX9-NEXT:    v_mov_b32_e32 v2, s1
; GFX9-NEXT:    s_sub_i32 s0, s7, s14
; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, s6, v2
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s6, s0, s9
; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s8, v2
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s6, s6, 0
; GFX9-NEXT:    s_cmp_ge_u32 s6, s9
; GFX9-NEXT:    s_cselect_b32 s15, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v3
; GFX9-NEXT:    s_cmp_eq_u32 s6, s9
; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v5, s15
; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT:    s_add_u32 s6, s12, 2
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v3, s[0:1]
; GFX9-NEXT:    s_addc_u32 s0, s13, 0
; GFX9-NEXT:    s_add_u32 s15, s12, 1
; GFX9-NEXT:    s_addc_u32 s1, s13, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s7, s7, s14
; GFX9-NEXT:    s_cmp_ge_u32 s7, s9
; GFX9-NEXT:    v_mov_b32_e32 v5, s1
; GFX9-NEXT:    v_mov_b32_e32 v6, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
; GFX9-NEXT:    s_cselect_b32 s14, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s8, v2
; GFX9-NEXT:    s_cmp_eq_u32 s7, s9
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v6, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v5, s14
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
; GFX9-NEXT:    v_mov_b32_e32 v5, s13
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v5, v3, vcc
; GFX9-NEXT:    v_mov_b32_e32 v3, s15
; GFX9-NEXT:    v_mov_b32_e32 v5, s6
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v5, s12
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
; GFX9-NEXT:    s_xor_b64 s[0:1], s[10:11], s[4:5]
; GFX9-NEXT:    v_xor_b32_e32 v3, s0, v3
; GFX9-NEXT:    v_xor_b32_e32 v5, s1, v2
; GFX9-NEXT:    v_mov_b32_e32 v6, s1
; GFX9-NEXT:    v_subrev_co_u32_e32 v2, vcc, s0, v3
; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v6, vcc
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
  %r = sdiv <2 x i64> %x, %shl.y
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

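; Signed i64 remainder by the odd constant 1235195 (0x12d8fb), lowered via the
; reciprocal-based quotient expansion followed by a multiply-and-subtract.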
define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @srem_i64_oddk_denom(
; CHECK-NEXT:    [[R:%.*]] = srem i64 [[X:%.*]], 1235195
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i64_oddk_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    v_mov_b32_e32 v0, 0x4f800000
; GFX6-NEXT:    v_madak_f32 v0, 0, v0, 0x4996c7d8
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_mov_b32 s4, 0xffed2705
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s8, s3, 31
; GFX6-NEXT:    s_add_u32 s2, s2, s8
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s4
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s4
; GFX6-NEXT:    v_mul_lo_u32 v4, v0, s4
; GFX6-NEXT:    s_mov_b32 s9, s8
; GFX6-NEXT:    s_addc_u32 s3, s3, s8
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v2, v0
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[8:9]
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, s4
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s4
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, v0, s4
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s3, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s3, v0
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    s_mov_b32 s0, 0x12d8fb
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v1, v1, s0
; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s0
; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; GFX6-NEXT:    v_mov_b32_e32 v2, s3
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s0, v0
; GFX6-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
; GFX6-NEXT:    v_subrev_i32_e32 v4, vcc, s0, v2
; GFX6-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v3, vcc
; GFX6-NEXT:    s_mov_b32 s0, 0x12d8fa
; GFX6-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v6, -1, v6, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
; GFX6-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v0
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v5, -1, v5, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX6-NEXT:    v_xor_b32_e32 v0, s8, v0
; GFX6-NEXT:    v_xor_b32_e32 v1, s8, v1
; GFX6-NEXT:    v_mov_b32_e32 v2, s8
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i64_oddk_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    v_mov_b32_e32 v0, 0x4996c7d8
; GFX9-NEXT:    v_mov_b32_e32 v1, 0x4f800000
; GFX9-NEXT:    v_mac_f32_e32 v0, 0, v1
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s0, v1
; GFX9-NEXT:    v_readfirstlane_b32 s1, v0
; GFX9-NEXT:    s_mul_hi_u32 s2, s1, 0xffed2705
; GFX9-NEXT:    s_mul_i32 s3, s0, 0xffed2705
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_sub_i32 s2, s2, s1
; GFX9-NEXT:    s_mul_i32 s9, s1, 0xffed2705
; GFX9-NEXT:    s_mul_hi_u32 s3, s1, s2
; GFX9-NEXT:    s_mul_i32 s8, s1, s2
; GFX9-NEXT:    s_mul_hi_u32 s1, s1, s9
; GFX9-NEXT:    s_add_u32 s1, s1, s8
; GFX9-NEXT:    s_addc_u32 s3, 0, s3
; GFX9-NEXT:    s_mul_hi_u32 s10, s0, s9
; GFX9-NEXT:    s_mul_i32 s9, s0, s9
; GFX9-NEXT:    s_add_u32 s1, s1, s9
; GFX9-NEXT:    s_mul_hi_u32 s8, s0, s2
; GFX9-NEXT:    s_addc_u32 s1, s3, s10
; GFX9-NEXT:    s_addc_u32 s3, s8, 0
; GFX9-NEXT:    s_mul_i32 s2, s0, s2
; GFX9-NEXT:    s_add_u32 s1, s1, s2
; GFX9-NEXT:    s_addc_u32 s2, 0, s3
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s1, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s0, s0, s2
; GFX9-NEXT:    v_readfirstlane_b32 s2, v0
; GFX9-NEXT:    s_mul_i32 s1, s0, 0xffed2705
; GFX9-NEXT:    s_mul_hi_u32 s3, s2, 0xffed2705
; GFX9-NEXT:    s_add_i32 s3, s3, s1
; GFX9-NEXT:    s_sub_i32 s1, s3, s2
; GFX9-NEXT:    s_mul_i32 s8, s2, 0xffed2705
; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s1
; GFX9-NEXT:    s_mul_i32 s12, s2, s1
; GFX9-NEXT:    s_mul_hi_u32 s2, s2, s8
; GFX9-NEXT:    s_add_u32 s2, s2, s12
; GFX9-NEXT:    s_mul_hi_u32 s9, s0, s8
; GFX9-NEXT:    s_mul_i32 s10, s0, s8
; GFX9-NEXT:    s_addc_u32 s8, 0, s11
; GFX9-NEXT:    s_add_u32 s2, s2, s10
; GFX9-NEXT:    s_mul_hi_u32 s3, s0, s1
; GFX9-NEXT:    s_addc_u32 s2, s8, s9
; GFX9-NEXT:    s_addc_u32 s3, s3, 0
; GFX9-NEXT:    s_mul_i32 s1, s0, s1
; GFX9-NEXT:    s_add_u32 s1, s2, s1
; GFX9-NEXT:    s_addc_u32 s2, 0, s3
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s1, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s8, s0, s2
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s2, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s2
; GFX9-NEXT:    s_mov_b32 s3, s2
; GFX9-NEXT:    s_addc_u32 s1, s7, s2
; GFX9-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
; GFX9-NEXT:    v_readfirstlane_b32 s7, v0
; GFX9-NEXT:    s_mul_i32 s6, s0, s8
; GFX9-NEXT:    s_mul_hi_u32 s9, s0, s7
; GFX9-NEXT:    s_mul_hi_u32 s3, s0, s8
; GFX9-NEXT:    s_add_u32 s6, s9, s6
; GFX9-NEXT:    s_addc_u32 s3, 0, s3
; GFX9-NEXT:    s_mul_hi_u32 s10, s1, s7
; GFX9-NEXT:    s_mul_i32 s7, s1, s7
; GFX9-NEXT:    s_add_u32 s6, s6, s7
; GFX9-NEXT:    s_mul_hi_u32 s9, s1, s8
; GFX9-NEXT:    s_addc_u32 s3, s3, s10
; GFX9-NEXT:    s_addc_u32 s6, s9, 0
; GFX9-NEXT:    s_mul_i32 s7, s1, s8
; GFX9-NEXT:    s_add_u32 s3, s3, s7
; GFX9-NEXT:    s_addc_u32 s6, 0, s6
; GFX9-NEXT:    s_mul_hi_u32 s8, s3, 0x12d8fb
; GFX9-NEXT:    s_mul_i32 s3, s3, 0x12d8fb
; GFX9-NEXT:    s_mul_i32 s6, s6, 0x12d8fb
; GFX9-NEXT:    v_mov_b32_e32 v0, s3
; GFX9-NEXT:    s_add_i32 s8, s8, s6
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT:    s_mov_b32 s7, 0x12d8fb
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s3, s1, s8
; GFX9-NEXT:    v_subrev_co_u32_e32 v1, vcc, s7, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s0, s3, 0
; GFX9-NEXT:    v_subrev_co_u32_e32 v3, vcc, s7, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s1, s0, 0
; GFX9-NEXT:    s_mov_b32 s6, 0x12d8fa
; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s6, v1
; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v4, -1, v4, vcc
; GFX9-NEXT:    v_mov_b32_e32 v5, s0
; GFX9-NEXT:    v_mov_b32_e32 v6, s1
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
; GFX9-NEXT:    v_cmp_lt_u32_e64 s[0:1], s6, v0
; GFX9-NEXT:    s_cmp_eq_u32 s3, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT:    v_cndmask_b32_e64 v5, -1, v5, s[0:1]
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT:    v_mov_b32_e32 v6, s3
; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; GFX9-NEXT:    v_xor_b32_e32 v0, s2, v0
; GFX9-NEXT:    v_xor_b32_e32 v1, s2, v4
; GFX9-NEXT:    v_mov_b32_e32 v3, s2
; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s2, v0
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_endpgm
  %r = srem i64 %x, 1235195
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

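; srem by 4096 (a power of two) needs no division expansion: unlike the
; odd-constant case above, both GFX6 and GFX9 just bias a negative dividend
; (ashr by 31 then lshr by 20 yields the 4095 bias), clear the low 12 bits
; with the 0xfffff000 mask, and subtract that multiple of 4096 from the
; dividend to get the remainder.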
define amdgpu_kernel void @srem_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
; CHECK-LABEL: @srem_i64_pow2k_denom(
; CHECK-NEXT:    [[R:%.*]] = srem i64 [[X:%.*]], 4096
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    s_ashr_i32 s0, s3, 31
; GFX6-NEXT:    s_lshr_b32 s0, s0, 20
; GFX6-NEXT:    s_add_u32 s0, s2, s0
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    s_addc_u32 s1, s3, 0
; GFX6-NEXT:    s_and_b32 s0, s0, 0xfffff000
; GFX6-NEXT:    s_sub_u32 s0, s2, s0
; GFX6-NEXT:    s_subb_u32 s1, s3, s1
; GFX6-NEXT:    v_mov_b32_e32 v0, s0
; GFX6-NEXT:    v_mov_b32_e32 v1, s1
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_lshr_b32 s4, s4, 20
; GFX9-NEXT:    s_add_u32 s4, s2, s4
; GFX9-NEXT:    s_addc_u32 s5, s3, 0
; GFX9-NEXT:    s_and_b32 s4, s4, 0xfffff000
; GFX9-NEXT:    s_sub_u32 s2, s2, s4
; GFX9-NEXT:    s_subb_u32 s3, s3, s5
; GFX9-NEXT:    v_mov_b32_e32 v0, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s3
; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT:    s_endpgm
  %r = srem i64 %x, 4096
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

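; Here the divisor is 4096 << %y, so it is only a power of two at run time
; and the shift/mask trick above does not apply: both targets fall back to
; the full 64-bit remainder expansion (a v_rcp_f32-based reciprocal estimate
; refined with integer multiply/add steps, multiply-back and conditional
; correction), followed by the sign fix-up via xor/sub.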
define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %x, i64 %y) {
; CHECK-LABEL: @srem_i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = srem i64 [[X:%.*]], [[SHL_Y]]
; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xd
; GFX6-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX6-NEXT:    s_mov_b32 s7, 0xf000
; GFX6-NEXT:    s_mov_b32 s6, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
; GFX6-NEXT:    s_ashr_i32 s4, s3, 31
; GFX6-NEXT:    s_add_u32 s2, s2, s4
; GFX6-NEXT:    s_mov_b32 s5, s4
; GFX6-NEXT:    s_addc_u32 s3, s3, s4
; GFX6-NEXT:    s_xor_b64 s[8:9], s[2:3], s[4:5]
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX6-NEXT:    s_sub_u32 s4, 0, s8
; GFX6-NEXT:    s_subb_u32 s5, 0, s9
; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_add_u32 s2, s2, s10
; GFX6-NEXT:    s_mov_b32 s11, s10
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v5, s5, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s4, v0
; GFX6-NEXT:    s_addc_u32 s3, s3, s10
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    s_xor_b64 s[12:13], s[2:3], s[10:11]
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v0
; GFX6-NEXT:    s_mov_b32 s5, s1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, s4, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s12, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s12, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s13, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s13, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s13, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s13, v0
; GFX6-NEXT:    s_mov_b32 s4, s0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v1, s8, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, s8, v0
; GFX6-NEXT:    v_mul_lo_u32 v3, s9, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, s8, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s13, v1
; GFX6-NEXT:    v_mov_b32_e32 v3, s9
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s12, v0
; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v4, s[0:1], s8, v0
; GFX6-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v4
; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v5
; GFX6-NEXT:    v_subrev_i32_e64 v3, s[0:1], s8, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
; GFX6-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v5, s13
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    v_xor_b32_e32 v0, s10, v0
; GFX6-NEXT:    v_xor_b32_e32 v1, s10, v1
; GFX6-NEXT:    v_mov_b32_e32 v2, s10
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s10, v0
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x34
; GFX9-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
; GFX9-NEXT:    s_add_u32 s2, s2, s4
; GFX9-NEXT:    s_mov_b32 s5, s4
; GFX9-NEXT:    s_addc_u32 s3, s3, s4
; GFX9-NEXT:    s_xor_b64 s[8:9], s[2:3], s[4:5]
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    s_sub_u32 s0, 0, s8
; GFX9-NEXT:    s_subb_u32 s1, 0, s9
; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
; GFX9-NEXT:    v_mov_b32_e32 v0, 0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v1
; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
; GFX9-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
; GFX9-NEXT:    v_readfirstlane_b32 s3, v1
; GFX9-NEXT:    s_mul_i32 s10, s0, s2
; GFX9-NEXT:    s_mul_hi_u32 s12, s0, s3
; GFX9-NEXT:    s_mul_i32 s11, s1, s3
; GFX9-NEXT:    s_add_i32 s10, s12, s10
; GFX9-NEXT:    s_add_i32 s10, s10, s11
; GFX9-NEXT:    s_mul_i32 s13, s0, s3
; GFX9-NEXT:    s_mul_hi_u32 s11, s3, s10
; GFX9-NEXT:    s_mul_i32 s12, s3, s10
; GFX9-NEXT:    s_mul_hi_u32 s3, s3, s13
; GFX9-NEXT:    s_add_u32 s3, s3, s12
; GFX9-NEXT:    s_addc_u32 s11, 0, s11
; GFX9-NEXT:    s_mul_hi_u32 s14, s2, s13
; GFX9-NEXT:    s_mul_i32 s13, s2, s13
; GFX9-NEXT:    s_add_u32 s3, s3, s13
; GFX9-NEXT:    s_mul_hi_u32 s12, s2, s10
; GFX9-NEXT:    s_addc_u32 s3, s11, s14
; GFX9-NEXT:    s_addc_u32 s11, s12, 0
; GFX9-NEXT:    s_mul_i32 s10, s2, s10
; GFX9-NEXT:    s_add_u32 s3, s3, s10
; GFX9-NEXT:    s_addc_u32 s10, 0, s11
; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s3, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s2, s2, s10
; GFX9-NEXT:    v_readfirstlane_b32 s10, v1
; GFX9-NEXT:    s_mul_i32 s3, s0, s2
; GFX9-NEXT:    s_mul_hi_u32 s11, s0, s10
; GFX9-NEXT:    s_add_i32 s3, s11, s3
; GFX9-NEXT:    s_mul_i32 s1, s1, s10
; GFX9-NEXT:    s_add_i32 s3, s3, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, s10
; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s0
; GFX9-NEXT:    s_mul_i32 s12, s2, s0
; GFX9-NEXT:    s_mul_i32 s14, s10, s3
; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s0
; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s3
; GFX9-NEXT:    s_add_u32 s0, s0, s14
; GFX9-NEXT:    s_addc_u32 s10, 0, s13
; GFX9-NEXT:    s_add_u32 s0, s0, s12
; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
; GFX9-NEXT:    s_addc_u32 s0, s10, s11
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    s_mul_i32 s3, s2, s3
; GFX9-NEXT:    s_add_u32 s0, s0, s3
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s0, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s2, s2, s1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s10, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s10
; GFX9-NEXT:    s_mov_b32 s11, s10
; GFX9-NEXT:    s_addc_u32 s1, s7, s10
; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[10:11]
; GFX9-NEXT:    v_readfirstlane_b32 s3, v1
; GFX9-NEXT:    s_mul_i32 s1, s6, s2
; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s3
; GFX9-NEXT:    s_mul_hi_u32 s0, s6, s2
; GFX9-NEXT:    s_add_u32 s1, s11, s1
; GFX9-NEXT:    s_addc_u32 s0, 0, s0
; GFX9-NEXT:    s_mul_hi_u32 s12, s7, s3
; GFX9-NEXT:    s_mul_i32 s3, s7, s3
; GFX9-NEXT:    s_add_u32 s1, s1, s3
; GFX9-NEXT:    s_mul_hi_u32 s11, s7, s2
; GFX9-NEXT:    s_addc_u32 s0, s0, s12
; GFX9-NEXT:    s_addc_u32 s1, s11, 0
; GFX9-NEXT:    s_mul_i32 s2, s7, s2
; GFX9-NEXT:    s_add_u32 s0, s0, s2
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_i32 s1, s8, s1
; GFX9-NEXT:    s_mul_hi_u32 s2, s8, s0
; GFX9-NEXT:    s_add_i32 s1, s2, s1
; GFX9-NEXT:    s_mul_i32 s2, s9, s0
; GFX9-NEXT:    s_mul_i32 s0, s8, s0
; GFX9-NEXT:    s_add_i32 s11, s1, s2
; GFX9-NEXT:    v_mov_b32_e32 v1, s0
; GFX9-NEXT:    s_sub_i32 s1, s7, s11
; GFX9-NEXT:    v_sub_co_u32_e32 v1, vcc, s6, v1
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s6, s1, s9
; GFX9-NEXT:    v_subrev_co_u32_e64 v2, s[0:1], s8, v1
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s12, s6, 0
; GFX9-NEXT:    s_cmp_ge_u32 s12, s9
; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v2
; GFX9-NEXT:    s_cmp_eq_u32 s12, s9
; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[2:3]
; GFX9-NEXT:    v_mov_b32_e32 v4, s13
; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[2:3]
; GFX9-NEXT:    s_subb_u32 s2, s6, s9
; GFX9-NEXT:    v_subrev_co_u32_e64 v4, s[0:1], s8, v2
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s0, s2, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s2, s7, s11
; GFX9-NEXT:    s_cmp_ge_u32 s2, s9
; GFX9-NEXT:    v_mov_b32_e32 v5, s12
; GFX9-NEXT:    v_mov_b32_e32 v6, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
; GFX9-NEXT:    s_cselect_b32 s3, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s8, v1
; GFX9-NEXT:    s_cmp_eq_u32 s2, s9
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v6, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v6, s3
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v4, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v6, s2
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v3, v6, v3, vcc
; GFX9-NEXT:    v_xor_b32_e32 v1, s10, v1
; GFX9-NEXT:    v_xor_b32_e32 v2, s10, v3
; GFX9-NEXT:    v_mov_b32_e32 v3, s10
; GFX9-NEXT:    v_subrev_co_u32_e32 v1, vcc, s10, v1
; GFX9-NEXT:    v_subb_co_u32_e32 v2, vcc, v2, v3, vcc
; GFX9-NEXT:    global_store_dwordx2 v0, v[1:2], s[4:5]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl i64 4096, %y
  %r = srem i64 %x, %shl.y
  store i64 %r, i64 addrspace(1)* %out
  ret void
}

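; The <2 x i64> case is scalarized: the IR checks show each element pulled
; out with extractelement, reduced with an independent srem by 4096, and
; reinserted, and the ISA repeats the power-of-two shift/mask sequence per
; element before a single dwordx4 store.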
define amdgpu_kernel void @srem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
; CHECK-LABEL: @srem_v2i64_pow2k_denom(
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = srem i64 [[TMP1]], 4096
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = srem i64 [[TMP4]], 4096
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v2i64_pow2k_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX6-NEXT:    s_mov_b32 s3, 0xf000
; GFX6-NEXT:    s_mov_b32 s2, -1
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_ashr_i32 s8, s5, 31
; GFX6-NEXT:    s_lshr_b32 s8, s8, 20
; GFX6-NEXT:    s_add_u32 s8, s4, s8
; GFX6-NEXT:    s_addc_u32 s9, s5, 0
; GFX6-NEXT:    s_and_b32 s8, s8, 0xfffff000
; GFX6-NEXT:    s_sub_u32 s4, s4, s8
; GFX6-NEXT:    s_subb_u32 s5, s5, s9
; GFX6-NEXT:    s_ashr_i32 s8, s7, 31
; GFX6-NEXT:    s_lshr_b32 s8, s8, 20
; GFX6-NEXT:    s_add_u32 s8, s6, s8
; GFX6-NEXT:    s_addc_u32 s9, s7, 0
; GFX6-NEXT:    s_and_b32 s8, s8, 0xfffff000
; GFX6-NEXT:    s_sub_u32 s6, s6, s8
; GFX6-NEXT:    s_subb_u32 s7, s7, s9
; GFX6-NEXT:    v_mov_b32_e32 v0, s4
; GFX6-NEXT:    v_mov_b32_e32 v1, s5
; GFX6-NEXT:    v_mov_b32_e32 v2, s6
; GFX6-NEXT:    v_mov_b32_e32 v3, s7
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v2i64_pow2k_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_ashr_i32 s0, s5, 31
; GFX9-NEXT:    s_lshr_b32 s0, s0, 20
; GFX9-NEXT:    s_add_u32 s0, s4, s0
; GFX9-NEXT:    s_addc_u32 s1, s5, 0
; GFX9-NEXT:    s_and_b32 s0, s0, 0xfffff000
; GFX9-NEXT:    s_sub_u32 s0, s4, s0
; GFX9-NEXT:    s_subb_u32 s1, s5, s1
; GFX9-NEXT:    s_ashr_i32 s4, s7, 31
; GFX9-NEXT:    s_lshr_b32 s4, s4, 20
; GFX9-NEXT:    s_add_u32 s4, s6, s4
; GFX9-NEXT:    s_addc_u32 s5, s7, 0
; GFX9-NEXT:    s_and_b32 s4, s4, 0xfffff000
; GFX9-NEXT:    s_sub_u32 s4, s6, s4
; GFX9-NEXT:    s_subb_u32 s5, s7, s5
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    v_mov_b32_e32 v1, s1
; GFX9-NEXT:    v_mov_b32_e32 v2, s4
; GFX9-NEXT:    v_mov_b32_e32 v3, s5
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX9-NEXT:    s_endpgm
  %r = srem <2 x i64> %x, <i64 4096, i64 4096>
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}

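; Same scalarization for the shifted-divisor vector case: each lane gets its
; own full 64-bit expansion, so the GFX6 and GFX9 bodies are essentially two
; copies of the srem_i64_pow2_shl_denom sequence feeding one dwordx4 store.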
define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: @srem_v2i64_pow2_shl_denom(
; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = srem i64 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
; CHECK-NEXT:    [[TMP7:%.*]] = srem i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
; GFX6-LABEL: srem_v2i64_pow2_shl_denom:
; GFX6:       ; %bb.0:
; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
; GFX6-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    s_mov_b32 s11, 0xf000
; GFX6-NEXT:    s_lshl_b64 s[14:15], s[2:3], s10
; GFX6-NEXT:    s_lshl_b64 s[2:3], s[2:3], s8
; GFX6-NEXT:    s_ashr_i32 s8, s3, 31
; GFX6-NEXT:    s_add_u32 s2, s2, s8
; GFX6-NEXT:    s_mov_b32 s9, s8
; GFX6-NEXT:    s_addc_u32 s3, s3, s8
; GFX6-NEXT:    s_xor_b64 s[16:17], s[2:3], s[8:9]
; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s16
; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s17
; GFX6-NEXT:    s_sub_u32 s2, 0, s16
; GFX6-NEXT:    s_subb_u32 s3, 0, s17
; GFX6-NEXT:    s_ashr_i32 s12, s5, 31
; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
; GFX6-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
; GFX6-NEXT:    s_add_u32 s0, s4, s12
; GFX6-NEXT:    s_mov_b32 s13, s12
; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT:    s_addc_u32 s1, s5, s12
; GFX6-NEXT:    s_xor_b64 s[4:5], s[0:1], s[12:13]
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_lo_u32 v5, s3, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s2, v0
; GFX6-NEXT:    s_mov_b32 s10, -1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
; GFX6-NEXT:    v_mul_hi_u32 v7, v1, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_mul_lo_u32 v3, s2, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
; GFX6-NEXT:    v_mul_hi_u32 v4, s4, v1
; GFX6-NEXT:    v_mul_hi_u32 v5, s5, v1
; GFX6-NEXT:    v_mul_lo_u32 v1, s5, v1
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v0
; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
; GFX6-NEXT:    v_mul_lo_u32 v1, s16, v1
; GFX6-NEXT:    v_mul_hi_u32 v2, s16, v0
; GFX6-NEXT:    v_mul_lo_u32 v3, s17, v0
; GFX6-NEXT:    v_mul_lo_u32 v0, s16, v0
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s5, v1
; GFX6-NEXT:    v_mov_b32_e32 v3, s17
; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v4, s[0:1], s16, v0
; GFX6-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s17, v5
; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s16, v4
; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s17, v5
; GFX6-NEXT:    v_subrev_i32_e64 v3, s[0:1], s16, v4
; GFX6-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
; GFX6-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
; GFX6-NEXT:    s_ashr_i32 s2, s15, 31
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
; GFX6-NEXT:    s_add_u32 s4, s14, s2
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v5, s5
; GFX6-NEXT:    s_mov_b32 s3, s2
; GFX6-NEXT:    s_addc_u32 s5, s15, s2
; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[2:3]
; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s4
; GFX6-NEXT:    v_cvt_f32_u32_e32 v7, s5
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s17, v1
; GFX6-NEXT:    v_mac_f32_e32 v6, 0x4f800000, v7
; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s16, v0
; GFX6-NEXT:    v_rcp_f32_e32 v6, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s17, v1
; GFX6-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
; GFX6-NEXT:    v_mul_f32_e32 v3, 0x5f7ffffc, v6
; GFX6-NEXT:    v_mul_f32_e32 v4, 0x2f800000, v3
; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
; GFX6-NEXT:    v_mac_f32_e32 v3, 0xcf800000, v4
; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
; GFX6-NEXT:    s_sub_u32 s0, 0, s4
; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX6-NEXT:    v_mul_hi_u32 v2, s0, v3
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v4
; GFX6-NEXT:    s_subb_u32 s1, 0, s5
; GFX6-NEXT:    v_mul_lo_u32 v6, s1, v3
; GFX6-NEXT:    s_ashr_i32 s14, s7, 31
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v3
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, v3, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, v3, v5
; GFX6-NEXT:    v_mul_hi_u32 v8, v3, v2
; GFX6-NEXT:    v_mul_hi_u32 v9, v4, v2
; GFX6-NEXT:    v_mul_lo_u32 v2, v4, v2
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GFX6-NEXT:    v_mul_lo_u32 v8, v4, v5
; GFX6-NEXT:    v_mul_hi_u32 v5, v4, v5
; GFX6-NEXT:    s_mov_b32 s15, s14
; GFX6-NEXT:    v_xor_b32_e32 v0, s12, v0
; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v6, v8
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v9, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, s0, v3
; GFX6-NEXT:    v_mul_hi_u32 v5, s0, v2
; GFX6-NEXT:    v_mul_lo_u32 v6, s1, v2
; GFX6-NEXT:    v_xor_b32_e32 v1, s12, v1
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v2
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
; GFX6-NEXT:    v_mul_lo_u32 v8, v2, v4
; GFX6-NEXT:    v_mul_hi_u32 v9, v2, v5
; GFX6-NEXT:    v_mul_hi_u32 v10, v2, v4
; GFX6-NEXT:    v_mul_hi_u32 v7, v3, v5
; GFX6-NEXT:    v_mul_lo_u32 v5, v3, v5
; GFX6-NEXT:    v_mul_hi_u32 v6, v3, v4
; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
; GFX6-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
; GFX6-NEXT:    v_mul_lo_u32 v4, v3, v4
; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v9, v7, vcc
; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GFX6-NEXT:    s_add_u32 s0, s6, s14
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
; GFX6-NEXT:    s_addc_u32 s1, s7, s14
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v5, vcc
; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[14:15]
; GFX6-NEXT:    v_mul_lo_u32 v4, s6, v3
; GFX6-NEXT:    v_mul_hi_u32 v5, s6, v2
; GFX6-NEXT:    v_mul_hi_u32 v7, s6, v3
; GFX6-NEXT:    v_mul_hi_u32 v8, s7, v3
; GFX6-NEXT:    v_mul_lo_u32 v3, s7, v3
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
; GFX6-NEXT:    v_mul_lo_u32 v7, s7, v2
; GFX6-NEXT:    v_mul_hi_u32 v2, s7, v2
; GFX6-NEXT:    v_mov_b32_e32 v6, s12
; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v2, vcc
; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX6-NEXT:    v_mul_lo_u32 v3, s4, v3
; GFX6-NEXT:    v_mul_hi_u32 v4, s4, v2
; GFX6-NEXT:    v_mul_lo_u32 v5, s5, v2
; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v2
; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v6, vcc
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s7, v3
; GFX6-NEXT:    v_mov_b32_e32 v5, s5
; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
; GFX6-NEXT:    v_subrev_i32_e64 v6, s[0:1], s4, v2
; GFX6-NEXT:    v_subbrev_u32_e64 v7, s[2:3], 0, v4, s[0:1]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v7
; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s4, v6
; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[2:3]
; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s5, v7
; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s4, v6
; GFX6-NEXT:    v_cndmask_b32_e64 v8, v8, v9, s[2:3]
; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v8
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v7, v4, s[0:1]
; GFX6-NEXT:    v_mov_b32_e32 v7, s7
; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v7, v3, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s5, v3
; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s5, v3
; GFX6-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX6-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT:    v_xor_b32_e32 v2, s14, v2
; GFX6-NEXT:    v_xor_b32_e32 v3, s14, v3
; GFX6-NEXT:    v_mov_b32_e32 v4, s14
; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s14, v2
; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; GFX6-NEXT:    s_endpgm
;
; GFX9-LABEL: srem_v2i64_pow2_shl_denom:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x34
; GFX9-NEXT:    s_mov_b64 s[2:3], 0x1000
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_lshl_b64 s[10:11], s[2:3], s10
; GFX9-NEXT:    s_lshl_b64 s[2:3], s[2:3], s8
; GFX9-NEXT:    s_ashr_i32 s8, s3, 31
; GFX9-NEXT:    s_add_u32 s2, s2, s8
; GFX9-NEXT:    s_mov_b32 s9, s8
; GFX9-NEXT:    s_addc_u32 s3, s3, s8
; GFX9-NEXT:    s_xor_b64 s[12:13], s[2:3], s[8:9]
; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s12
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s13
; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
; GFX9-NEXT:    s_sub_u32 s0, 0, s12
; GFX9-NEXT:    s_subb_u32 s1, 0, s13
; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
; GFX9-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
; GFX9-NEXT:    v_trunc_f32_e32 v1, v1
; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
; GFX9-NEXT:    s_mul_i32 s14, s0, s2
; GFX9-NEXT:    s_mul_hi_u32 s16, s0, s3
; GFX9-NEXT:    s_mul_i32 s15, s1, s3
; GFX9-NEXT:    s_add_i32 s14, s16, s14
; GFX9-NEXT:    s_add_i32 s14, s14, s15
; GFX9-NEXT:    s_mul_i32 s17, s0, s3
; GFX9-NEXT:    s_mul_hi_u32 s15, s3, s14
; GFX9-NEXT:    s_mul_i32 s16, s3, s14
; GFX9-NEXT:    s_mul_hi_u32 s3, s3, s17
; GFX9-NEXT:    s_add_u32 s3, s3, s16
; GFX9-NEXT:    s_addc_u32 s15, 0, s15
; GFX9-NEXT:    s_mul_hi_u32 s18, s2, s17
; GFX9-NEXT:    s_mul_i32 s17, s2, s17
; GFX9-NEXT:    s_add_u32 s3, s3, s17
; GFX9-NEXT:    s_mul_hi_u32 s16, s2, s14
; GFX9-NEXT:    s_addc_u32 s3, s15, s18
; GFX9-NEXT:    s_addc_u32 s15, s16, 0
; GFX9-NEXT:    s_mul_i32 s14, s2, s14
; GFX9-NEXT:    s_add_u32 s3, s3, s14
; GFX9-NEXT:    s_addc_u32 s14, 0, s15
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s3, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s2, s2, s14
; GFX9-NEXT:    v_readfirstlane_b32 s14, v0
; GFX9-NEXT:    s_mul_i32 s3, s0, s2
; GFX9-NEXT:    s_mul_hi_u32 s15, s0, s14
; GFX9-NEXT:    s_add_i32 s3, s15, s3
; GFX9-NEXT:    s_mul_i32 s1, s1, s14
; GFX9-NEXT:    s_add_i32 s3, s3, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, s14
; GFX9-NEXT:    s_mul_hi_u32 s15, s2, s0
; GFX9-NEXT:    s_mul_i32 s16, s2, s0
; GFX9-NEXT:    s_mul_i32 s18, s14, s3
; GFX9-NEXT:    s_mul_hi_u32 s0, s14, s0
; GFX9-NEXT:    s_mul_hi_u32 s17, s14, s3
; GFX9-NEXT:    s_add_u32 s0, s0, s18
; GFX9-NEXT:    s_addc_u32 s14, 0, s17
; GFX9-NEXT:    s_add_u32 s0, s0, s16
; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
; GFX9-NEXT:    s_addc_u32 s0, s14, s15
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    s_mul_i32 s3, s2, s3
; GFX9-NEXT:    s_add_u32 s0, s0, s3
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s2, s2, s1
; GFX9-NEXT:    s_ashr_i32 s14, s5, 31
; GFX9-NEXT:    s_add_u32 s0, s4, s14
; GFX9-NEXT:    s_mov_b32 s15, s14
; GFX9-NEXT:    s_addc_u32 s1, s5, s14
; GFX9-NEXT:    s_xor_b64 s[4:5], s[0:1], s[14:15]
; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
; GFX9-NEXT:    s_mul_i32 s1, s4, s2
; GFX9-NEXT:    s_mul_hi_u32 s15, s4, s3
; GFX9-NEXT:    s_mul_hi_u32 s0, s4, s2
; GFX9-NEXT:    s_add_u32 s1, s15, s1
; GFX9-NEXT:    s_addc_u32 s0, 0, s0
; GFX9-NEXT:    s_mul_hi_u32 s16, s5, s3
; GFX9-NEXT:    s_mul_i32 s3, s5, s3
; GFX9-NEXT:    s_add_u32 s1, s1, s3
; GFX9-NEXT:    s_mul_hi_u32 s15, s5, s2
; GFX9-NEXT:    s_addc_u32 s0, s0, s16
; GFX9-NEXT:    s_addc_u32 s1, s15, 0
; GFX9-NEXT:    s_mul_i32 s2, s5, s2
; GFX9-NEXT:    s_add_u32 s0, s0, s2
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_i32 s1, s12, s1
; GFX9-NEXT:    s_mul_hi_u32 s2, s12, s0
; GFX9-NEXT:    s_add_i32 s1, s2, s1
; GFX9-NEXT:    s_mul_i32 s2, s13, s0
; GFX9-NEXT:    s_mul_i32 s0, s12, s0
; GFX9-NEXT:    s_add_i32 s15, s1, s2
; GFX9-NEXT:    v_mov_b32_e32 v0, s0
; GFX9-NEXT:    s_sub_i32 s1, s5, s15
; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s4, v0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s4, s1, s13
; GFX9-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s12, v0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s16, s4, 0
; GFX9-NEXT:    s_cmp_ge_u32 s16, s13
; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v1
; GFX9-NEXT:    s_cmp_eq_u32 s16, s13
; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[2:3]
; GFX9-NEXT:    v_mov_b32_e32 v3, s17
; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[2:3]
; GFX9-NEXT:    s_subb_u32 s2, s4, s13
; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s12, v1
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s0, s2, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s2, s5, s15
; GFX9-NEXT:    s_cmp_ge_u32 s2, s13
; GFX9-NEXT:    v_mov_b32_e32 v5, s16
; GFX9-NEXT:    v_mov_b32_e32 v6, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
; GFX9-NEXT:    s_cselect_b32 s3, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
; GFX9-NEXT:    s_cmp_eq_u32 s2, s13
; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v6, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v6, s3
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
; GFX9-NEXT:    s_ashr_i32 s0, s11, 31
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
; GFX9-NEXT:    v_mov_b32_e32 v6, s2
; GFX9-NEXT:    s_add_u32 s2, s10, s0
; GFX9-NEXT:    s_mov_b32 s1, s0
; GFX9-NEXT:    s_addc_u32 s3, s11, s0
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
; GFX9-NEXT:    s_xor_b64 s[4:5], s[2:3], s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s4
; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s5
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
; GFX9-NEXT:    v_xor_b32_e32 v0, s14, v0
; GFX9-NEXT:    v_xor_b32_e32 v2, s14, v2
; GFX9-NEXT:    v_mac_f32_e32 v1, 0x4f800000, v3
; GFX9-NEXT:    v_rcp_f32_e32 v3, v1
; GFX9-NEXT:    v_mov_b32_e32 v5, s14
; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s14, v0
; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v2, v5, vcc
; GFX9-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v3
; GFX9-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
; GFX9-NEXT:    v_mac_f32_e32 v2, 0xcf800000, v3
; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v3
; GFX9-NEXT:    s_sub_u32 s0, 0, s4
; GFX9-NEXT:    s_subb_u32 s1, 0, s5
; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
; GFX9-NEXT:    v_readfirstlane_b32 s11, v3
; GFX9-NEXT:    s_mul_hi_u32 s10, s0, s2
; GFX9-NEXT:    s_mul_i32 s12, s0, s11
; GFX9-NEXT:    s_mul_i32 s3, s1, s2
; GFX9-NEXT:    s_add_i32 s10, s10, s12
; GFX9-NEXT:    s_add_i32 s10, s10, s3
; GFX9-NEXT:    s_mul_i32 s13, s0, s2
; GFX9-NEXT:    s_mul_hi_u32 s3, s2, s10
; GFX9-NEXT:    s_mul_i32 s12, s2, s10
; GFX9-NEXT:    s_mul_hi_u32 s2, s2, s13
; GFX9-NEXT:    s_add_u32 s2, s2, s12
; GFX9-NEXT:    s_addc_u32 s3, 0, s3
; GFX9-NEXT:    s_mul_hi_u32 s14, s11, s13
; GFX9-NEXT:    s_mul_i32 s13, s11, s13
; GFX9-NEXT:    s_add_u32 s2, s2, s13
; GFX9-NEXT:    s_mul_hi_u32 s12, s11, s10
; GFX9-NEXT:    s_addc_u32 s2, s3, s14
; GFX9-NEXT:    s_addc_u32 s3, s12, 0
; GFX9-NEXT:    s_mul_i32 s10, s11, s10
; GFX9-NEXT:    s_add_u32 s2, s2, s10
; GFX9-NEXT:    s_addc_u32 s3, 0, s3
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s2, v2
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s2, s11, s3
; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
; GFX9-NEXT:    s_mul_i32 s3, s0, s2
; GFX9-NEXT:    s_mul_hi_u32 s11, s0, s10
; GFX9-NEXT:    s_add_i32 s3, s11, s3
; GFX9-NEXT:    s_mul_i32 s1, s1, s10
; GFX9-NEXT:    s_add_i32 s3, s3, s1
; GFX9-NEXT:    s_mul_i32 s0, s0, s10
; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s0
; GFX9-NEXT:    s_mul_i32 s12, s2, s0
; GFX9-NEXT:    s_mul_i32 s14, s10, s3
; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s0
; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s3
; GFX9-NEXT:    s_add_u32 s0, s0, s14
; GFX9-NEXT:    s_addc_u32 s10, 0, s13
; GFX9-NEXT:    s_add_u32 s0, s0, s12
; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
; GFX9-NEXT:    s_addc_u32 s0, s10, s11
; GFX9-NEXT:    s_addc_u32 s1, s1, 0
; GFX9-NEXT:    s_mul_i32 s3, s2, s3
; GFX9-NEXT:    s_add_u32 s0, s0, s3
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_addc_u32 s2, s2, s1
; GFX9-NEXT:    s_ashr_i32 s10, s7, 31
; GFX9-NEXT:    s_add_u32 s0, s6, s10
; GFX9-NEXT:    s_mov_b32 s11, s10
; GFX9-NEXT:    s_addc_u32 s1, s7, s10
; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[10:11]
; GFX9-NEXT:    v_readfirstlane_b32 s3, v2
; GFX9-NEXT:    s_mul_i32 s1, s6, s2
; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s3
; GFX9-NEXT:    s_mul_hi_u32 s0, s6, s2
; GFX9-NEXT:    s_add_u32 s1, s11, s1
; GFX9-NEXT:    s_addc_u32 s0, 0, s0
; GFX9-NEXT:    s_mul_hi_u32 s12, s7, s3
; GFX9-NEXT:    s_mul_i32 s3, s7, s3
; GFX9-NEXT:    s_add_u32 s1, s1, s3
; GFX9-NEXT:    s_mul_hi_u32 s11, s7, s2
; GFX9-NEXT:    s_addc_u32 s0, s0, s12
; GFX9-NEXT:    s_addc_u32 s1, s11, 0
; GFX9-NEXT:    s_mul_i32 s2, s7, s2
; GFX9-NEXT:    s_add_u32 s0, s0, s2
; GFX9-NEXT:    s_addc_u32 s1, 0, s1
; GFX9-NEXT:    s_mul_i32 s1, s4, s1
; GFX9-NEXT:    s_mul_hi_u32 s2, s4, s0
; GFX9-NEXT:    s_add_i32 s1, s2, s1
; GFX9-NEXT:    s_mul_i32 s2, s5, s0
; GFX9-NEXT:    s_mul_i32 s0, s4, s0
; GFX9-NEXT:    s_add_i32 s11, s1, s2
; GFX9-NEXT:    v_mov_b32_e32 v2, s0
; GFX9-NEXT:    s_sub_i32 s1, s7, s11
; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, s6, v2
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s6, s1, s5
; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s4, v2
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s12, s6, 0
; GFX9-NEXT:    s_cmp_ge_u32 s12, s5
; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s4, v3
; GFX9-NEXT:    s_cmp_eq_u32 s12, s5
; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[2:3]
; GFX9-NEXT:    v_mov_b32_e32 v6, s13
; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s[2:3]
; GFX9-NEXT:    s_subb_u32 s2, s6, s5
; GFX9-NEXT:    v_subrev_co_u32_e64 v6, s[0:1], s4, v3
; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT:    s_subb_u32 s0, s2, 0
; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
; GFX9-NEXT:    s_subb_u32 s2, s7, s11
; GFX9-NEXT:    s_cmp_ge_u32 s2, s5
; GFX9-NEXT:    v_mov_b32_e32 v7, s12
; GFX9-NEXT:    v_mov_b32_e32 v8, s0
; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
; GFX9-NEXT:    s_cselect_b32 s3, -1, 0
; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
; GFX9-NEXT:    s_cmp_eq_u32 s2, s5
; GFX9-NEXT:    v_cndmask_b32_e64 v5, v7, v8, s[0:1]
; GFX9-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
; GFX9-NEXT:    v_mov_b32_e32 v8, s3
; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
; GFX9-NEXT:    v_cndmask_b32_e32 v7, v8, v7, vcc
; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
; GFX9-NEXT:    v_mov_b32_e32 v8, s2
; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v8, v5, vcc
; GFX9-NEXT:    v_xor_b32_e32 v2, s10, v2
; GFX9-NEXT:    v_xor_b32_e32 v3, s10, v5
; GFX9-NEXT:    v_mov_b32_e32 v5, s10
; GFX9-NEXT:    v_subrev_co_u32_e32 v2, vcc, s10, v2
; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v3, v5, vcc
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[8:9]
; GFX9-NEXT:    s_endpgm
  %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
  %r = srem <2 x i64> %x, %shl.y
  store <2 x i64> %r, <2 x i64> addrspace(1)* %out
  ret void
}