; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; i32 saturate

define <2 x i32> @stest_f64i32(<2 x double> %x) {
; CHECK-LABEL: stest_f64i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    cvttsd2si %xmm0, %rax
; CHECK-NEXT:    movq %rax, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT:    cvttsd2si %xmm0, %rax
; CHECK-NEXT:    movq %rax, %xmm0
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; CHECK-NEXT:    movdqa %xmm1, %xmm2
; CHECK-NEXT:    pxor %xmm0, %xmm2
; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; CHECK-NEXT:    pxor %xmm4, %xmm4
; CHECK-NEXT:    pcmpeqd %xmm3, %xmm4
; CHECK-NEXT:    movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; CHECK-NEXT:    pcmpgtd %xmm2, %xmm3
; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; CHECK-NEXT:    pand %xmm4, %xmm2
; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; CHECK-NEXT:    por %xmm2, %xmm3
; CHECK-NEXT:    pand %xmm3, %xmm1
; CHECK-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; CHECK-NEXT:    por %xmm1, %xmm3
; CHECK-NEXT:    pxor %xmm3, %xmm0
; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-NEXT:    pcmpeqd %xmm2, %xmm2
; CHECK-NEXT:    pcmpeqd %xmm1, %xmm2
; CHECK-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
; CHECK-NEXT:    pand %xmm2, %xmm1
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-NEXT:    por %xmm1, %xmm0
; CHECK-NEXT:    pand %xmm0, %xmm3
; CHECK-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT:    por %xmm3, %xmm0
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT:    retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %0 = icmp slt <2 x i64> %conv, <i64 2147483647, i64 2147483647>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>
  %1 = icmp sgt <2 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}

define <2 x i32> @utest_f64i32(<2 x double> %x) {
; CHECK-LABEL: utest_f64i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT:    movapd %xmm0, %xmm1
; CHECK-NEXT:    subsd %xmm2, %xmm1
; CHECK-NEXT:    cvttsd2si %xmm1, %rax
; CHECK-NEXT:    cvttsd2si %xmm0, %rcx
; CHECK-NEXT:    movq %rcx, %rdx
; CHECK-NEXT:    sarq $63, %rdx
; CHECK-NEXT:    andq %rax, %rdx
; CHECK-NEXT:    orq %rcx, %rdx
; CHECK-NEXT:    movq %rdx, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT:    cvttsd2si %xmm0, %rax
; CHECK-NEXT:    subsd %xmm2, %xmm0
; CHECK-NEXT:    cvttsd2si %xmm0, %rcx
; CHECK-NEXT:    movq %rax, %rdx
; CHECK-NEXT:    sarq $63, %rdx
; CHECK-NEXT:    andq %rcx, %rdx
; CHECK-NEXT:    orq %rax, %rdx
; CHECK-NEXT:    movq %rdx, %xmm0
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; CHECK-NEXT:    pxor %xmm1, %xmm0
; CHECK-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259455,9223372039002259455]
; CHECK-NEXT:    pcmpgtd %xmm0, %xmm2
; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-NEXT:    pand %xmm3, %xmm0
; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-NEXT:    por %xmm0, %xmm2
; CHECK-NEXT:    pand %xmm2, %xmm1
; CHECK-NEXT:    pandn
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: por %xmm1, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptoui <2 x double> %x to <2 x i64> %0 = icmp ult <2 x i64> %conv, <i64 4294967295, i64 4294967295> %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295> %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32> ret <2 x i32> %conv6 } define <2 x i32> @ustest_f64i32(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; CHECK-NEXT: pxor %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647] ; CHECK-NEXT: pcmpgtd %xmm2, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm1 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; CHECK-NEXT: por %xmm1, %xmm3 ; CHECK-NEXT: movdqa %xmm3, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; CHECK-NEXT: por %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm3, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i64> %0 = icmp slt <2 x i64> %conv, <i64 4294967295, i64 4294967295> %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295> %1 = icmp sgt <2 x i64> %spec.store.select, zeroinitializer %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> zeroinitializer %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32> ret <2 x i32> %conv6 } define <4 x i32> @stest_f32i32(<4 x float> %x) { ; CHECK-LABEL: stest_f32i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3] ; CHECK-NEXT: cvttss2si %xmm1, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm2 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] ; CHECK-NEXT: cvttss2si %xmm2, %rax ; CHECK-NEXT: movq %rax, %xmm2 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm4 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm4, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm9, %xmm9 ; CHECK-NEXT: pcmpeqd %xmm9, %xmm5 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295] ; CHECK-NEXT: movdqa %xmm3, %xmm7 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm6 ; CHECK-NEXT: 
pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3] ; CHECK-NEXT: por %xmm6, %xmm1 ; CHECK-NEXT: pand %xmm1, %xmm4 ; CHECK-NEXT: pandn %xmm8, %xmm1 ; CHECK-NEXT: por %xmm4, %xmm1 ; CHECK-NEXT: movdqa %xmm2, %xmm4 ; CHECK-NEXT: pxor %xmm0, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm9, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm4, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm4, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pandn %xmm8, %xmm3 ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067968,18446744071562067968] ; CHECK-NEXT: movdqa %xmm3, %xmm4 ; CHECK-NEXT: pxor %xmm0, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm6, %xmm6 ; CHECK-NEXT: pcmpeqd %xmm6, %xmm5 ; CHECK-NEXT: movdqa {{.*#+}} xmm7 = [18446744069414584320,18446744069414584320] ; CHECK-NEXT: pcmpgtd %xmm7, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm4 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pandn %xmm8, %xmm4 ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: pxor %xmm1, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm6, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm7, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2] ; CHECK-NEXT: pand %xmm2, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm8, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2] ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647> %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647> %1 = icmp sgt <4 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648> %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648> %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @utest_f32i32(<4 x float> %x) { ; CHECK-LABEL: utest_f32i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: subss %xmm2, %xmm1 ; CHECK-NEXT: cvttss2si %xmm1, %rax ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rcx, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rax, %rdx ; CHECK-NEXT: orq %rcx, %rdx ; CHECK-NEXT: movq %rdx, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1] ; CHECK-NEXT: cvttss2si %xmm3, %rax ; CHECK-NEXT: subss %xmm2, %xmm3 ; CHECK-NEXT: cvttss2si %xmm3, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm3 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; CHECK-NEXT: movaps %xmm0, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm0[3,3] ; CHECK-NEXT: cvttss2si %xmm3, %rax ; CHECK-NEXT: subss %xmm2, %xmm3 ; CHECK-NEXT: cvttss2si %xmm3, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: 
movq %rdx, %xmm3 ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss %xmm2, %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295] ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456] ; CHECK-NEXT: movdqa %xmm0, %xmm4 ; CHECK-NEXT: pxor %xmm3, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm3, %xmm5 ; CHECK-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259455,9223372039002259455] ; CHECK-NEXT: movdqa %xmm6, %xmm7 ; CHECK-NEXT: pcmpgtd %xmm4, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3] ; CHECK-NEXT: por %xmm4, %xmm5 ; CHECK-NEXT: pand %xmm5, %xmm0 ; CHECK-NEXT: pandn %xmm2, %xmm5 ; CHECK-NEXT: por %xmm0, %xmm5 ; CHECK-NEXT: movdqa %xmm1, %xmm0 ; CHECK-NEXT: pxor %xmm3, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm3, %xmm4 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2] ; CHECK-NEXT: retq entry: %conv = fptoui <4 x float> %x to <4 x i64> %0 = icmp ult <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @ustest_f32i32(<4 x float> %x) { ; CHECK-LABEL: ustest_f32i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3] ; CHECK-NEXT: cvttss2si %xmm1, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm2 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] ; CHECK-NEXT: cvttss2si %xmm2, %rax ; CHECK-NEXT: movq %rax, %xmm2 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm4 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm4, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm9, %xmm9 ; CHECK-NEXT: pcmpeqd %xmm9, %xmm5 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647] ; CHECK-NEXT: movdqa %xmm3, %xmm7 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3] ; CHECK-NEXT: por %xmm6, %xmm1 ; CHECK-NEXT: pand %xmm1, %xmm4 ; CHECK-NEXT: pandn %xmm8, %xmm1 ; CHECK-NEXT: por %xmm4, %xmm1 ; CHECK-NEXT: movdqa %xmm2, %xmm4 ; CHECK-NEXT: pxor %xmm0, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm9, %xmm5 ; CHECK-NEXT: pcmpgtd 
%xmm4, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm4, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pandn %xmm8, %xmm3 ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: movdqa %xmm3, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm4 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm4 ; CHECK-NEXT: pand %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm3 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: pand %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2] ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @stest_f16i32(<4 x half> %x) { ; CHECK-LABEL: stest_f16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1] ; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm3, %xmm1 ; CHECK-NEXT: movdqa %xmm3, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm4, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm5 = 
[4294967295,4294967295] ; CHECK-NEXT: movdqa %xmm5, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm3, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm7, %xmm1 ; CHECK-NEXT: pand %xmm1, %xmm2 ; CHECK-NEXT: pandn %xmm8, %xmm1 ; CHECK-NEXT: por %xmm2, %xmm1 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; CHECK-NEXT: movdqa %xmm7, %xmm3 ; CHECK-NEXT: pxor %xmm0, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm4, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm6, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm7, %xmm3 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pandn %xmm8, %xmm4 ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067968,18446744071562067968] ; CHECK-NEXT: movdqa %xmm4, %xmm3 ; CHECK-NEXT: pxor %xmm0, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm6, %xmm6 ; CHECK-NEXT: pcmpeqd %xmm6, %xmm5 ; CHECK-NEXT: movdqa {{.*#+}} xmm7 = [18446744069414584320,18446744069414584320] ; CHECK-NEXT: pcmpgtd %xmm7, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm4 ; CHECK-NEXT: pandn %xmm8, %xmm3 ; CHECK-NEXT: por %xmm4, %xmm3 ; CHECK-NEXT: pxor %xmm1, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm6, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm7, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2] ; CHECK-NEXT: pand %xmm2, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; CHECK-NEXT: por %xmm4, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm8, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2] ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <4 x half> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647> %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647> %1 = icmp sgt <4 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648> %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648> %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @utesth_f16i32(<4 x half> %x) { ; CHECK-LABEL: utesth_f16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1] ; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: psrlq $48, %xmm1 ; CHECK-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: 
andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [4294967295,4294967295] ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] ; CHECK-NEXT: movdqa %xmm0, %xmm3 ; CHECK-NEXT: pxor %xmm2, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm2, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259455,9223372039002259455] ; CHECK-NEXT: movdqa %xmm5, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: pand %xmm4, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm4 ; CHECK-NEXT: por %xmm0, %xmm4 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload ; CHECK-NEXT: movdqa %xmm6, %xmm0 ; CHECK-NEXT: pxor %xmm2, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: movdqa %xmm6, %xmm2 ; CHECK-NEXT: pand %xmm0, %xmm2 ; CHECK-NEXT: pandn %xmm1, %xmm0 ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2] ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <4 x half> %x to <4 x i64> %0 = icmp ult <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @ustest_f16i32(<4 x half> %x) { ; CHECK-LABEL: ustest_f16i32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: 
.cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1] ; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm3, %xmm1 ; CHECK-NEXT: movdqa %xmm3, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm4, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [2147483647,2147483647] ; CHECK-NEXT: movdqa %xmm5, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm3, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm7, %xmm1 ; CHECK-NEXT: pand %xmm1, %xmm2 ; CHECK-NEXT: pandn %xmm8, %xmm1 ; CHECK-NEXT: por %xmm2, %xmm1 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; CHECK-NEXT: movdqa %xmm7, %xmm3 ; CHECK-NEXT: pxor %xmm0, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm4, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm6, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm7, %xmm3 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pandn %xmm8, %xmm4 ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm4, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm3 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm4 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: pand %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2] ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: 
.cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <4 x half> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295> %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } ; i16 saturate define <2 x i16> @stest_f64i16(<2 x double> %x) { ; CHECK-LABEL: stest_f64i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <32767,32767,u,u> ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <4294934528,4294934528,u,u> ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm1, %xmm2 ; CHECK-NEXT: pandn %xmm0, %xmm1 ; CHECK-NEXT: por %xmm2, %xmm1 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i32> %0 = icmp slt <2 x i32> %conv, <i32 32767, i32 32767> %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 32767, i32 32767> %1 = icmp sgt <2 x i32> %spec.store.select, <i32 -32768, i32 -32768> %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768> %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16> ret <2 x i16> %conv6 } define <2 x i16> @utest_f64i16(<2 x double> %x) { ; CHECK-LABEL: utest_f64i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 ; CHECK-NEXT: movapd %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: addpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm0 ; CHECK-NEXT: andpd %xmm2, %xmm0 ; CHECK-NEXT: orpd %xmm1, %xmm0 ; CHECK-NEXT: movapd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648] ; CHECK-NEXT: xorpd %xmm0, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = <2147549183,2147549183,u,u> ; CHECK-NEXT: pcmpgtd %xmm1, %xmm2 ; CHECK-NEXT: andpd %xmm2, %xmm0 ; CHECK-NEXT: andnpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: orpd %xmm0, %xmm2 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,2,2,3,4,5,6,7] ; CHECK-NEXT: retq entry: %conv = fptoui <2 x double> %x to <2 x i32> %0 = icmp ult <2 x i32> %conv, <i32 65535, i32 65535> %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535> %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16> ret <2 x i16> %conv6 } define <2 x i16> @ustest_f64i16(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <65535,65535,u,u> ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm0 ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm2, %xmm1 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i32> %0 = icmp slt <2 x i32> %conv, <i32 65535, i32 65535> %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535> %1 = icmp sgt <2 x 
i32> %spec.store.select, zeroinitializer %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> zeroinitializer %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16> ret <2 x i16> %conv6 } define <4 x i16> @stest_f32i16(<4 x float> %x) { ; CHECK-LABEL: stest_f32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: packssdw %xmm0, %xmm0 ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i32> %0 = icmp slt <4 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767> %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767> %1 = icmp sgt <4 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768> %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768> %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16> ret <4 x i16> %conv6 } define <4 x i16> @utest_f32i16(<4 x float> %x) { ; CHECK-LABEL: utest_f32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648] ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [2147549183,2147549183,2147549183,2147549183] ; CHECK-NEXT: pcmpgtd %xmm1, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,2,2,3,4,5,6,7] ; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptoui <4 x float> %x to <4 x i32> %0 = icmp ult <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535> %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535> %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16> ret <4 x i16> %conv6 } define <4 x i16> @ustest_f32i16(<4 x float> %x) { ; CHECK-LABEL: ustest_f32i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm0 ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm2, %xmm1 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7] ; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i32> %0 = icmp slt <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535> %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535> %1 = icmp sgt <4 x i32> %spec.store.select, zeroinitializer %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> zeroinitializer %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16> ret <4 x i16> %conv6 } define <8 x i16> @stest_f16i16(<8 x half> %x) { ; CHECK-LABEL: stest_f16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa 
%xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; CHECK-NEXT: cvttps2dq %xmm1, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: packssdw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <8 x half> %x to <8 x i32> %0 = icmp slt <8 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767> %1 = icmp sgt <8 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768> %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768> %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16> ret <8 x i16> %conv6 } define <8 x i16> @utesth_f16i16(<8 x half> %x) { ; CHECK-LABEL: utesth_f16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload ; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; CHECK-NEXT: cvttps2dq %xmm2, %xmm0 ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrad $31, %xmm1 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: cvttps2dq %xmm2, %xmm2 ; CHECK-NEXT: pand %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload ; CHECK-NEXT: # xmm2 = xmm2[0],mem[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = 
[2147483648,2147483648,2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pxor %xmm1, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183] ; CHECK-NEXT: movdqa %xmm4, %xmm0 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm2 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm3 ; CHECK-NEXT: pxor %xmm3, %xmm0 ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload ; CHECK-NEXT: pxor %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm4 ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pxor %xmm3, %xmm4 ; CHECK-NEXT: por %xmm2, %xmm4 ; CHECK-NEXT: pslld $16, %xmm4 ; CHECK-NEXT: psrad $16, %xmm4 ; CHECK-NEXT: pslld $16, %xmm0 ; CHECK-NEXT: psrad $16, %xmm0 ; CHECK-NEXT: packssdw %xmm4, %xmm0 ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <8 x half> %x to <8 x i32> %0 = icmp ult <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16> ret <8 x i16> %conv6 } define <8 x i16> @ustest_f16i16(<8 x half> %x) { ; CHECK-LABEL: ustest_f16i16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; CHECK-NEXT: cvttps2dq %xmm1, %xmm0 ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, 
{{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm1, %xmm3 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: pcmpgtd %xmm0, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm3 ; CHECK-NEXT: por %xmm0, %xmm3 ; CHECK-NEXT: pxor %xmm1, %xmm1 ; CHECK-NEXT: movdqa %xmm3, %xmm0 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm0 ; CHECK-NEXT: pand %xmm3, %xmm0 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm3 ; CHECK-NEXT: pand %xmm2, %xmm3 ; CHECK-NEXT: pslld $16, %xmm3 ; CHECK-NEXT: psrad $16, %xmm3 ; CHECK-NEXT: pslld $16, %xmm0 ; CHECK-NEXT: psrad $16, %xmm0 ; CHECK-NEXT: packssdw %xmm3, %xmm0 ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <8 x half> %x to <8 x i32> %0 = icmp slt <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535> %1 = icmp sgt <8 x i32> %spec.store.select, zeroinitializer %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> zeroinitializer %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16> ret <8 x i16> %conv6 } ; i64 saturate define <2 x i64> @stest_f64i64(<2 x double> %x) { ; CHECK-LABEL: stest_f64i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: movq %rax, %r14 ; CHECK-NEXT: movq %rdx, %rbx ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF ; CHECK-NEXT: cmpq %rsi, %rax ; CHECK-NEXT: movq %rdx, %rdi ; CHECK-NEXT: sbbq $0, %rdi ; CHECK-NEXT: cmovgeq %rcx, %rdx ; CHECK-NEXT: cmovgeq %rsi, %rax ; CHECK-NEXT: cmpq %rsi, %r14 ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: sbbq $0, %rdi ; CHECK-NEXT: cmovlq %rbx, %rcx ; CHECK-NEXT: cmovlq %r14, %rsi ; CHECK-NEXT: movabsq $-9223372036854775808, %r8 # imm = 0x8000000000000000 ; CHECK-NEXT: cmpq %rsi, %r8 ; CHECK-NEXT: movq $-1, %rbx ; CHECK-NEXT: movq $-1, %rdi ; CHECK-NEXT: sbbq %rcx, %rdi ; CHECK-NEXT: cmovgeq %r8, %rsi ; CHECK-NEXT: cmpq %rax, %r8 ; CHECK-NEXT: sbbq %rdx, %rbx ; CHECK-NEXT: cmovgeq %r8, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rsi, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; 
CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807> %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808> %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808> %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @utest_f64i64(<2 x double> %x) { ; CHECK-LABEL: utest_f64i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: callq __fixunsdfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __fixunsdfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: cmovneq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovneq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %xmm0 ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <2 x double> %x to <2 x i128> %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @ustest_f64i64(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: movl $1, %esi ; CHECK-NEXT: cmovgq %rsi, %rdx ; CHECK-NEXT: cmovgq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovleq %r14, %rsi ; CHECK-NEXT: cmovgq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: negq %rdi ; CHECK-NEXT: movl $0, %edi ; CHECK-NEXT: sbbq %rsi, %rdi ; CHECK-NEXT: cmovgeq %rcx, %rbx ; CHECK-NEXT: movq %rax, %rsi ; CHECK-NEXT: negq %rsi ; CHECK-NEXT: movl $0, %esi ; CHECK-NEXT: sbbq %rdx, %rsi ; CHECK-NEXT: cmovgeq %rcx, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rbx, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: 
.cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @stest_f32i64(<2 x float> %x) { ; CHECK-LABEL: stest_f32i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __fixsfti@PLT ; CHECK-NEXT: movq %rax, %r14 ; CHECK-NEXT: movq %rdx, %rbx ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixsfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF ; CHECK-NEXT: cmpq %rsi, %rax ; CHECK-NEXT: movq %rdx, %rdi ; CHECK-NEXT: sbbq $0, %rdi ; CHECK-NEXT: cmovgeq %rcx, %rdx ; CHECK-NEXT: cmovgeq %rsi, %rax ; CHECK-NEXT: cmpq %rsi, %r14 ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: sbbq $0, %rdi ; CHECK-NEXT: cmovlq %rbx, %rcx ; CHECK-NEXT: cmovlq %r14, %rsi ; CHECK-NEXT: movabsq $-9223372036854775808, %r8 # imm = 0x8000000000000000 ; CHECK-NEXT: cmpq %rsi, %r8 ; CHECK-NEXT: movq $-1, %rbx ; CHECK-NEXT: movq $-1, %rdi ; CHECK-NEXT: sbbq %rcx, %rdi ; CHECK-NEXT: cmovgeq %r8, %rsi ; CHECK-NEXT: cmpq %rax, %r8 ; CHECK-NEXT: sbbq %rdx, %rbx ; CHECK-NEXT: cmovgeq %r8, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rsi, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x float> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807> %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808> %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808> %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @utest_f32i64(<2 x float> %x) { ; CHECK-LABEL: utest_f32i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: callq __fixunssfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 
= xmm0[1,1,1,1] ; CHECK-NEXT: callq __fixunssfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: cmovneq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovneq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %xmm0 ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <2 x float> %x to <2 x i128> %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @ustest_f32i64(<2 x float> %x) { ; CHECK-LABEL: ustest_f32i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __fixsfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixsfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: movl $1, %esi ; CHECK-NEXT: cmovgq %rsi, %rdx ; CHECK-NEXT: cmovgq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovleq %r14, %rsi ; CHECK-NEXT: cmovgq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: negq %rdi ; CHECK-NEXT: movl $0, %edi ; CHECK-NEXT: sbbq %rsi, %rdi ; CHECK-NEXT: cmovgeq %rcx, %rbx ; CHECK-NEXT: movq %rax, %rsi ; CHECK-NEXT: negq %rsi ; CHECK-NEXT: movl $0, %esi ; CHECK-NEXT: sbbq %rdx, %rsi ; CHECK-NEXT: cmovgeq %rcx, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rbx, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x float> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @stest_f16i64(<2 x half> %x) { ; CHECK-LABEL: stest_f16i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __fixhfti@PLT ; CHECK-NEXT: movq %rax, %r14 ; CHECK-NEXT: movq %rdx, %rbx ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixhfti@PLT ; CHECK-NEXT: xorl %ecx, 
%ecx ; CHECK-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF ; CHECK-NEXT: cmpq %rsi, %rax ; CHECK-NEXT: movq %rdx, %rdi ; CHECK-NEXT: sbbq $0, %rdi ; CHECK-NEXT: cmovgeq %rcx, %rdx ; CHECK-NEXT: cmovgeq %rsi, %rax ; CHECK-NEXT: cmpq %rsi, %r14 ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: sbbq $0, %rdi ; CHECK-NEXT: cmovlq %rbx, %rcx ; CHECK-NEXT: cmovlq %r14, %rsi ; CHECK-NEXT: movabsq $-9223372036854775808, %r8 # imm = 0x8000000000000000 ; CHECK-NEXT: cmpq %rsi, %r8 ; CHECK-NEXT: movq $-1, %rbx ; CHECK-NEXT: movq $-1, %rdi ; CHECK-NEXT: sbbq %rcx, %rdi ; CHECK-NEXT: cmovgeq %r8, %rsi ; CHECK-NEXT: cmpq %rax, %r8 ; CHECK-NEXT: sbbq %rdx, %rbx ; CHECK-NEXT: cmovgeq %r8, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rsi, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x half> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807> %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808> %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808> %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @utesth_f16i64(<2 x half> %x) { ; CHECK-LABEL: utesth_f16i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill ; CHECK-NEXT: callq __fixunshfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixunshfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: cmovneq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovneq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %xmm0 ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <2 x half> %x to <2 x i128> %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @ustest_f16i64(<2 x half> %x) { ; CHECK-LABEL: ustest_f16i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrld $16, %xmm0 ; 
CHECK-NEXT: callq __fixhfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixhfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: movl $1, %esi ; CHECK-NEXT: cmovgq %rsi, %rdx ; CHECK-NEXT: cmovgq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovleq %r14, %rsi ; CHECK-NEXT: cmovgq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %rdi ; CHECK-NEXT: negq %rdi ; CHECK-NEXT: movl $0, %edi ; CHECK-NEXT: sbbq %rsi, %rdi ; CHECK-NEXT: cmovgeq %rcx, %rbx ; CHECK-NEXT: movq %rax, %rsi ; CHECK-NEXT: negq %rsi ; CHECK-NEXT: movl $0, %esi ; CHECK-NEXT: sbbq %rdx, %rsi ; CHECK-NEXT: cmovgeq %rcx, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rbx, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x half> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616> %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616> %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } ; i32 saturate define <2 x i32> @stest_f64i32_mm(<2 x double> %x) { ; CHECK-LABEL: stest_f64i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; CHECK-NEXT: pxor %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295] ; CHECK-NEXT: pcmpgtd %xmm2, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm1 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; CHECK-NEXT: por %xmm1, %xmm3 ; CHECK-NEXT: pxor %xmm3, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm2, %xmm2 ; CHECK-NEXT: pcmpeqd %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2] ; CHECK-NEXT: pand %xmm2, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm3 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: por %xmm3, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i64> %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>) %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>) %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32> ret <2 x i32> %conv6 } define <2 x i32> @utest_f64i32_mm(<2 x double> %x) { ; CHECK-LABEL: 
utest_f64i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero ; CHECK-NEXT: movapd %xmm0, %xmm1 ; CHECK-NEXT: subsd %xmm2, %xmm1 ; CHECK-NEXT: cvttsd2si %xmm1, %rax ; CHECK-NEXT: cvttsd2si %xmm0, %rcx ; CHECK-NEXT: movq %rcx, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rax, %rdx ; CHECK-NEXT: orq %rcx, %rdx ; CHECK-NEXT: movq %rdx, %xmm1 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: subsd %xmm2, %xmm0 ; CHECK-NEXT: cvttsd2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456] ; CHECK-NEXT: pxor %xmm1, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259455,9223372039002259455] ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: pand %xmm3, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm1 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: por %xmm1, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptoui <2 x double> %x to <2 x i64> %spec.store.select = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>) %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32> ret <2 x i32> %conv6 } define <2 x i32> @ustest_f64i32_mm(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: cvttsd2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; CHECK-NEXT: pxor %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647] ; CHECK-NEXT: pcmpgtd %xmm2, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm1 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; CHECK-NEXT: por %xmm1, %xmm3 ; CHECK-NEXT: movdqa %xmm3, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; CHECK-NEXT: por %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm3, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i64> %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>) %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> zeroinitializer) %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32> ret <2 x i32> %conv6 } define <4 x i32> @stest_f32i32_mm(<4 x float> %x) { ; CHECK-LABEL: stest_f32i32_mm: ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3] ; CHECK-NEXT: cvttss2si %xmm1, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm2 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] ; CHECK-NEXT: cvttss2si %xmm2, %rax ; CHECK-NEXT: movq %rax, %xmm2 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm3, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm8, %xmm8 ; CHECK-NEXT: pcmpeqd %xmm8, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm6 = [4294967295,4294967295] ; CHECK-NEXT: movdqa %xmm6, %xmm7 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3] ; CHECK-NEXT: por %xmm5, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647] ; CHECK-NEXT: pand %xmm1, %xmm3 ; CHECK-NEXT: pandn %xmm4, %xmm1 ; CHECK-NEXT: por %xmm3, %xmm1 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pxor %xmm0, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm8, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm5 ; CHECK-NEXT: pand %xmm5, %xmm2 ; CHECK-NEXT: pandn %xmm4, %xmm5 ; CHECK-NEXT: por %xmm2, %xmm5 ; CHECK-NEXT: movdqa %xmm5, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm4, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm4, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm6 = [18446744069414584320,18446744069414584320] ; CHECK-NEXT: pcmpgtd %xmm6, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2] ; CHECK-NEXT: pand %xmm3, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: por %xmm7, %xmm2 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968] ; CHECK-NEXT: pand %xmm2, %xmm5 ; CHECK-NEXT: pandn %xmm3, %xmm2 ; CHECK-NEXT: por %xmm5, %xmm2 ; CHECK-NEXT: pxor %xmm1, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm4, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm6, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; CHECK-NEXT: por %xmm4, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm3, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2] ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>) %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>) %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @utest_f32i32_mm(<4 x float> %x) { ; CHECK-LABEL: utest_f32i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; CHECK-NEXT: movaps %xmm0, %xmm1 ; 
CHECK-NEXT: subss %xmm2, %xmm1 ; CHECK-NEXT: cvttss2si %xmm1, %rax ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rcx, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rax, %rdx ; CHECK-NEXT: orq %rcx, %rdx ; CHECK-NEXT: movq %rdx, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1] ; CHECK-NEXT: cvttss2si %xmm3, %rax ; CHECK-NEXT: subss %xmm2, %xmm3 ; CHECK-NEXT: cvttss2si %xmm3, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm3 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] ; CHECK-NEXT: movaps %xmm0, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm0[3,3] ; CHECK-NEXT: cvttss2si %xmm3, %rax ; CHECK-NEXT: subss %xmm2, %xmm3 ; CHECK-NEXT: cvttss2si %xmm3, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm3 ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss %xmm2, %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456] ; CHECK-NEXT: movdqa %xmm0, %xmm3 ; CHECK-NEXT: pxor %xmm2, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm2, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259455,9223372039002259455] ; CHECK-NEXT: movdqa %xmm5, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295] ; CHECK-NEXT: pand %xmm4, %xmm0 ; CHECK-NEXT: pandn %xmm3, %xmm4 ; CHECK-NEXT: por %xmm0, %xmm4 ; CHECK-NEXT: movdqa %xmm1, %xmm0 ; CHECK-NEXT: pxor %xmm2, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm2, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm6, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm3, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2] ; CHECK-NEXT: retq entry: %conv = fptoui <4 x float> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @ustest_f32i32_mm(<4 x float> %x) { ; CHECK-LABEL: ustest_f32i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3] ; CHECK-NEXT: cvttss2si %xmm1, %rax ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: movaps %xmm0, %xmm2 ; CHECK-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] ; CHECK-NEXT: cvttss2si %xmm2, %rax ; CHECK-NEXT: movq %rax, %xmm2 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm3 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq 
{{.*#+}} xmm3 = xmm3[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm3, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm8, %xmm8 ; CHECK-NEXT: pcmpeqd %xmm8, %xmm4 ; CHECK-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647] ; CHECK-NEXT: movdqa %xmm6, %xmm7 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3] ; CHECK-NEXT: por %xmm5, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [4294967295,4294967295] ; CHECK-NEXT: pand %xmm1, %xmm3 ; CHECK-NEXT: pandn %xmm4, %xmm1 ; CHECK-NEXT: por %xmm3, %xmm1 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pxor %xmm0, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm8, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm5 ; CHECK-NEXT: pand %xmm5, %xmm2 ; CHECK-NEXT: pandn %xmm4, %xmm5 ; CHECK-NEXT: por %xmm2, %xmm5 ; CHECK-NEXT: movdqa %xmm5, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm3 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: pand %xmm5, %xmm3 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm4 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: pand %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2] ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer) %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @stest_f16i32_mm(<4 x half> %x) { ; CHECK-LABEL: stest_f16i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1] ; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT 
; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: movdqa %xmm2, %xmm7 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm3, %xmm3 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm2 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [4294967295,4294967295] ; CHECK-NEXT: movdqa %xmm4, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm2, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm6, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647] ; CHECK-NEXT: pand %xmm1, %xmm7 ; CHECK-NEXT: pandn %xmm2, %xmm1 ; CHECK-NEXT: por %xmm7, %xmm1 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; CHECK-NEXT: movdqa %xmm7, %xmm5 ; CHECK-NEXT: pxor %xmm0, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm3, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm5, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2] ; CHECK-NEXT: pand %xmm6, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm7, %xmm3 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pandn %xmm2, %xmm4 ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm4, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm5, %xmm5 ; CHECK-NEXT: pcmpeqd %xmm5, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm6 = [18446744069414584320,18446744069414584320] ; CHECK-NEXT: pcmpgtd %xmm6, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2] ; CHECK-NEXT: pand %xmm3, %xmm7 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: por %xmm7, %xmm2 ; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968] ; CHECK-NEXT: pand %xmm2, %xmm4 ; CHECK-NEXT: pandn %xmm3, %xmm2 ; CHECK-NEXT: por %xmm4, %xmm2 ; CHECK-NEXT: pxor %xmm1, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm5, %xmm4 ; CHECK-NEXT: pcmpgtd %xmm6, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2] ; CHECK-NEXT: pand %xmm4, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; CHECK-NEXT: por %xmm5, %xmm0 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm3, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2] ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <4 x half> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>) %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>) %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) { ; CHECK-LABEL: utesth_f16i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: 
shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1] ; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: psrlq $48, %xmm1 ; CHECK-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttss2si %xmm0, %rcx ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: sarq $63, %rdx ; CHECK-NEXT: andq %rcx, %rdx ; CHECK-NEXT: orq %rax, %rdx ; CHECK-NEXT: movq %rdx, %xmm0 ; CHECK-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259456,9223372039002259456] ; CHECK-NEXT: movdqa %xmm0, %xmm2 ; CHECK-NEXT: pxor %xmm1, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm1, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259455,9223372039002259455] ; CHECK-NEXT: movdqa %xmm4, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm2, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295] ; CHECK-NEXT: pand %xmm3, %xmm0 ; CHECK-NEXT: pandn %xmm2, %xmm3 ; CHECK-NEXT: por %xmm0, %xmm3 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload ; CHECK-NEXT: movdqa %xmm6, %xmm0 ; CHECK-NEXT: pxor %xmm1, %xmm0 ; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm1, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,0,2,2] ; CHECK-NEXT: pand %xmm5, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa %xmm6, %xmm1 ; CHECK-NEXT: pand %xmm0, %xmm1 ; CHECK-NEXT: pandn %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2] ; 
CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <4 x half> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32> ret <4 x i32> %conv6 } define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) { ; CHECK-LABEL: ustest_f16i32_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrld $16, %xmm1 ; CHECK-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1] ; CHECK-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: cvttss2si %xmm0, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: movdqa %xmm2, %xmm7 ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] ; CHECK-NEXT: pxor %xmm3, %xmm3 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm2 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647] ; CHECK-NEXT: movdqa %xmm4, %xmm5 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2] ; CHECK-NEXT: pand %xmm2, %xmm6 ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3] ; CHECK-NEXT: por %xmm6, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295] ; CHECK-NEXT: pand %xmm1, %xmm7 ; CHECK-NEXT: pandn %xmm2, %xmm1 ; CHECK-NEXT: por %xmm7, %xmm1 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; CHECK-NEXT: movdqa %xmm7, %xmm5 ; CHECK-NEXT: pxor %xmm0, %xmm5 ; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3] ; CHECK-NEXT: pcmpeqd %xmm3, %xmm6 ; CHECK-NEXT: pcmpgtd %xmm5, %xmm4 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2] ; CHECK-NEXT: pand %xmm6, %xmm3 ; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm7, %xmm3 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: pandn %xmm2, %xmm4 ; CHECK-NEXT: por %xmm3, %xmm4 ; CHECK-NEXT: movdqa %xmm4, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm3 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm3, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] ; CHECK-NEXT: por %xmm2, 
%xmm3 ; CHECK-NEXT: pand %xmm4, %xmm3 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm4 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm4 ; CHECK-NEXT: pcmpeqd %xmm0, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3] ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: pand %xmm1, %xmm0 ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2] ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <4 x half> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>) %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer) %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> ret <4 x i32> %conv6 } ; i16 saturate define <2 x i16> @stest_f64i16_mm(<2 x double> %x) { ; CHECK-LABEL: stest_f64i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <32767,32767,u,u> ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <4294934528,4294934528,u,u> ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm1, %xmm2 ; CHECK-NEXT: pandn %xmm0, %xmm1 ; CHECK-NEXT: por %xmm2, %xmm1 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i32> %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>) %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>) %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16> ret <2 x i16> %conv6 } define <2 x i16> @utest_f64i16_mm(<2 x double> %x) { ; CHECK-LABEL: utest_f64i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 ; CHECK-NEXT: movapd %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: addpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm0 ; CHECK-NEXT: andpd %xmm2, %xmm0 ; CHECK-NEXT: orpd %xmm1, %xmm0 ; CHECK-NEXT: movapd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648] ; CHECK-NEXT: xorpd %xmm0, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = <2147549183,2147549183,u,u> ; CHECK-NEXT: pcmpgtd %xmm1, %xmm2 ; CHECK-NEXT: andpd %xmm2, %xmm0 ; CHECK-NEXT: andnpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: orpd %xmm0, %xmm2 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,2,2,3,4,5,6,7] ; CHECK-NEXT: retq entry: %conv = fptoui <2 x double> %x to <2 x i32> %spec.store.select = call <2 x i32> @llvm.umin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>) %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16> ret <2 x i16> %conv6 } define <2 x i16> @ustest_f64i16_mm(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttpd2dq %xmm0, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <65535,65535,u,u> ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm0 ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm2, %xmm1 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = 
xmm1[0,2,2,3,4,5,6,7] ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i32> %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>) %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> zeroinitializer) %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16> ret <2 x i16> %conv6 } define <4 x i16> @stest_f32i16_mm(<4 x float> %x) { ; CHECK-LABEL: stest_f32i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: packssdw %xmm0, %xmm0 ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i32> %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>) %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>) %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16> ret <4 x i16> %conv6 } define <4 x i16> @utest_f32i16_mm(<4 x float> %x) { ; CHECK-LABEL: utest_f32i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648] ; CHECK-NEXT: pxor %xmm0, %xmm1 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [2147549183,2147549183,2147549183,2147549183] ; CHECK-NEXT: pcmpgtd %xmm1, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,2,2,3,4,5,6,7] ; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptoui <4 x float> %x to <4 x i32> %spec.store.select = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>) %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16> ret <4 x i16> %conv6 } define <4 x i16> @ustest_f32i16_mm(<4 x float> %x) { ; CHECK-LABEL: ustest_f32i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: pxor %xmm0, %xmm0 ; CHECK-NEXT: movdqa %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm1 ; CHECK-NEXT: pand %xmm2, %xmm1 ; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7] ; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; CHECK-NEXT: retq entry: %conv = fptosi <4 x float> %x to <4 x i32> %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>) %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer) %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16> ret <4 x i16> %conv6 } define <8 x i16> @stest_f16i16_mm(<8 x half> %x) { ; CHECK-LABEL: stest_f16i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = 
xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; CHECK-NEXT: cvttps2dq %xmm1, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: packssdw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <8 x half> %x to <8 x i32> %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>) %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>) %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16> ret <8 x i16> %conv6 } define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) { ; CHECK-LABEL: utesth_f16i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, 
{{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm1 ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: psrad $31, %xmm2 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: por %xmm1, %xmm0 ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload ; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; CHECK-NEXT: cvttps2dq %xmm2, %xmm0 ; CHECK-NEXT: movdqa %xmm0, %xmm1 ; CHECK-NEXT: psrad $31, %xmm1 ; CHECK-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; CHECK-NEXT: cvttps2dq %xmm2, %xmm2 ; CHECK-NEXT: pand %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload ; CHECK-NEXT: # xmm2 = xmm2[0],mem[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648] ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pxor %xmm1, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183] ; CHECK-NEXT: movdqa %xmm4, %xmm0 ; CHECK-NEXT: pcmpgtd %xmm3, %xmm0 ; CHECK-NEXT: pand %xmm0, 
%xmm2 ; CHECK-NEXT: pcmpeqd %xmm3, %xmm3 ; CHECK-NEXT: pxor %xmm3, %xmm0 ; CHECK-NEXT: por %xmm2, %xmm0 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload ; CHECK-NEXT: pxor %xmm2, %xmm1 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm4 ; CHECK-NEXT: pand %xmm4, %xmm2 ; CHECK-NEXT: pxor %xmm3, %xmm4 ; CHECK-NEXT: por %xmm2, %xmm4 ; CHECK-NEXT: pslld $16, %xmm4 ; CHECK-NEXT: psrad $16, %xmm4 ; CHECK-NEXT: pslld $16, %xmm0 ; CHECK-NEXT: psrad $16, %xmm0 ; CHECK-NEXT: packssdw %xmm4, %xmm0 ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <8 x half> %x to <8 x i32> %spec.store.select = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>) %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16> ret <8 x i16> %conv6 } define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) { ; CHECK-LABEL: ustest_f16i16_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: psrlq $48, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrld $16, %xmm0 ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; CHECK-NEXT: cvttps2dq %xmm1, %xmm0 ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __extendhfsf2@PLT ; CHECK-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; CHECK-NEXT: cvttps2dq %xmm0, %xmm0 ; CHECK-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), 
%xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; CHECK-NEXT: movdqa %xmm1, %xmm2 ; CHECK-NEXT: pcmpgtd %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm2, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm2 ; CHECK-NEXT: por %xmm0, %xmm2 ; CHECK-NEXT: movdqa %xmm1, %xmm3 ; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; CHECK-NEXT: pcmpgtd %xmm0, %xmm3 ; CHECK-NEXT: pand %xmm3, %xmm0 ; CHECK-NEXT: pandn %xmm1, %xmm3 ; CHECK-NEXT: por %xmm0, %xmm3 ; CHECK-NEXT: pxor %xmm1, %xmm1 ; CHECK-NEXT: movdqa %xmm3, %xmm0 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm0 ; CHECK-NEXT: pand %xmm3, %xmm0 ; CHECK-NEXT: movdqa %xmm2, %xmm3 ; CHECK-NEXT: pcmpgtd %xmm1, %xmm3 ; CHECK-NEXT: pand %xmm2, %xmm3 ; CHECK-NEXT: pslld $16, %xmm3 ; CHECK-NEXT: psrad $16, %xmm3 ; CHECK-NEXT: pslld $16, %xmm0 ; CHECK-NEXT: psrad $16, %xmm0 ; CHECK-NEXT: packssdw %xmm3, %xmm0 ; CHECK-NEXT: addq $72, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <8 x half> %x to <8 x i32> %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>) %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> zeroinitializer) %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16> ret <8 x i16> %conv6 } ; i64 saturate define <2 x i64> @stest_f64i64_mm(<2 x double> %x) { ; CHECK-LABEL: stest_f64i64_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF ; CHECK-NEXT: cmpq %rcx, %rax ; CHECK-NEXT: movq %rcx, %rsi ; CHECK-NEXT: cmovbq %rax, %rsi ; CHECK-NEXT: xorl %edi, %edi ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: cmovnsq %rcx, %rax ; CHECK-NEXT: cmoveq %rsi, %rax ; CHECK-NEXT: cmovnsq %rdi, %rdx ; CHECK-NEXT: cmpq %rcx, %rbx ; CHECK-NEXT: movq %rcx, %rsi ; CHECK-NEXT: cmovbq %rbx, %rsi ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovsq %rbx, %rcx ; CHECK-NEXT: cmoveq %rsi, %rcx ; CHECK-NEXT: cmovsq %r14, %rdi ; CHECK-NEXT: testq %rdi, %rdi ; CHECK-NEXT: movabsq $-9223372036854775808, %rbx # imm = 0x8000000000000000 ; CHECK-NEXT: movq %rbx, %rsi ; CHECK-NEXT: cmovnsq %rcx, %rsi ; CHECK-NEXT: cmpq %rbx, %rcx ; CHECK-NEXT: cmovbeq %rbx, %rcx ; CHECK-NEXT: cmpq $-1, %rdi ; CHECK-NEXT: cmovneq %rsi, %rcx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: movq %rbx, %rsi ; CHECK-NEXT: cmovnsq %rax, %rsi ; CHECK-NEXT: cmpq %rbx, %rax ; CHECK-NEXT: cmovbeq %rbx, %rax ; CHECK-NEXT: cmpq $-1, %rdx ; CHECK-NEXT: cmovneq %rsi, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rcx, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i128> %spec.store.select = call <2 
x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>) %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>) %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @utest_f64i64_mm(<2 x double> %x) { ; CHECK-LABEL: utest_f64i64_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: callq __fixunsdfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __fixunsdfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: cmovneq %rcx, %rax ; CHECK-NEXT: cmpq $1, %rdx ; CHECK-NEXT: cmoveq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovneq %rcx, %rbx ; CHECK-NEXT: cmpq $1, %r14 ; CHECK-NEXT: cmoveq %rcx, %rbx ; CHECK-NEXT: movq %rbx, %xmm0 ; CHECK-NEXT: movq %rax, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptoui <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>) %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64> ret <2 x i64> %conv6 } define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i64_mm: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rdx, %r14 ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: callq __fixdfti@PLT ; CHECK-NEXT: xorl %ecx, %ecx ; CHECK-NEXT: testq %rdx, %rdx ; CHECK-NEXT: movl $1, %esi ; CHECK-NEXT: movl $1, %edi ; CHECK-NEXT: cmovleq %rdx, %rdi ; CHECK-NEXT: cmovgq %rcx, %rax ; CHECK-NEXT: cmpq $1, %rdx ; CHECK-NEXT: cmoveq %rcx, %rax ; CHECK-NEXT: testq %r14, %r14 ; CHECK-NEXT: cmovleq %r14, %rsi ; CHECK-NEXT: cmovgq %rcx, %rbx ; CHECK-NEXT: cmpq $1, %r14 ; CHECK-NEXT: cmoveq %rcx, %rbx ; CHECK-NEXT: testq %rsi, %rsi ; CHECK-NEXT: cmovsq %rcx, %rbx ; CHECK-NEXT: testq %rdi, %rdi ; CHECK-NEXT: cmovsq %rcx, %rax ; CHECK-NEXT: movq %rax, %xmm0 ; CHECK-NEXT: movq %rbx, %xmm1 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 24 ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %conv = fptosi <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 
18446744073709551616>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-LABEL: stest_f32i64_mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    .cfi_offset %r14, -16
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; CHECK-NEXT:    callq __fixsfti@PLT
; CHECK-NEXT:    movq %rax, %rbx
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __fixsfti@PLT
; CHECK-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
; CHECK-NEXT:    cmpq %rcx, %rax
; CHECK-NEXT:    movq %rcx, %rsi
; CHECK-NEXT:    cmovbq %rax, %rsi
; CHECK-NEXT:    xorl %edi, %edi
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    cmovnsq %rcx, %rax
; CHECK-NEXT:    cmoveq %rsi, %rax
; CHECK-NEXT:    cmovnsq %rdi, %rdx
; CHECK-NEXT:    cmpq %rcx, %rbx
; CHECK-NEXT:    movq %rcx, %rsi
; CHECK-NEXT:    cmovbq %rbx, %rsi
; CHECK-NEXT:    testq %r14, %r14
; CHECK-NEXT:    cmovsq %rbx, %rcx
; CHECK-NEXT:    cmoveq %rsi, %rcx
; CHECK-NEXT:    cmovsq %r14, %rdi
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    movabsq $-9223372036854775808, %rbx # imm = 0x8000000000000000
; CHECK-NEXT:    movq %rbx, %rsi
; CHECK-NEXT:    cmovnsq %rcx, %rsi
; CHECK-NEXT:    cmpq %rbx, %rcx
; CHECK-NEXT:    cmovbeq %rbx, %rcx
; CHECK-NEXT:    cmpq $-1, %rdi
; CHECK-NEXT:    cmovneq %rsi, %rcx
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    movq %rbx, %rsi
; CHECK-NEXT:    cmovnsq %rax, %rsi
; CHECK-NEXT:    cmpq %rbx, %rax
; CHECK-NEXT:    cmovbeq %rbx, %rax
; CHECK-NEXT:    cmpq $-1, %rdx
; CHECK-NEXT:    cmovneq %rsi, %rax
; CHECK-NEXT:    movq %rax, %xmm0
; CHECK-NEXT:    movq %rcx, %xmm1
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %conv = fptosi <2 x float> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-LABEL: utest_f32i64_mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    .cfi_offset %r14, -16
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    callq __fixunssfti@PLT
; CHECK-NEXT:    movq %rax, %rbx
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; CHECK-NEXT:    callq __fixunssfti@PLT
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    cmovneq %rcx, %rax
; CHECK-NEXT:    cmpq $1, %rdx
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    testq %r14, %r14
; CHECK-NEXT:    cmovneq %rcx, %rbx
; CHECK-NEXT:    cmpq $1, %r14
; CHECK-NEXT:    cmoveq %rcx, %rbx
; CHECK-NEXT:    movq %rbx, %xmm0
; CHECK-NEXT:    movq %rax, %xmm1
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %conv = fptoui <2 x float> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-LABEL: ustest_f32i64_mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    .cfi_offset %r14, -16
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; CHECK-NEXT:    callq __fixsfti@PLT
; CHECK-NEXT:    movq %rax, %rbx
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __fixsfti@PLT
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    movl $1, %esi
; CHECK-NEXT:    movl $1, %edi
; CHECK-NEXT:    cmovleq %rdx, %rdi
; CHECK-NEXT:    cmovgq %rcx, %rax
; CHECK-NEXT:    cmpq $1, %rdx
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    testq %r14, %r14
; CHECK-NEXT:    cmovleq %r14, %rsi
; CHECK-NEXT:    cmovgq %rcx, %rbx
; CHECK-NEXT:    cmpq $1, %r14
; CHECK-NEXT:    cmoveq %rcx, %rbx
; CHECK-NEXT:    testq %rsi, %rsi
; CHECK-NEXT:    cmovsq %rcx, %rbx
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    cmovsq %rcx, %rax
; CHECK-NEXT:    movq %rax, %xmm0
; CHECK-NEXT:    movq %rbx, %xmm1
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %conv = fptosi <2 x float> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
; CHECK-LABEL: stest_f16i64_mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    .cfi_offset %r14, -16
; CHECK-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    psrld $16, %xmm0
; CHECK-NEXT:    callq __fixhfti@PLT
; CHECK-NEXT:    movq %rax, %rbx
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __fixhfti@PLT
; CHECK-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
; CHECK-NEXT:    cmpq %rcx, %rax
; CHECK-NEXT:    movq %rcx, %rsi
; CHECK-NEXT:    cmovbq %rax, %rsi
; CHECK-NEXT:    xorl %edi, %edi
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    cmovnsq %rcx, %rax
; CHECK-NEXT:    cmoveq %rsi, %rax
; CHECK-NEXT:    cmovnsq %rdi, %rdx
; CHECK-NEXT:    cmpq %rcx, %rbx
; CHECK-NEXT:    movq %rcx, %rsi
; CHECK-NEXT:    cmovbq %rbx, %rsi
; CHECK-NEXT:    testq %r14, %r14
; CHECK-NEXT:    cmovsq %rbx, %rcx
; CHECK-NEXT:    cmoveq %rsi, %rcx
; CHECK-NEXT:    cmovsq %r14, %rdi
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    movabsq $-9223372036854775808, %rbx # imm = 0x8000000000000000
; CHECK-NEXT:    movq %rbx, %rsi
; CHECK-NEXT:    cmovnsq %rcx, %rsi
; CHECK-NEXT:    cmpq %rbx, %rcx
; CHECK-NEXT:    cmovbeq %rbx, %rcx
; CHECK-NEXT:    cmpq $-1, %rdi
; CHECK-NEXT:    cmovneq %rsi, %rcx
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    movq %rbx, %rsi
; CHECK-NEXT:    cmovnsq %rax, %rsi
; CHECK-NEXT:    cmpq %rbx, %rax
; CHECK-NEXT:    cmovbeq %rbx, %rax
; CHECK-NEXT:    cmpq $-1, %rdx
; CHECK-NEXT:    cmovneq %rsi, %rax
; CHECK-NEXT:    movq %rax, %xmm0
; CHECK-NEXT:    movq %rcx, %xmm1
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %conv = fptosi <2 x half> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-LABEL: utesth_f16i64_mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    .cfi_offset %r14, -16
; CHECK-NEXT:    movdqa %xmm0, %xmm1
; CHECK-NEXT:    psrld $16, %xmm1
; CHECK-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
; CHECK-NEXT:    callq __fixunshfti@PLT
; CHECK-NEXT:    movq %rax, %rbx
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __fixunshfti@PLT
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    cmovneq %rcx, %rax
; CHECK-NEXT:    cmpq $1, %rdx
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    testq %r14, %r14
; CHECK-NEXT:    cmovneq %rcx, %rbx
; CHECK-NEXT:    cmpq $1, %r14
; CHECK-NEXT:    cmoveq %rcx, %rbx
; CHECK-NEXT:    movq %rbx, %xmm0
; CHECK-NEXT:    movq %rax, %xmm1
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %conv = fptoui <2 x half> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
; CHECK-LABEL: ustest_f16i64_mm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    .cfi_offset %rbx, -24
; CHECK-NEXT:    .cfi_offset %r14, -16
; CHECK-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    psrld $16, %xmm0
; CHECK-NEXT:    callq __fixhfti@PLT
; CHECK-NEXT:    movq %rax, %rbx
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __fixhfti@PLT
; CHECK-NEXT:    xorl %ecx, %ecx
; CHECK-NEXT:    testq %rdx, %rdx
; CHECK-NEXT:    movl $1, %esi
; CHECK-NEXT:    movl $1, %edi
; CHECK-NEXT:    cmovleq %rdx, %rdi
; CHECK-NEXT:    cmovgq %rcx, %rax
; CHECK-NEXT:    cmpq $1, %rdx
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    testq %r14, %r14
; CHECK-NEXT:    cmovleq %r14, %rsi
; CHECK-NEXT:    cmovgq %rcx, %rbx
; CHECK-NEXT:    cmpq $1, %r14
; CHECK-NEXT:    cmoveq %rcx, %rbx
; CHECK-NEXT:    testq %rsi, %rsi
; CHECK-NEXT:    cmovsq %rcx, %rbx
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    cmovsq %rcx, %rax
; CHECK-NEXT:    movq %rax, %xmm0
; CHECK-NEXT:    movq %rbx, %xmm1
; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 24
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %conv = fptosi <2 x half> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
declare <2 x i128> @llvm.smin.v2i128(<2 x i128>, <2 x i128>)
declare <2 x i128> @llvm.smax.v2i128(<2 x i128>, <2 x i128>)
declare <2 x i128> @llvm.umin.v2i128(<2 x i128>, <2 x i128>)