; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512VL-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512BW

; These test cases are inspired by C++2a std::midpoint().
; See https://bugs.llvm.org/show_bug.cgi?id=40965

; Using 512-bit vector regs.

; ---------------------------------------------------------------------------- ;
; 32-bit width. 512 / 32 = 16 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs

define <16 x i32> @vec512_i32_signed_reg_reg(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; ALL-LABEL: vec512_i32_signed_reg_reg:
; ALL:       # %bb.0:
; ALL-NEXT:    vpminsd %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
  %t7 = sub <16 x i32> %t6, %t5
  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
  ret <16 x i32> %a10
}

define <16 x i32> @vec512_i32_unsigned_reg_reg(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; ALL-LABEL: vec512_i32_unsigned_reg_reg:
; ALL:       # %bb.0:
; ALL-NEXT:    vpminud %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxud %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %t3 = icmp ugt <16 x i32> %a1, %a2
  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
  %t7 = sub <16 x i32> %t6, %t5
  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t9 = mul <16 x i32> %t16, %t16
  %a10 = add <16 x i32> %t9, %a1
  ret <16 x i32> %a10
}
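
; Every function in this file tests the same midpoint recipe: compare the two
; operands, subtract min from max, halve the difference with a logical shift,
; re-apply the sign of the comparison, and add the result back onto the first
; operand. A rough scalar C++ equivalent of the i64/i16/i8 variants
; (hypothetical helper shown only for orientation; std::midpoint itself is
; just the inspiration):
;
;   #include <cstdint>
;   int32_t midpoint_like(int32_t a1, int32_t a2) {
;     int32_t sign = a1 > a2 ? -1 : 1;           // %t4
;     int32_t lo   = a1 > a2 ? a2 : a1;          // %t5 (min)
;     int32_t hi   = a1 > a2 ? a1 : a2;          // %t6 (max)
;     uint32_t half = (uint32_t)(hi - lo) >> 1;  // %t7, then the lshr
;     return (int32_t)half * sign + a1;          // %t9, %a10
;   }
;
; Note that the i32 functions above multiply the halved difference by itself
; (%t16 * %t16) rather than by %t4, so %t4 is dead there and the checks show
; vpmulld %zmm1, %zmm1, %zmm1.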
; Values are loaded. Only check signed case.

define <16 x i32> @vec512_i32_signed_mem_reg(ptr %a1_addr, <16 x i32> %a2) nounwind {
; ALL-LABEL: vec512_i32_signed_mem_reg:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
; ALL-NEXT:    vpminsd %zmm0, %zmm1, %zmm2
; ALL-NEXT:    vpmaxsd %zmm0, %zmm1, %zmm0
; ALL-NEXT:    vpsubd %zmm2, %zmm0, %zmm0
; ALL-NEXT:    vpsrld $1, %zmm0, %zmm0
; ALL-NEXT:    vpmulld %zmm0, %zmm0, %zmm0
; ALL-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %a1 = load <16 x i32>, ptr %a1_addr
  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
  %t7 = sub <16 x i32> %t6, %t5
  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
  ret <16 x i32> %a10
}

define <16 x i32> @vec512_i32_signed_reg_mem(<16 x i32> %a1, ptr %a2_addr) nounwind {
; ALL-LABEL: vec512_i32_signed_reg_mem:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
; ALL-NEXT:    vpminsd %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %a2 = load <16 x i32>, ptr %a2_addr
  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
  %t7 = sub <16 x i32> %t6, %t5
  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
  ret <16 x i32> %a10
}

define <16 x i32> @vec512_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; ALL-LABEL: vec512_i32_signed_mem_mem:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovdqa64 (%rdi), %zmm0
; ALL-NEXT:    vmovdqa64 (%rsi), %zmm1
; ALL-NEXT:    vpminsd %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %a1 = load <16 x i32>, ptr %a1_addr
  %a2 = load <16 x i32>, ptr %a2_addr
  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
  %t7 = sub <16 x i32> %t6, %t5
  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
  ret <16 x i32> %a10
}

; ---------------------------------------------------------------------------- ;
; 64-bit width. 512 / 64 = 8 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs

define <8 x i64> @vec512_i64_signed_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwind {
; ALL-LABEL: vec512_i64_signed_reg_reg:
; ALL:       # %bb.0:
; ALL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
; ALL-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
  %t7 = sub <8 x i64> %t6, %t5
  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
  ret <8 x i64> %a10
}

define <8 x i64> @vec512_i64_unsigned_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwind {
; ALL-LABEL: vec512_i64_unsigned_reg_reg:
; ALL:       # %bb.0:
; ALL-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
; ALL-NEXT:    vpminuq %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %t3 = icmp ugt <8 x i64> %a1, %a2
  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
  %t7 = sub <8 x i64> %t6, %t5
  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t9 = mul <8 x i64> %t8, %t4
  %a10 = add <8 x i64> %t9, %a1
  ret <8 x i64> %a10
}
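
; Unlike the narrower element types below, i64 needs no AVX512BW: vpminsq,
; vpmaxsq and vpcmpgtq are all AVX512F, so every RUN line produces the same
; code, and the sign is re-applied by negating under the compare mask rather
; than with a multiply. Roughly, in AVX-512 intrinsics (an illustrative
; sketch only, not part of the test):
;
;   #include <immintrin.h>
;   __m512i midpoint_i64(__m512i a1, __m512i a2) {
;     __mmask8 gt = _mm512_cmpgt_epi64_mask(a1, a2);     // vpcmpgtq -> %k1
;     __m512i lo = _mm512_min_epi64(a1, a2);             // vpminsq
;     __m512i hi = _mm512_max_epi64(a1, a2);             // vpmaxsq
;     __m512i d = _mm512_srli_epi64(_mm512_sub_epi64(hi, lo), 1);
;     // negate the halved difference only where a1 > a2 (merge-masked sub)
;     d = _mm512_mask_sub_epi64(d, gt, _mm512_setzero_si512(), d);
;     return _mm512_add_epi64(d, a1);                    // vpaddq
;   }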
; Values are loaded. Only check signed case.

define <8 x i64> @vec512_i64_signed_mem_reg(ptr %a1_addr, <8 x i64> %a2) nounwind {
; ALL-LABEL: vec512_i64_signed_mem_reg:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
; ALL-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
; ALL-NEXT:    vpminsq %zmm0, %zmm1, %zmm2
; ALL-NEXT:    vpmaxsq %zmm0, %zmm1, %zmm0
; ALL-NEXT:    vpsubq %zmm2, %zmm0, %zmm0
; ALL-NEXT:    vpsrlq $1, %zmm0, %zmm0
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpsubq %zmm0, %zmm2, %zmm0 {%k1}
; ALL-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; ALL-NEXT:    retq
  %a1 = load <8 x i64>, ptr %a1_addr
  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
  %t7 = sub <8 x i64> %t6, %t5
  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
  ret <8 x i64> %a10
}

define <8 x i64> @vec512_i64_signed_reg_mem(<8 x i64> %a1, ptr %a2_addr) nounwind {
; ALL-LABEL: vec512_i64_signed_reg_mem:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
; ALL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
; ALL-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %a2 = load <8 x i64>, ptr %a2_addr
  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
  %t7 = sub <8 x i64> %t6, %t5
  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
  ret <8 x i64> %a10
}

define <8 x i64> @vec512_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; ALL-LABEL: vec512_i64_signed_mem_mem:
; ALL:       # %bb.0:
; ALL-NEXT:    vmovdqa64 (%rdi), %zmm0
; ALL-NEXT:    vmovdqa64 (%rsi), %zmm1
; ALL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
; ALL-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT:    vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; ALL-NEXT:    retq
  %a1 = load <8 x i64>, ptr %a1_addr
  %a2 = load <8 x i64>, ptr %a2_addr
  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
  %t7 = sub <8 x i64> %t6, %t5
  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
  ret <8 x i64> %a10
}

; ---------------------------------------------------------------------------- ;
; 16-bit width. 512 / 16 = 32 elts.
; ---------------------------------------------------------------------------- ;
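
; Word-granularity zmm ops (vpcmpgtw, vpminsw/vpmaxsw, vpsubw, vpsrlw) are
; AVX512BW instructions. The AVX512F and AVX512VL-FALLBACK runs therefore
; split each 512-bit operation into two 256-bit halves with vextracti64x4 /
; vinserti64x4 and blend the negated half-difference in with vpternlogq,
; while the AVX512BW run stays in zmm registers throughout. The BW path
; corresponds roughly to this intrinsics sketch (illustrative only):
;
;   #include <immintrin.h>
;   __m512i midpoint_i16(__m512i a1, __m512i a2) {
;     __mmask32 gt = _mm512_cmpgt_epi16_mask(a1, a2);    // vpcmpgtw -> %k1
;     __m512i lo = _mm512_min_epi16(a1, a2);             // vpminsw
;     __m512i hi = _mm512_max_epi16(a1, a2);             // vpmaxsw
;     __m512i d = _mm512_srli_epi16(_mm512_sub_epi16(hi, lo), 1);
;     d = _mm512_mask_sub_epi16(d, gt, _mm512_setzero_si512(), d);
;     return _mm512_add_epi16(d, a1);                    // vpaddw
;   }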
; Values come from regs

define <32 x i16> @vec512_i16_signed_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_signed_reg_reg:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vpminsw %ymm2, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubw %ymm2, %ymm5, %ymm2
; AVX512F-NEXT:    vpsubw %ymm1, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512F-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512F-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_reg:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm2, %ymm5, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm1, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_reg:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
  %t7 = sub <32 x i16> %t6, %t5
  %t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <32 x i16> %t16, %t4 ; signed
  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
  ret <32 x i16> %a10
}

define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT:    vpminuw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpeqw %ymm4, %ymm3, %ymm5
; AVX512F-NEXT:    vpminuw %ymm1, %ymm0, %ymm6
; AVX512F-NEXT:    vpcmpeqw %ymm6, %ymm0, %ymm7
; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512F-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT:    vpmaxuw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpsubw %ymm4, %ymm2, %ymm2
; AVX512F-NEXT:    vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT:    vpsubw %ymm2, %ymm4, %ymm2
; AVX512F-NEXT:    vpsubw %ymm1, %ymm4, %ymm4
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
; AVX512F-NEXT:    vpternlogq $216, %zmm5, %zmm1, %zmm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512F-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT:    vpminuw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpeqw %ymm4, %ymm3, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminuw %ymm1, %ymm0, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpcmpeqw %ymm6, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512VL-FALLBACK-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpmaxuw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm4, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm2, %ymm4, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm1, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
; AVX512VL-FALLBACK-NEXT:    vpternlogq $216, %zmm5, %zmm1, %zmm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpcmpnleuw %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminuw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxuw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %t3 = icmp ugt <32 x i16> %a1, %a2
  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
  %t7 = sub <32 x i16> %t6, %t5
  %t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul <32 x i16> %t16, %t4
  %a10 = add <32 x i16> %t9, %a1
  ret <32 x i16> %a10
}

; Values are loaded. Only check signed case.

define <32 x i16> @vec512_i16_signed_mem_reg(ptr %a1_addr, <32 x i16> %a2) nounwind {
; AVX512F-LABEL: vec512_i16_signed_mem_reg:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vpminsw %ymm1, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT:    vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubw %ymm1, %ymm5, %ymm1
; AVX512F-NEXT:    vpsubw %ymm0, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512F-NEXT:    vpaddw %ymm3, %ymm0, %ymm0
; AVX512F-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_reg:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm1, %ymm5, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm0, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm3, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_reg:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT:    vpcmpgtw %zmm0, %zmm1, %k1
; AVX512BW-NEXT:    vpminsw %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT:    vpmaxsw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    vpsubw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT:    vpsrlw $1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubw %zmm0, %zmm2, %zmm0 {%k1}
; AVX512BW-NEXT:    vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %a1 = load <32 x i16>, ptr %a1_addr
  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
  %t7 = sub <32 x i16> %t6, %t5
  %t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <32 x i16> %t16, %t4 ; signed
  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
  ret <32 x i16> %a10
}

define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i16_signed_reg_mem:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vpminsw %ymm2, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512F-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubw %ymm2, %ymm5, %ymm2
; AVX512F-NEXT:    vpsubw %ymm1, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512F-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512F-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_mem:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm2, %ymm5, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm1, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_mem:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %a2 = load <32 x i16>, ptr %a2_addr
  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
  %t7 = sub <32 x i16> %t6, %t5
  %t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <32 x i16> %t16, %t4 ; signed
  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
  ret <32 x i16> %a10
}

define <32 x i16> @vec512_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i16_signed_mem_mem:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm1
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vpminsw %ymm1, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT:    vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubw %ymm1, %ymm5, %ymm1
; AVX512F-NEXT:    vpsubw %ymm0, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512F-NEXT:    vpaddw %ymm3, %ymm0, %ymm0
; AVX512F-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_mem:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm0
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rsi), %ymm1
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm1, %ymm5, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm0, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm3, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_mem:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %a1 = load <32 x i16>, ptr %a1_addr
  %a2 = load <32 x i16>, ptr %a2_addr
  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
  %t7 = sub <32 x i16> %t6, %t5
  %t16 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <32 x i16> %t16, %t4 ; signed
  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
  ret <32 x i16> %a10
}

; ---------------------------------------------------------------------------- ;
; 8-bit width. 512 / 8 = 64 elts.
; ---------------------------------------------------------------------------- ;
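
; Bytes additionally lack a dedicated shift: there is no vpsrlb, so the
; halving is done with a word shift (vpsrlw $1) followed by a vpandq against
; a constant-pool byte splat of 0x7f (the {{\.?LCPI[0-9]+_[0-9]+}} load in
; the checks) to clear the bit that leaks in from each byte's neighbour.
; Sketch of the AVX512BW path (illustrative only):
;
;   #include <immintrin.h>
;   __m512i midpoint_i8(__m512i a1, __m512i a2) {
;     __mmask64 gt = _mm512_cmpgt_epi8_mask(a1, a2);     // vpcmpgtb -> %k1
;     __m512i lo = _mm512_min_epi8(a1, a2);              // vpminsb
;     __m512i hi = _mm512_max_epi8(a1, a2);              // vpmaxsb
;     __m512i d = _mm512_sub_epi8(hi, lo);               // vpsubb
;     d = _mm512_srli_epi16(d, 1);                       // vpsrlw: word shift
;     d = _mm512_and_si512(d, _mm512_set1_epi8(0x7f));   // vpandq: drop stray bit
;     d = _mm512_mask_sub_epi8(d, gt, _mm512_setzero_si512(), d);
;     return _mm512_add_epi8(d, a1);                     // vpaddb
;   }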
; Values come from regs

define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_reg:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vpminsb %ymm2, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512F-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512F-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubb %ymm2, %ymm5, %ymm2
; AVX512F-NEXT:    vpsubb %ymm1, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512F-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512F-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_reg:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm5, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i8_signed_reg_reg:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
  %t7 = sub <64 x i8> %t6, %t5
  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
  ret <64 x i8> %a10
}

define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT:    vpminub %ymm2, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpeqb %ymm4, %ymm3, %ymm5
; AVX512F-NEXT:    vpminub %ymm1, %ymm0, %ymm6
; AVX512F-NEXT:    vpcmpeqb %ymm6, %ymm0, %ymm7
; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512F-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
; AVX512F-NEXT:    vpmaxub %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
; AVX512F-NEXT:    vpsubb %ymm6, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512F-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT:    vpsubb %ymm2, %ymm4, %ymm2
; AVX512F-NEXT:    vpsubb %ymm1, %ymm4, %ymm4
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
; AVX512F-NEXT:    vpternlogq $216, %zmm5, %zmm1, %zmm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512F-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT:    vpminub %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpeqb %ymm4, %ymm3, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminub %ymm1, %ymm0, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpcmpeqb %ymm6, %ymm0, %ymm7
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512VL-FALLBACK-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpmaxub %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm4, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
; AVX512VL-FALLBACK-NEXT:    vpternlogq $216, %zmm5, %zmm1, %zmm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpcmpnleub %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminub %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxub %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %t3 = icmp ugt <64 x i8> %a1, %a2
  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
  %t7 = sub <64 x i8> %t6, %t5
  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul <64 x i8> %t8, %t4
  %a10 = add <64 x i8> %t9, %a1
  ret <64 x i8> %a10
}

; Values are loaded. Only check signed case.

define <64 x i8> @vec512_i8_signed_mem_reg(ptr %a1_addr, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_reg:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vpminsb %ymm1, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
; AVX512F-NEXT:    vpsubb %ymm6, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubb %ymm1, %ymm5, %ymm1
; AVX512F-NEXT:    vpsubb %ymm0, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512F-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
; AVX512F-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_reg:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm5, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm0, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_reg:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT:    vpcmpgtb %zmm0, %zmm1, %k1
; AVX512BW-NEXT:    vpminsb %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT:    vpmaxsb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT:    vpsrlw $1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubb %zmm0, %zmm2, %zmm0 {%k1}
; AVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %a1 = load <64 x i8>, ptr %a1_addr
  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
  %t7 = sub <64 x i8> %t6, %t5
  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
  ret <64 x i8> %a10
}

define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_mem:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
; AVX512F-NEXT:    vpminsb %ymm2, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512F-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512F-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubb %ymm2, %ymm5, %ymm2
; AVX512F-NEXT:    vpsubb %ymm1, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512F-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512F-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_mem:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm5, %ymm2
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm2
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i8_signed_reg_mem:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %a2 = load <64 x i8>, ptr %a2_addr
  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
  %t7 = sub <64 x i8> %t6, %t5
  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
  ret <64 x i8> %a10
}

define <64 x i8> @vec512_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_mem:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm1
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512F-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
; AVX512F-NEXT:    vpminsb %ymm1, %ymm3, %ymm6
; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
; AVX512F-NEXT:    vpsubb %ymm6, %ymm1, %ymm1
; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512F-NEXT:    vpsubb %ymm1, %ymm5, %ymm1
; AVX512F-NEXT:    vpsubb %ymm0, %ymm5, %ymm5
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512F-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
; AVX512F-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_mem:
; AVX512VL-FALLBACK:       # %bb.0:
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm0
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rsi), %ymm1
; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm1, %ymm5, %ymm1
; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm0, %ymm5, %ymm5
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
; AVX512VL-FALLBACK-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm1
; AVX512VL-FALLBACK-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT:    retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_mem:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT:    vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    retq
  %a1 = load <64 x i8>, ptr %a1_addr
  %a2 = load <64 x i8>, ptr %a2_addr
  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
  %t7 = sub <64 x i8> %t6, %t5
  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
  ret <64 x i8> %a10
}