; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefix=XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=INT256,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=INT256,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=INT256,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=INT256,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi | FileCheck %s --check-prefixes=INT256,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=INT256,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=INT256,AVX512VL,AVX512VLDQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=INT256,AVX512VL,AVX512VLBW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+avx512vbmi | FileCheck %s --check-prefixes=INT256,AVX512VL,VLVBMI

define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; XOP-LABEL: var_shuffle_v4i64:
; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm3
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm4
; AVX1-NEXT: vpermilpd %ymm4, %ymm2, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,2,2,2]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[2,3,2,3]
; AVX2-NEXT: vpermilpd %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
  %index0 = extractelement <4 x i64> %indices, i32 0
  %index1 = extractelement <4 x i64> %indices, i32 1
  %index2 = extractelement <4 x i64> %indices, i32 2
  %index3 = extractelement <4 x i64> %indices, i32 3
  %v0 = extractelement <4 x i64> %v, i64 %index0
  %v1 = extractelement <4 x i64> %v, i64 %index1
  %v2 = extractelement <4 x i64> %v, i64 %index2
  %v3 = extractelement <4 x i64> %v, i64 %index3
  %ret0 = insertelement <4 x i64> undef, i64 %v0, i32 0
  %ret1 = insertelement <4 x i64> %ret0, i64 %v1, i32 1
  %ret2 = insertelement <4 x i64> %ret1, i64 %v2, i32 2
  %ret3 = insertelement <4 x i64> %ret2, i64 %v3, i32 3
  ret <4 x i64> %ret3
}

define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; XOP-LABEL: var_shuffle_v8i32:
; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpermil2ps $0, %ymm1, %ymm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX1-NEXT: vpermilps %ymm1, %ymm2, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8i32:
; INT256: # %bb.0:
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
  %index0 = extractelement <8 x i32> %indices, i32 0
  %index1 = extractelement <8 x i32> %indices, i32 1
  %index2 = extractelement <8 x i32> %indices, i32 2
  %index3 = extractelement <8 x i32> %indices, i32 3
  %index4 = extractelement <8 x i32> %indices, i32 4
  %index5 = extractelement <8 x i32> %indices, i32 5
  %index6 = extractelement <8 x i32> %indices, i32 6
  %index7 = extractelement <8 x i32> %indices, i32 7
  %v0 = extractelement <8 x i32> %v, i32 %index0
  %v1 = extractelement <8 x i32> %v, i32 %index1
  %v2 = extractelement <8 x i32> %v, i32 %index2
  %v3 = extractelement <8 x i32> %v, i32 %index3
  %v4 = extractelement <8 x i32> %v, i32 %index4
  %v5 = extractelement <8 x i32> %v, i32 %index5
  %v6 = extractelement <8 x i32> %v, i32 %index6
  %v7 = extractelement <8 x i32> %v, i32 %index7
  %ret0 = insertelement <8 x i32> undef, i32 %v0, i32 0
  %ret1 = insertelement <8 x i32> %ret0, i32 %v1, i32 1
  %ret2 = insertelement <8 x i32> %ret1, i32 %v2, i32 2
  %ret3 = insertelement <8 x i32> %ret2, i32 %v3, i32 3
  %ret4 = insertelement <8 x i32> %ret3, i32 %v4, i32 4
  %ret5 = insertelement <8 x i32> %ret4, i32 %v5, i32 5
  %ret6 = insertelement <8 x i32> %ret5, i32 %v6, i32 6
  %ret7 = insertelement <8 x i32> %ret6, i32 %v7, i32 7
  ret <8 x i32> %ret7
}

define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwind {
; XOP-LABEL: var_shuffle_v16i16:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [256,256,256,256,256,256,256,256]
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [514,514,514,514,514,514,514,514]
; XOP-NEXT: vpmacsww %xmm2, %xmm3, %xmm1, %xmm4
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpmacsww %xmm2, %xmm3, %xmm1, %xmm1
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOP-NEXT: vpperm %xmm1, %xmm2, %xmm0, %xmm1
; XOP-NEXT: vpperm %xmm4, %xmm2, %xmm0, %xmm0
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [514,514,514,514,514,514,514,514]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [256,256,256,256,256,256,256,256]
; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm6
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm5, %xmm4
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX512-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VLDQ-LABEL: var_shuffle_v16i16:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX512VLDQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v16i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
;
; VLVBMI-LABEL: var_shuffle_v16i16:
; VLVBMI: # %bb.0:
; VLVBMI-NEXT: vpermw %ymm0, %ymm1, %ymm0
; VLVBMI-NEXT: retq
  %index0 = extractelement <16 x i16> %indices, i32 0
  %index1 = extractelement <16 x i16> %indices, i32 1
  %index2 = extractelement <16 x i16> %indices, i32 2
  %index3 = extractelement <16 x i16> %indices, i32 3
  %index4 = extractelement <16 x i16> %indices, i32 4
  %index5 = extractelement <16 x i16> %indices, i32 5
  %index6 = extractelement <16 x i16> %indices, i32 6
  %index7 = extractelement <16 x i16> %indices, i32 7
  %index8 = extractelement <16 x i16> %indices, i32 8
  %index9 = extractelement <16 x i16> %indices, i32 9
  %index10 = extractelement <16 x i16> %indices, i32 10
  %index11 = extractelement <16 x i16> %indices, i32 11
  %index12 = extractelement <16 x i16> %indices, i32 12
  %index13 = extractelement <16 x i16> %indices, i32 13
  %index14 = extractelement <16 x i16> %indices, i32 14
  %index15 = extractelement <16 x i16> %indices, i32 15
  %v0 = extractelement <16 x i16> %v, i16 %index0
  %v1 = extractelement <16 x i16> %v, i16 %index1
  %v2 = extractelement <16 x i16> %v, i16 %index2
  %v3 = extractelement <16 x i16> %v, i16 %index3
  %v4 = extractelement <16 x i16> %v, i16 %index4
  %v5 = extractelement <16 x i16> %v, i16 %index5
  %v6 = extractelement <16 x i16> %v, i16 %index6
  %v7 = extractelement <16 x i16> %v, i16 %index7
  %v8 = extractelement <16 x i16> %v, i16 %index8
  %v9 = extractelement <16 x i16> %v, i16 %index9
  %v10 = extractelement <16 x i16> %v, i16 %index10
  %v11 = extractelement <16 x i16> %v, i16 %index11
  %v12 = extractelement <16 x i16> %v, i16 %index12
  %v13 = extractelement <16 x i16> %v, i16 %index13
  %v14 = extractelement <16 x i16> %v, i16 %index14
  %v15 = extractelement <16 x i16> %v, i16 %index15
  %ret0 = insertelement <16 x i16> undef, i16 %v0, i32 0
  %ret1 = insertelement <16 x i16> %ret0, i16 %v1, i32 1
  %ret2 = insertelement <16 x i16> %ret1, i16 %v2, i32 2
  %ret3 = insertelement <16 x i16> %ret2, i16 %v3, i32 3
  %ret4 = insertelement <16 x i16> %ret3, i16 %v4, i32 4
  %ret5 = insertelement <16 x i16> %ret4, i16 %v5, i32 5
  %ret6 = insertelement <16 x i16> %ret5, i16 %v6, i32 6
  %ret7 = insertelement <16 x i16> %ret6, i16 %v7, i32 7
  %ret8 = insertelement <16 x i16> %ret7, i16 %v8, i32 8
  %ret9 = insertelement <16 x i16> %ret8, i16 %v9, i32 9
  %ret10 = insertelement <16 x i16> %ret9, i16 %v10, i32 10
  %ret11 = insertelement <16 x i16> %ret10, i16 %v11, i32 11
  %ret12 = insertelement <16 x i16> %ret11, i16 %v12, i32 12
  %ret13 = insertelement <16 x i16> %ret12, i16 %v13, i32 13
  %ret14 = insertelement <16 x i16> %ret13, i16 %v14, i32 14
  %ret15 = insertelement <16 x i16> %ret14, i16 %v15, i32 15
  ret <16 x i16> %ret15
}

define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; XOP-LABEL: var_shuffle_v32i8:
; XOP: # %bb.0:
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vpperm %xmm2, %xmm3, %xmm0, %xmm2
; XOP-NEXT: vpperm %xmm1, %xmm3, %xmm0, %xmm0
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpshufb %xmm2, %xmm5, %xmm6
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm4
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX512-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VLDQ-LABEL: var_shuffle_v32i8:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX512VLDQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v32i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpshufb %ymm1, %ymm2, %ymm2
; AVX512VLBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512VLBW-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512VLBW-NEXT: vpshufb %ymm1, %ymm0, %ymm2 {%k1}
; AVX512VLBW-NEXT: vmovdqa %ymm2, %ymm0
; AVX512VLBW-NEXT: retq
;
; VLVBMI-LABEL: var_shuffle_v32i8:
; VLVBMI: # %bb.0:
; VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
; VLVBMI-NEXT: retq
  %index0 = extractelement <32 x i8> %indices, i32 0
  %index1 = extractelement <32 x i8> %indices, i32 1
  %index2 = extractelement <32 x i8> %indices, i32 2
  %index3 = extractelement <32 x i8> %indices, i32 3
  %index4 = extractelement <32 x i8> %indices, i32 4
  %index5 = extractelement <32 x i8> %indices, i32 5
  %index6 = extractelement <32 x i8> %indices, i32 6
  %index7 = extractelement <32 x i8> %indices, i32 7
  %index8 = extractelement <32 x i8> %indices, i32 8
  %index9 = extractelement <32 x i8> %indices, i32 9
  %index10 = extractelement <32 x i8> %indices, i32 10
  %index11 = extractelement <32 x i8> %indices, i32 11
  %index12 = extractelement <32 x i8> %indices, i32 12
  %index13 = extractelement <32 x i8> %indices, i32 13
  %index14 = extractelement <32 x i8> %indices, i32 14
  %index15 = extractelement <32 x i8> %indices, i32 15
  %index16 = extractelement <32 x i8> %indices, i32 16
  %index17 = extractelement <32 x i8> %indices, i32 17
  %index18 = extractelement <32 x i8> %indices, i32 18
  %index19 = extractelement <32 x i8> %indices, i32 19
  %index20 = extractelement <32 x i8> %indices, i32 20
  %index21 = extractelement <32 x i8> %indices, i32 21
  %index22 = extractelement <32 x i8> %indices, i32 22
  %index23 = extractelement <32 x i8> %indices, i32 23
  %index24 = extractelement <32 x i8> %indices, i32 24
  %index25 = extractelement <32 x i8> %indices, i32 25
  %index26 = extractelement <32 x i8> %indices, i32 26
  %index27 = extractelement <32 x i8> %indices, i32 27
  %index28 = extractelement <32 x i8> %indices, i32 28
  %index29 = extractelement <32 x i8> %indices, i32 29
  %index30 = extractelement <32 x i8> %indices, i32 30
  %index31 = extractelement <32 x i8> %indices, i32 31
  %v0 = extractelement <32 x i8> %v, i8 %index0
  %v1 = extractelement <32 x i8> %v, i8 %index1
  %v2 = extractelement <32 x i8> %v, i8 %index2
  %v3 = extractelement <32 x i8> %v, i8 %index3
  %v4 = extractelement <32 x i8> %v, i8 %index4
  %v5 = extractelement <32 x i8> %v, i8 %index5
  %v6 = extractelement <32 x i8> %v, i8 %index6
  %v7 = extractelement <32 x i8> %v, i8 %index7
  %v8 = extractelement <32 x i8> %v, i8 %index8
  %v9 = extractelement <32 x i8> %v, i8 %index9
  %v10 = extractelement <32 x i8> %v, i8 %index10
  %v11 = extractelement <32 x i8> %v, i8 %index11
  %v12 = extractelement <32 x i8> %v, i8 %index12
  %v13 = extractelement <32 x i8> %v, i8 %index13
  %v14 = extractelement <32 x i8> %v, i8 %index14
  %v15 = extractelement <32 x i8> %v, i8 %index15
  %v16 = extractelement <32 x i8> %v, i8 %index16
  %v17 = extractelement <32 x i8> %v, i8 %index17
  %v18 = extractelement <32 x i8> %v, i8 %index18
  %v19 = extractelement <32 x i8> %v, i8 %index19
  %v20 = extractelement <32 x i8> %v, i8 %index20
  %v21 = extractelement <32 x i8> %v, i8 %index21
  %v22 = extractelement <32 x i8> %v, i8 %index22
  %v23 = extractelement <32 x i8> %v, i8 %index23
  %v24 = extractelement <32 x i8> %v, i8 %index24
  %v25 = extractelement <32 x i8> %v, i8 %index25
  %v26 = extractelement <32 x i8> %v, i8 %index26
  %v27 = extractelement <32 x i8> %v, i8 %index27
  %v28 = extractelement <32 x i8> %v, i8 %index28
  %v29 = extractelement <32 x i8> %v, i8 %index29
  %v30 = extractelement <32 x i8> %v, i8 %index30
  %v31 = extractelement <32 x i8> %v, i8 %index31
  %ret0 = insertelement <32 x i8> undef, i8 %v0, i32 0
  %ret1 = insertelement <32 x i8> %ret0, i8 %v1, i32 1
  %ret2 = insertelement <32 x i8> %ret1, i8 %v2, i32 2
  %ret3 = insertelement <32 x i8> %ret2, i8 %v3, i32 3
  %ret4 = insertelement <32 x i8> %ret3, i8 %v4, i32 4
  %ret5 = insertelement <32 x i8> %ret4, i8 %v5, i32 5
  %ret6 = insertelement <32 x i8> %ret5, i8 %v6, i32 6
  %ret7 = insertelement <32 x i8> %ret6, i8 %v7, i32 7
  %ret8 = insertelement <32 x i8> %ret7, i8 %v8, i32 8
  %ret9 = insertelement <32 x i8> %ret8, i8 %v9, i32 9
  %ret10 = insertelement <32 x i8> %ret9, i8 %v10, i32 10
  %ret11 = insertelement <32 x i8> %ret10, i8 %v11, i32 11
  %ret12 = insertelement <32 x i8> %ret11, i8 %v12, i32 12
  %ret13 = insertelement <32 x i8> %ret12, i8 %v13, i32 13
  %ret14 = insertelement <32 x i8> %ret13, i8 %v14, i32 14
  %ret15 = insertelement <32 x i8> %ret14, i8 %v15, i32 15
  %ret16 = insertelement <32 x i8> %ret15, i8 %v16, i32 16
  %ret17 = insertelement <32 x i8> %ret16, i8 %v17, i32 17
  %ret18 = insertelement <32 x i8> %ret17, i8 %v18, i32 18
  %ret19 = insertelement <32 x i8> %ret18, i8 %v19, i32 19
  %ret20 = insertelement <32 x i8> %ret19, i8 %v20, i32 20
  %ret21 = insertelement <32 x i8> %ret20, i8 %v21, i32 21
  %ret22 = insertelement <32 x i8> %ret21, i8 %v22, i32 22
  %ret23 = insertelement <32 x i8> %ret22, i8 %v23, i32 23
  %ret24 = insertelement <32 x i8> %ret23, i8 %v24, i32 24
  %ret25 = insertelement <32 x i8> %ret24, i8 %v25, i32 25
  %ret26 = insertelement <32 x i8> %ret25, i8 %v26, i32 26
  %ret27 = insertelement <32 x i8> %ret26, i8 %v27, i32 27
  %ret28 = insertelement <32 x i8> %ret27, i8 %v28, i32 28
  %ret29 = insertelement <32 x i8> %ret28, i8 %v29, i32 29
  %ret30 = insertelement <32 x i8> %ret29, i8 %v30, i32 30
  %ret31 = insertelement <32 x i8> %ret30, i8 %v31, i32 31
  ret <32 x i8> %ret31
}

define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) nounwind {
; XOP-LABEL: var_shuffle_v4f64:
; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm3
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm4
; AVX1-NEXT: vpermilpd %ymm4, %ymm2, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,2,2,2]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[2,3,2,3]
; AVX2-NEXT: vpermilpd %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
  %index0 = extractelement <4 x i64> %indices, i32 0
  %index1 = extractelement <4 x i64> %indices, i32 1
  %index2 = extractelement <4 x i64> %indices, i32 2
  %index3 = extractelement <4 x i64> %indices, i32 3
  %v0 = extractelement <4 x double> %v, i64 %index0
  %v1 = extractelement <4 x double> %v, i64 %index1
  %v2 = extractelement <4 x double> %v, i64 %index2
  %v3 = extractelement <4 x double> %v, i64 %index3
  %ret0 = insertelement <4 x double> undef, double %v0, i32 0
  %ret1 = insertelement <4 x double> %ret0, double %v1, i32 1
  %ret2 = insertelement <4 x double> %ret1, double %v2, i32 2
  %ret3 = insertelement <4 x double> %ret2, double %v3, i32 3
  ret <4 x double> %ret3
}

define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwind {
; XOP-LABEL: var_shuffle_v8f32:
; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpermil2ps $0, %ymm1, %ymm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,2,3]
; AVX1-NEXT: vpermilps %ymm1, %ymm2, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8f32:
; INT256: # %bb.0:
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
  %index0 = extractelement <8 x i32> %indices, i32 0
  %index1 = extractelement <8 x i32> %indices, i32 1
  %index2 = extractelement <8 x i32> %indices, i32 2
  %index3 = extractelement <8 x i32> %indices, i32 3
  %index4 = extractelement <8 x i32> %indices, i32 4
  %index5 = extractelement <8 x i32> %indices, i32 5
  %index6 = extractelement <8 x i32> %indices, i32 6
  %index7 = extractelement <8 x i32> %indices, i32 7
  %v0 = extractelement <8 x float> %v, i32 %index0
  %v1 = extractelement <8 x float> %v, i32 %index1
  %v2 = extractelement <8 x float> %v, i32 %index2
  %v3 = extractelement <8 x float> %v, i32 %index3
  %v4 = extractelement <8 x float> %v, i32 %index4
  %v5 = extractelement <8 x float> %v, i32 %index5
  %v6 = extractelement <8 x float> %v, i32 %index6
  %v7 = extractelement <8 x float> %v, i32 %index7
  %ret0 = insertelement <8 x float> undef, float %v0, i32 0
  %ret1 = insertelement <8 x float> %ret0, float %v1, i32 1
  %ret2 = insertelement <8 x float> %ret1, float %v2, i32 2
  %ret3 = insertelement <8 x float> %ret2, float %v3, i32 3
  %ret4 = insertelement <8 x float> %ret3, float %v4, i32 4
  %ret5 = insertelement <8 x float> %ret4, float %v5, i32 5
  %ret6 = insertelement <8 x float> %ret5, float %v6, i32 6
  %ret7 = insertelement <8 x float> %ret6, float %v7, i32 7
  ret <8 x float> %ret7
}

;
; PR35820 - Unequal source/destination vector sizes
;
define <4 x i64> @var_shuffle_v4i64_from_v2i64(<2 x i64> %v, <4 x i64> %indices) nounwind {
; XOP-LABEL: var_shuffle_v4i64_from_v2i64:
; XOP: # %bb.0:
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm2
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm0, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v4i64_from_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64_from_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,2,2,2]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v4i64_from_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4i64_from_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
  %index0 = extractelement <4 x i64> %indices, i32 0
  %index1 = extractelement <4 x i64> %indices, i32 1
  %index2 = extractelement <4 x i64> %indices, i32 2
  %index3 = extractelement <4 x i64> %indices, i32 3
  %v0 = extractelement <2 x i64> %v, i64 %index0
  %v1 = extractelement <2 x i64> %v, i64 %index1
  %v2 = extractelement <2 x i64> %v, i64 %index2
  %v3 = extractelement <2 x i64> %v, i64 %index3
  %ret0 = insertelement <4 x i64> undef, i64 %v0, i32 0
  %ret1 = insertelement <4 x i64> %ret0, i64 %v1, i32 1
  %ret2 = insertelement <4 x i64> %ret1, i64 %v2, i32 2
  %ret3 = insertelement <4 x i64> %ret2, i64 %v3, i32 3
  ret <4 x i64> %ret3
}

define <8 x i32> @var_shuffle_v8i32_from_v4i32(<4 x i32> %v, <8 x i32> %indices) unnamed_addr nounwind {
; XOP-LABEL: var_shuffle_v8i32_from_v4i32:
; XOP: # %bb.0: # %entry
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpermil2ps $0, %ymm1, %ymm0, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v8i32_from_v4i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8i32_from_v4i32:
; INT256: # %bb.0: # %entry
; INT256-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
entry:
  %tmp1 = extractelement <8 x i32> %indices, i32 0
  %vecext2.8 = extractelement <4 x i32> %v, i32 %tmp1
  %tmp2 = extractelement <8 x i32> %indices, i32 1
  %vecext2.9 = extractelement <4 x i32> %v, i32 %tmp2
  %tmp3 = extractelement <8 x i32> %indices, i32 2
  %vecext2.10 = extractelement <4 x i32> %v, i32 %tmp3
  %tmp4 = extractelement <8 x i32> %indices, i32 3
  %vecext2.11 = extractelement <4 x i32> %v, i32 %tmp4
  %tmp5 = extractelement <8 x i32> %indices, i32 4
  %vecext2.12 = extractelement <4 x i32> %v, i32 %tmp5
  %tmp6 = extractelement <8 x i32> %indices, i32 5
  %vecext2.13 = extractelement <4 x i32> %v, i32 %tmp6
  %tmp7 = extractelement <8 x i32> %indices, i32 6
  %vecext2.14 = extractelement <4 x i32> %v, i32 %tmp7
  %tmp8 = extractelement <8 x i32> %indices, i32 7
  %vecext2.15 = extractelement <4 x i32> %v, i32 %tmp8
  %tmp9 = insertelement <8 x i32> undef, i32 %vecext2.8, i32 0
  %tmp10 = insertelement <8 x i32> %tmp9, i32 %vecext2.9, i32 1
  %tmp11 = insertelement <8 x i32> %tmp10, i32 %vecext2.10, i32 2
  %tmp12 = insertelement <8 x i32> %tmp11, i32 %vecext2.11, i32 3
  %tmp13 = insertelement <8 x i32> %tmp12, i32 %vecext2.12, i32 4
  %tmp14 = insertelement <8 x i32> %tmp13, i32 %vecext2.13, i32 5
  %tmp15 = insertelement <8 x i32> %tmp14, i32 %vecext2.14, i32 6
  %tmp16 = insertelement <8 x i32> %tmp15, i32 %vecext2.15, i32 7
  ret <8 x i32> %tmp16
}

define <16 x i16> @var_shuffle_v16i16_from_v8i16(<8 x i16> %v, <16 x i16> %indices) nounwind {
; XOP-LABEL: var_shuffle_v16i16_from_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [256,256,256,256,256,256,256,256]
; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [514,514,514,514,514,514,514,514]
; XOP-NEXT: vpmacsww %xmm2, %xmm3, %xmm1, %xmm4
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpmacsww %xmm2, %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpperm %xmm1, %xmm0, %xmm0, %xmm1
; XOP-NEXT: vpperm %xmm4, %xmm0, %xmm0, %xmm0
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v16i16_from_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [514,514,514,514,514,514,514,514]
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [256,256,256,256,256,256,256,256]
; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm1, %xmm4
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v16i16_from_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v16i16_from_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VLDQ-LABEL: var_shuffle_v16i16_from_v8i16:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLDQ-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512VLDQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v16i16_from_v8i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLBW-NEXT: vpermw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
;
; VLVBMI-LABEL: var_shuffle_v16i16_from_v8i16:
; VLVBMI: # %bb.0:
; VLVBMI-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; VLVBMI-NEXT: vpermw %ymm0, %ymm1, %ymm0
; VLVBMI-NEXT: retq
  %index0 = extractelement <16 x i16> %indices, i32 0
  %index1 = extractelement <16 x i16> %indices, i32 1
  %index2 = extractelement <16 x i16> %indices, i32 2
  %index3 = extractelement <16 x i16> %indices, i32 3
  %index4 = extractelement <16 x i16> %indices, i32 4
  %index5 = extractelement <16 x i16> %indices, i32 5
  %index6 = extractelement <16 x i16> %indices, i32 6
  %index7 = extractelement <16 x i16> %indices, i32 7
  %index8 = extractelement <16 x i16> %indices, i32 8
  %index9 = extractelement <16 x i16> %indices, i32 9
  %index10 = extractelement <16 x i16> %indices, i32 10
  %index11 = extractelement <16 x i16> %indices, i32 11
  %index12 = extractelement <16 x i16> %indices, i32 12
  %index13 = extractelement <16 x i16> %indices, i32 13
  %index14 = extractelement <16 x i16> %indices, i32 14
  %index15 = extractelement <16 x i16> %indices, i32 15
  %v0 = extractelement <8 x i16> %v, i16 %index0
  %v1 = extractelement <8 x i16> %v, i16 %index1
  %v2 = extractelement <8 x i16> %v, i16 %index2
  %v3 = extractelement <8 x i16> %v, i16 %index3
  %v4 = extractelement <8 x i16> %v, i16 %index4
  %v5 = extractelement <8 x i16> %v, i16 %index5
  %v6 = extractelement <8 x i16> %v, i16 %index6
  %v7 = extractelement <8 x i16> %v, i16 %index7
  %v8 = extractelement <8 x i16> %v, i16 %index8
  %v9 = extractelement <8 x i16> %v, i16 %index9
  %v10 = extractelement <8 x i16> %v, i16 %index10
  %v11 = extractelement <8 x i16> %v, i16 %index11
  %v12 = extractelement <8 x i16> %v, i16 %index12
  %v13 = extractelement <8 x i16> %v, i16 %index13
  %v14 = extractelement <8 x i16> %v, i16 %index14
  %v15 = extractelement <8 x i16> %v, i16 %index15
  %ret0 = insertelement <16 x i16> undef, i16 %v0, i32 0
  %ret1 = insertelement <16 x i16> %ret0, i16 %v1, i32 1
  %ret2 = insertelement <16 x i16> %ret1, i16 %v2, i32 2
  %ret3 = insertelement <16 x i16> %ret2, i16 %v3, i32 3
  %ret4 = insertelement <16 x i16> %ret3, i16 %v4, i32 4
  %ret5 = insertelement <16 x i16> %ret4, i16 %v5, i32 5
  %ret6 = insertelement <16 x i16> %ret5, i16 %v6, i32 6
  %ret7 = insertelement <16 x i16> %ret6, i16 %v7, i32 7
  %ret8 = insertelement <16 x i16> %ret7, i16 %v8, i32 8
  %ret9 = insertelement <16 x i16> %ret8, i16 %v9, i32 9
  %ret10 = insertelement <16 x i16> %ret9, i16 %v10, i32 10
  %ret11 = insertelement <16 x i16> %ret10, i16 %v11, i32 11
  %ret12 = insertelement <16 x i16> %ret11, i16 %v12, i32 12
  %ret13 = insertelement <16 x i16> %ret12, i16 %v13, i32 13
  %ret14 = insertelement <16 x i16> %ret13, i16 %v14, i32 14
  %ret15 = insertelement <16 x i16> %ret14, i16 %v15, i32 15
  ret <16 x i16> %ret15
}

define <32 x i8> @var_shuffle_v32i8_from_v16i8(<16 x i8> %v, <32 x i8> %indices) nounwind {
; XOP-LABEL: var_shuffle_v32i8_from_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm2
; XOP-NEXT: vpperm %xmm1, %xmm0, %xmm0, %xmm0
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm5
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; AVX512VLDQ-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm0, %ymm2
; AVX512VLDQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512VLDQ-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VLDQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VLDQ-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VLBW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpshufb %ymm1, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512VLBW-NEXT: vpshufb %ymm1, %ymm0, %ymm0 {%k1}
; AVX512VLBW-NEXT: retq
;
; VLVBMI-LABEL: var_shuffle_v32i8_from_v16i8:
; VLVBMI: # %bb.0:
; VLVBMI-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; VLVBMI-NEXT: vpermb %ymm0, %ymm1, %ymm0
; VLVBMI-NEXT: retq
  %index0 = extractelement <32 x i8> %indices, i32 0
  %index1 = extractelement <32 x i8> %indices, i32 1
  %index2 = extractelement <32 x i8> %indices, i32 2
  %index3 = extractelement <32 x i8> %indices, i32 3
  %index4 = extractelement <32 x i8> %indices, i32 4
  %index5 = extractelement <32 x i8> %indices, i32 5
  %index6 = extractelement <32 x i8> %indices, i32 6
  %index7 = extractelement <32 x i8> %indices, i32 7
  %index8 = extractelement <32 x i8> %indices, i32 8
  %index9 = extractelement <32 x i8> %indices, i32 9
  %index10 = extractelement <32 x i8> %indices, i32 10
  %index11 = extractelement <32 x i8> %indices, i32 11
  %index12 = extractelement <32 x i8> %indices, i32 12
  %index13 = extractelement <32 x i8> %indices, i32 13
  %index14 = extractelement <32 x i8> %indices, i32 14
  %index15 = extractelement <32 x i8> %indices, i32 15
  %index16 = extractelement <32 x i8> %indices, i32 16
  %index17 = extractelement <32 x i8> %indices, i32 17
  %index18 = extractelement <32 x i8> %indices, i32 18
  %index19 = extractelement <32 x i8> %indices, i32 19
  %index20 = extractelement <32 x i8> %indices, i32 20
  %index21 = extractelement <32 x i8> %indices, i32 21
  %index22 = extractelement <32 x i8> %indices, i32 22
  %index23 = extractelement <32 x i8> %indices, i32 23
  %index24 = extractelement <32 x i8> %indices, i32 24
  %index25 = extractelement <32 x i8> %indices, i32 25
  %index26 = extractelement <32 x i8> %indices, i32 26
  %index27 = extractelement <32 x i8> %indices, i32 27
  %index28 = extractelement <32 x i8> %indices, i32 28
  %index29 = extractelement <32 x i8> %indices, i32 29
  %index30 = extractelement <32 x i8> %indices, i32 30
  %index31 = extractelement <32 x i8> %indices, i32 31
  %v0 = extractelement <16 x i8> %v, i8 %index0
  %v1 = extractelement <16 x i8> %v, i8 %index1
  %v2 = extractelement <16 x i8> %v, i8 %index2
  %v3 = extractelement <16 x i8> %v, i8 %index3
  %v4 = extractelement <16 x i8> %v, i8 %index4
  %v5 = extractelement <16 x i8> %v, i8 %index5
  %v6 = extractelement <16 x i8> %v, i8 %index6
  %v7 = extractelement <16 x i8> %v, i8 %index7
  %v8 = extractelement <16 x i8> %v, i8 %index8
  %v9 = extractelement <16 x i8> %v, i8 %index9
  %v10 = extractelement <16 x i8> %v, i8 %index10
  %v11 = extractelement <16 x i8> %v, i8 %index11
  %v12 = extractelement <16 x i8> %v, i8 %index12
  %v13 = extractelement <16 x i8> %v, i8 %index13
  %v14 = extractelement <16 x i8> %v, i8 %index14
  %v15 = extractelement <16 x i8> %v, i8 %index15
  %v16 = extractelement <16 x i8> %v, i8 %index16
  %v17 = extractelement <16 x i8> %v, i8 %index17
  %v18 = extractelement <16 x i8> %v, i8 %index18
  %v19 = extractelement <16 x i8> %v, i8 %index19
  %v20 = extractelement <16 x i8> %v, i8 %index20
  %v21 = extractelement <16 x i8> %v, i8 %index21
  %v22 = extractelement <16 x i8> %v, i8 %index22
  %v23 = extractelement <16 x i8> %v, i8 %index23
  %v24 = extractelement <16 x i8> %v, i8 %index24
  %v25 = extractelement <16 x i8> %v, i8 %index25
  %v26 = extractelement <16 x i8> %v, i8 %index26
  %v27 = extractelement <16 x i8> %v, i8 %index27
  %v28 = extractelement <16 x i8> %v, i8 %index28
  %v29 = extractelement <16 x i8> %v, i8 %index29
  %v30 = extractelement <16 x i8> %v, i8 %index30
  %v31 = extractelement <16 x i8> %v, i8 %index31
  %ret0 = insertelement <32 x i8> undef, i8 %v0, i32 0
  %ret1 = insertelement <32 x i8> %ret0, i8 %v1, i32 1
  %ret2 = insertelement <32 x i8> %ret1, i8 %v2, i32 2
  %ret3 = insertelement <32 x i8> %ret2, i8 %v3, i32 3
  %ret4 = insertelement <32 x i8> %ret3, i8 %v4, i32 4
  %ret5 = insertelement <32 x i8> %ret4, i8 %v5, i32 5
  %ret6 = insertelement <32 x i8> %ret5, i8 %v6, i32 6
  %ret7 = insertelement <32 x i8> %ret6, i8 %v7, i32 7
  %ret8 = insertelement <32 x i8> %ret7, i8 %v8, i32 8
  %ret9 = insertelement <32 x i8> %ret8, i8 %v9, i32 9
  %ret10 = insertelement <32 x i8> %ret9, i8 %v10, i32 10
  %ret11 = insertelement <32 x i8> %ret10, i8 %v11, i32 11
  %ret12 = insertelement <32 x i8> %ret11, i8 %v12, i32 12
  %ret13 = insertelement <32 x i8> %ret12, i8 %v13, i32 13
  %ret14 = insertelement <32 x i8> %ret13, i8 %v14, i32 14
  %ret15 = insertelement <32 x i8> %ret14, i8 %v15, i32 15
  %ret16 = insertelement <32 x i8> %ret15, i8 %v16, i32 16
  %ret17 = insertelement <32 x i8> %ret16, i8 %v17, i32 17
  %ret18 = insertelement <32 x i8> %ret17, i8 %v18, i32 18
  %ret19 = insertelement <32 x i8> %ret18, i8 %v19, i32 19
  %ret20 = insertelement <32 x i8> %ret19, i8 %v20, i32 20
  %ret21 = insertelement <32 x i8> %ret20, i8 %v21, i32 21
  %ret22 = insertelement <32 x i8> %ret21, i8 %v22, i32 22
  %ret23 = insertelement <32 x i8> %ret22, i8 %v23, i32 23
  %ret24 = insertelement <32 x i8> %ret23, i8 %v24, i32 24
  %ret25 = insertelement <32 x i8> %ret24, i8 %v25, i32 25
  %ret26 = insertelement <32 x i8> %ret25, i8 %v26, i32 26
  %ret27 = insertelement <32 x i8> %ret26, i8 %v27, i32 27
  %ret28 = insertelement <32 x i8> %ret27, i8 %v28, i32 28
  %ret29 = insertelement <32 x i8> %ret28, i8 %v29, i32 29
  %ret30 = insertelement <32 x i8> %ret29, i8 %v30, i32 30
  %ret31 = insertelement <32 x i8> %ret30, i8 %v31, i32 31
  ret <32 x i8> %ret31
}

define <4 x double> @var_shuffle_v4f64_from_v2f64(<2 x double> %v, <4 x i64> %indices) nounwind {
; XOP-LABEL: var_shuffle_v4f64_from_v2f64:
; XOP: # %bb.0:
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm2
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm0, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v4f64_from_v2f64:
; AVX1: # %bb.0:
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4f64_from_v2f64:
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,2,2,2]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v4f64_from_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4f64_from_v2f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vpermpd %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
  %index0 = extractelement <4 x i64> %indices, i32 0
  %index1 = extractelement <4 x i64> %indices, i32 1
  %index2 = extractelement <4 x i64> %indices, i32 2
  %index3 = extractelement <4 x i64> %indices, i32 3
  %v0 = extractelement <2 x double> %v, i64 %index0
  %v1 = extractelement <2 x double> %v, i64 %index1
  %v2 = extractelement <2 x double> %v, i64 %index2
  %v3 = extractelement <2 x double> %v, i64 %index3
  %ret0 = insertelement <4 x double> undef, double %v0, i32 0
  %ret1 = insertelement <4 x double> %ret0, double %v1, i32 1
  %ret2 = insertelement <4 x double> %ret1, double %v2, i32 2
  %ret3 = insertelement <4 x double> %ret2, double %v3, i32 3
  ret <4 x double> %ret3
}

define <8 x float> @var_shuffle_v8f32_from_v4f32(<4 x float> %v, <8 x i32> %indices) unnamed_addr nounwind {
; XOP-LABEL: var_shuffle_v8f32_from_v4f32:
; XOP: # %bb.0: # %entry
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpermil2ps $0, %ymm1, %ymm0, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v8f32_from_v4f32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v8f32_from_v4f32:
; INT256: # %bb.0: # %entry
; INT256-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: retq
entry:
  %tmp1 = extractelement <8 x i32> %indices, i32 0
  %vecext2.8 = extractelement <4 x float> %v, i32 %tmp1
  %tmp2 = extractelement <8 x i32> %indices, i32 1
  %vecext2.9 = extractelement <4 x float> %v, i32 %tmp2
  %tmp3 = extractelement <8 x i32> %indices, i32 2
  %vecext2.10 = extractelement <4 x float> %v, i32 %tmp3
  %tmp4 = extractelement <8 x i32> %indices, i32 3
  %vecext2.11 = extractelement <4 x float> %v, i32 %tmp4
  %tmp5 = extractelement <8 x i32> %indices, i32 4
  %vecext2.12 = extractelement <4 x float> %v, i32 %tmp5
  %tmp6 = extractelement <8 x i32> %indices, i32 5
  %vecext2.13 = extractelement <4 x float> %v, i32 %tmp6
  %tmp7 = extractelement <8 x i32> %indices, i32 6
  %vecext2.14 = extractelement <4 x float> %v, i32 %tmp7
  %tmp8 = extractelement <8 x i32> %indices, i32 7
  %vecext2.15 = extractelement <4 x float> %v, i32 %tmp8
  %tmp9 = insertelement <8 x float> undef, float %vecext2.8, i32 0
  %tmp10 = insertelement <8 x float> %tmp9, float %vecext2.9, i32 1
  %tmp11 = insertelement <8 x float> %tmp10, float %vecext2.10, i32 2
  %tmp12 = insertelement <8 x float> %tmp11, float %vecext2.11, i32 3
  %tmp13 = insertelement <8 x float> %tmp12, float %vecext2.12, i32 4
  %tmp14 = insertelement <8 x float> %tmp13, float %vecext2.13, i32 5
  %tmp15 = insertelement <8 x float> %tmp14, float %vecext2.14, i32 6
  %tmp16 = insertelement <8 x float> %tmp15, float %vecext2.15, i32 7
  ret <8 x float> %tmp16
}

define <4 x i32> @var_shuffle_v4i32_from_v8i32(<8 x i32> %v, <4 x i32> %indices) unnamed_addr nounwind {
; XOP-LABEL: var_shuffle_v4i32_from_v8i32:
; XOP: # %bb.0: # %entry
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOP-NEXT: vpermil2ps $0, %xmm1, %xmm2, %xmm0, %xmm0
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v4i32_from_v8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpermilps %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpermilps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; INT256-LABEL: var_shuffle_v4i32_from_v8i32:
; INT256: # %bb.0: # %entry
; INT256-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; INT256-NEXT: vpermps %ymm0, %ymm1, %ymm0
; INT256-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; INT256-NEXT: vzeroupper
; INT256-NEXT: retq
entry:
  %tmp1 = extractelement <4 x i32> %indices, i32 0
  %vecext2.8 = extractelement <8 x i32> %v, i32 %tmp1
  %tmp2 = extractelement <4 x i32> %indices, i32 1
  %vecext2.9 = extractelement <8 x i32> %v, i32 %tmp2
  %tmp3 = extractelement <4 x i32> %indices, i32 2
  %vecext2.10 = extractelement <8 x i32> %v, i32 %tmp3
  %tmp4 = extractelement <4 x i32> %indices, i32 3
  %vecext2.11 = extractelement <8 x i32> %v, i32 %tmp4
  %tmp9 = insertelement <4 x i32> undef, i32 %vecext2.8, i32 0
  %tmp10 = insertelement <4 x i32> %tmp9, i32 %vecext2.9, i32 1
  %tmp11 = insertelement <4 x i32> %tmp10, i32 %vecext2.10, i32 2
  %tmp12 = insertelement <4 x i32> %tmp11, i32 %vecext2.11, i32 3
  ret <4 x i32> %tmp12
}

;
; PR50356 - correctly adjust the indices vector to match the source/destination size.
;

define <4 x i64> @PR50356(<4 x i64> %0, <4 x i32> %1, <4 x i64> %2) unnamed_addr nounwind {
; XOP-LABEL: PR50356:
; XOP: # %bb.0:
; XOP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpermil2pd $0, %xmm1, %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpcomltq %xmm2, %xmm0, %xmm0
; XOP-NEXT: vextractf128 $1, %ymm2, %xmm1
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpcomltq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOP-NEXT: vmovapd {{.*#+}} ymm1 = [34,68,102,136]
; XOP-NEXT: vblendvpd %ymm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: PR50356:
; AVX1: # %bb.0:
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpermilpd %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vblendvpd %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [34,68,102,136]
; AVX1-NEXT: vblendvpd %ymm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR50356:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: andl $3, %eax
; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
; AVX2-NEXT: andl $3, %ecx
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vmovapd {{.*#+}} ymm1 = [34,68,102,136]
; AVX2-NEXT: vblendvpd %ymm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR50356:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-32, %rsp
; AVX512-NEXT: subq $64, %rsp
; AVX512-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vmovaps %ymm0, (%rsp)
; AVX512-NEXT: andl $3, %eax
; AVX512-NEXT: vpextrd $1, %xmm1, %ecx
; AVX512-NEXT: andl $3, %ecx
; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vpcmpgtq %zmm0, %zmm2, %k1
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [17,51,85,119]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [34,68,102,136]
; AVX512-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1}
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: movq %rbp, %rsp
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: PR50356:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: pushq %rbp
; AVX512VL-NEXT: movq %rsp, %rbp
; AVX512VL-NEXT: andq $-32, %rsp
; AVX512VL-NEXT: subq $64, %rsp
; AVX512VL-NEXT: vmovd %xmm1, %eax
; AVX512VL-NEXT: vmovaps %ymm0, (%rsp)
; AVX512VL-NEXT: andl $3, %eax
; AVX512VL-NEXT: vpextrd $1, %xmm1, %ecx
; AVX512VL-NEXT: andl $3, %ecx
; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512VL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512VL-NEXT: vpcmpgtq %ymm0, %ymm2, %k1
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm0 = [34,68,102,136]
; AVX512VL-NEXT: vmovdqa64 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 {%k1}
; AVX512VL-NEXT: movq %rbp, %rsp
; AVX512VL-NEXT: popq %rbp
; AVX512VL-NEXT: retq
  %v9 = and <4 x i32> %1, <i32 7, i32 7, i32 7, i32 7>
  %v10 = extractelement <4 x i32> %v9, i32 0
  %v11 = extractelement <4 x i64> %0, i32 %v10
  %v14 = extractelement <4 x i32> %v9, i32 1
  %v15 = extractelement <4 x i64> %0, i32 %v14
  %v27 = insertelement <4 x i64> zeroinitializer, i64 %v11, i32 0
  %v28 = insertelement <4 x i64> %v27, i64 %v15, i32 1
  %v36 = icmp slt <4 x i64> %v28, %2
  %v37 = select <4 x i1> %v36, <4 x i64> <i64 17, i64 51, i64 85, i64 119>, <4 x i64> <i64 34, i64 68, i64 102, i64 136> ; 17 68 102 136
  ret <4 x i64> %v37
}

define <4 x i64> @var_shuffle_v4i64_with_v16i8_indices(<4 x i64> %v, <16 x i8> %indices) unnamed_addr nounwind {
; XOP-LABEL: var_shuffle_v4i64_with_v16i8_indices:
; XOP: # %bb.0:
; XOP-NEXT: vpsrld $16, %xmm1, %xmm2
; XOP-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; XOP-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; XOP-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm2, %xmm2, %xmm2
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm3, %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v4i64_with_v16i8_indices:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm2
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpaddq %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
; AVX1-NEXT: vpermilpd %ymm4, %ymm3, %ymm3
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm3, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64_with_v16i8_indices:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2,2,2,2]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm0[2,3,2,3]
; AVX2-NEXT: vpermilpd %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_v4i64_with_v16i8_indices:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpermq %zmm0, %zmm1, %zmm0
; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v4i64_with_v16i8_indices:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpermq %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: retq
  %index0 = extractelement <16 x i8> %indices, i32 0
  %index1 = extractelement <16 x i8> %indices, i32 1
  %index2 = extractelement <16 x i8> %indices, i32 2
  %index3 = extractelement <16 x i8> %indices, i32 3
  %v0 = extractelement <4 x i64> %v, i8 %index0
  %v1 = extractelement <4 x i64> %v, i8 %index1
  %v2 = extractelement <4 x i64> %v, i8 %index2
  %v3 = extractelement <4 x i64> %v, i8 %index3
  %ret0 = insertelement <4 x i64> undef, i64 %v0, i32 0
  %ret1 = insertelement <4 x i64> %ret0, i64 %v1, i32 1
  %ret2 = insertelement <4 x i64> %ret1, i64 %v2, i32 2
  %ret3 = insertelement <4 x i64> %ret2, i64 %v3, i32 3
  ret <4 x i64> %ret3
}