; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512

; These patterns are produced by LoopVectorizer for interleaved loads.

define void @load_i64_stride4_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i64_stride4_vf2:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm0
; SSE-NEXT:    movaps 16(%rdi), %xmm1
; SSE-NEXT:    movaps 32(%rdi), %xmm2
; SSE-NEXT:    movaps 48(%rdi), %xmm3
; SSE-NEXT:    movaps %xmm0, %xmm4
; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE-NEXT:    movaps %xmm4, (%rsi)
; SSE-NEXT:    movaps %xmm0, (%rdx)
; SSE-NEXT:    movaps %xmm2, (%rcx)
; SSE-NEXT:    movaps %xmm1, (%r8)
; SSE-NEXT:    retq
;
; AVX1-LABEL: load_i64_stride4_vf2:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovaps 32(%rdi), %xmm0
; AVX1-NEXT:    vmovaps (%rdi), %xmm1
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX1-NEXT:    vmovaps 48(%rdi), %xmm1
; AVX1-NEXT:    vmovaps 16(%rdi), %xmm3
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm3[0],xmm1[0]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; AVX1-NEXT:    vmovaps %xmm2, (%rsi)
; AVX1-NEXT:    vmovaps %xmm0, (%rdx)
; AVX1-NEXT:    vmovaps %xmm4, (%rcx)
; AVX1-NEXT:    vmovaps %xmm1, (%r8)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_i64_stride4_vf2:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps (%rdi), %xmm0
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm1
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0]
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX2-NEXT:    vmovaps (%rdi), %ymm3
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm1[0],ymm3[2],ymm1[2]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm1[1],ymm3[3],ymm1[3]
; AVX2-NEXT:    vmovaps %xmm2, (%rsi)
; AVX2-NEXT:    vmovaps %xmm0, (%rdx)
; AVX2-NEXT:    vextractf128 $1, %ymm4, (%rcx)
; AVX2-NEXT:    vextractf128 $1, %ymm1, (%r8)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: load_i64_stride4_vf2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovaps (%rdi), %xmm0
; AVX512-NEXT:    vmovaps 32(%rdi), %xmm1
; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0]
; AVX512-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX512-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX512-NEXT:    vmovaps (%rdi), %ymm3
; AVX512-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm3[0],ymm1[0],ymm3[2],ymm1[2]
; AVX512-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm1[1],ymm3[3],ymm1[3]
; AVX512-NEXT:    vmovaps %xmm2, (%rsi)
; AVX512-NEXT:    vmovaps %xmm0, (%rdx)
; AVX512-NEXT:    vextractf128 $1, %ymm4, (%rcx)
; AVX512-NEXT:    vextractf128 $1, %ymm1, (%r8)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <8 x i64>, ptr %in.vec, align 32
  %strided.vec0 = shufflevector <8 x i64> %wide.vec, <8 x i64> poison, <2 x i32> <i32 0, i32 4>
  %strided.vec1 = shufflevector <8 x i64> %wide.vec, <8 x i64> poison, <2 x i32> <i32 1, i32 5>
  %strided.vec2 = shufflevector <8 x i64> %wide.vec, <8 x i64> poison, <2 x i32> <i32 2, i32 6>
  %strided.vec3 = shufflevector <8 x i64> %wide.vec, <8 x i64> poison, <2 x i32> <i32 3, i32 7>
  store <2 x i64> %strided.vec0, ptr %out.vec0, align 32
  store <2 x i64> %strided.vec1, ptr %out.vec1, align 32
  store <2 x i64> %strided.vec2, ptr %out.vec2, align 32
  store <2 x i64> %strided.vec3, ptr %out.vec3, align 32
  ret void
}

define void @load_i64_stride4_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i64_stride4_vf4:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps 112(%rdi), %xmm8
; SSE-NEXT:    movaps 80(%rdi), %xmm1
; SSE-NEXT:    movaps (%rdi), %xmm2
; SSE-NEXT:    movaps 16(%rdi), %xmm3
; SSE-NEXT:    movaps 32(%rdi), %xmm4
; SSE-NEXT:    movaps 48(%rdi), %xmm9
; SSE-NEXT:    movaps 96(%rdi), %xmm6
; SSE-NEXT:    movaps 64(%rdi), %xmm7
; SSE-NEXT:    movaps %xmm7, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE-NEXT:    movaps %xmm2, %xmm5
; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm4[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm6[1]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
; SSE-NEXT:    movaps %xmm1, %xmm4
; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm8[0]
; SSE-NEXT:    movaps %xmm3, %xmm6
; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm9[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm8[1]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm9[1]
; SSE-NEXT:    movaps %xmm0, 16(%rsi)
; SSE-NEXT:    movaps %xmm5, (%rsi)
; SSE-NEXT:    movaps %xmm7, 16(%rdx)
; SSE-NEXT:    movaps %xmm2, (%rdx)
; SSE-NEXT:    movaps %xmm4, 16(%rcx)
; SSE-NEXT:    movaps %xmm6, (%rcx)
; SSE-NEXT:    movaps %xmm1, 16(%r8)
; SSE-NEXT:    movaps %xmm3, (%r8)
; SSE-NEXT:    retq
;
; AVX1-LABEL: load_i64_stride4_vf4:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovaps (%rdi), %ymm0
; AVX1-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX1-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm2
; AVX1-NEXT:    vinsertf128 $1, 96(%rdi), %ymm1, %ymm3
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],mem[2,3]
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT:    vmovaps %ymm4, (%rsi)
; AVX1-NEXT:    vmovaps %ymm2, (%rdx)
; AVX1-NEXT:    vmovaps %ymm5, (%rcx)
; AVX1-NEXT:    vmovaps %ymm0, (%r8)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_i64_stride4_vf4:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm1
; AVX2-NEXT:    vmovaps 64(%rdi), %ymm2
; AVX2-NEXT:    vmovaps 96(%rdi), %ymm3
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT:    vmovaps %ymm2, (%rsi)
; AVX2-NEXT:    vmovaps %ymm4, (%rdx)
; AVX2-NEXT:    vmovaps %ymm3, (%rcx)
; AVX2-NEXT:    vmovaps %ymm0, (%r8)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: load_i64_stride4_vf4:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512-NEXT:    vmovdqa 32(%rdi), %ymm1
; AVX512-NEXT:    vmovdqa 64(%rdi), %ymm2
; AVX512-NEXT:    vmovdqa 96(%rdi), %ymm3
; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX512-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX512-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX512-NEXT:    vmovdqa %ymm2, (%rsi)
; AVX512-NEXT:    vmovdqa %ymm4, (%rdx)
; AVX512-NEXT:    vmovdqa %ymm3, (%rcx)
; AVX512-NEXT:    vmovdqa %ymm0, (%r8)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <16 x i64>, ptr %in.vec, align 32
  %strided.vec0 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.vec1 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.vec2 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.vec3 = shufflevector <16 x i64> %wide.vec, <16 x i64> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  store <4 x i64> %strided.vec0, ptr %out.vec0, align 32
  store <4 x i64> %strided.vec1, ptr %out.vec1, align 32
  store <4 x i64> %strided.vec2, ptr %out.vec2, align 32
  store <4 x i64> %strided.vec3, ptr %out.vec3, align 32
  ret void
}

define void @load_i64_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i64_stride4_vf8:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps 112(%rdi), %xmm8
; SSE-NEXT:    movaps 80(%rdi), %xmm14
; SSE-NEXT:    movaps 240(%rdi), %xmm9
; SSE-NEXT:    movaps 208(%rdi), %xmm13
; SSE-NEXT:    movaps 176(%rdi), %xmm10
; SSE-NEXT:    movaps 144(%rdi), %xmm12
; SSE-NEXT:    movaps (%rdi), %xmm4
; SSE-NEXT:    movaps 16(%rdi), %xmm11
; SSE-NEXT:    movaps 32(%rdi), %xmm15
; SSE-NEXT:    movaps 224(%rdi), %xmm2
; SSE-NEXT:    movaps 192(%rdi), %xmm7
; SSE-NEXT:    movaps 96(%rdi), %xmm3
; SSE-NEXT:    movaps 64(%rdi), %xmm6
; SSE-NEXT:    movaps 160(%rdi), %xmm1
; SSE-NEXT:    movaps 128(%rdi), %xmm5
; SSE-NEXT:    movaps %xmm5, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE-NEXT:    movaps %xmm6, %xmm1
; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm3[1]
; SSE-NEXT:    movaps %xmm7, %xmm3
; SSE-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm2[1]
; SSE-NEXT:    movaps %xmm4, %xmm2
; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm15[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm15[1]
; SSE-NEXT:    movaps %xmm12, %xmm15
; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm10[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm10[1]
; SSE-NEXT:    movaps %xmm14, %xmm10
; SSE-NEXT:    movlhps {{.*#+}} xmm10 = xmm10[0],xmm8[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm8[1]
; SSE-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm13, %xmm8
; SSE-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm9[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm9[1]
; SSE-NEXT:    movaps 48(%rdi), %xmm9
; SSE-NEXT:    movaps %xmm11, %xmm14
; SSE-NEXT:    movlhps {{.*#+}} xmm14 = xmm14[0],xmm9[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm9[1]
; SSE-NEXT:    movaps %xmm3, 48(%rsi)
; SSE-NEXT:    movaps %xmm1, 16(%rsi)
; SSE-NEXT:    movaps %xmm0, 32(%rsi)
; SSE-NEXT:    movaps %xmm2, (%rsi)
; SSE-NEXT:    movaps %xmm7, 48(%rdx)
; SSE-NEXT:    movaps %xmm6, 16(%rdx)
; SSE-NEXT:    movaps %xmm4, (%rdx)
; SSE-NEXT:    movaps %xmm5, 32(%rdx)
; SSE-NEXT:    movaps %xmm10, 16(%rcx)
; SSE-NEXT:    movaps %xmm8, 48(%rcx)
; SSE-NEXT:    movaps %xmm15, 32(%rcx)
; SSE-NEXT:    movaps %xmm14, (%rcx)
; SSE-NEXT:    movaps %xmm13, 48(%r8)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%r8)
; SSE-NEXT:    movaps %xmm12, 32(%r8)
; SSE-NEXT:    movaps %xmm11, (%r8)
; SSE-NEXT:    retq
;
; AVX1-LABEL: load_i64_stride4_vf8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovaps 224(%rdi), %ymm10
; AVX1-NEXT:    vmovaps 192(%rdi), %ymm11
; AVX1-NEXT:    vmovaps 96(%rdi), %ymm13
; AVX1-NEXT:    vmovaps 64(%rdi), %ymm14
; AVX1-NEXT:    vmovaps 32(%rdi), %xmm6
; AVX1-NEXT:    vmovaps (%rdi), %xmm7
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm6[0]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 160(%rdi), %xmm0
; AVX1-NEXT:    vmovaps 128(%rdi), %xmm3
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm0[0]
; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX1-NEXT:    vmovaps 192(%rdi), %xmm2
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm2[0],xmm1[0]
; AVX1-NEXT:    vmovaps 96(%rdi), %xmm4
; AVX1-NEXT:    vmovaps 64(%rdi), %xmm5
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm15 = xmm5[0],xmm4[0]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm3[1],xmm0[1]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm7[1],xmm6[1]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm2[1],xmm1[1]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm4[1]
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm14[0],ymm13[0],ymm14[2],ymm13[2]
; AVX1-NEXT:    vmovaps 48(%rdi), %xmm5
; AVX1-NEXT:    vmovaps 16(%rdi), %xmm6
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0]
; AVX1-NEXT:    vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm4[4,5,6,7]
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm11[0],ymm10[0],ymm11[2],ymm10[2]
; AVX1-NEXT:    vmovaps 176(%rdi), %xmm1
; AVX1-NEXT:    vmovaps 144(%rdi), %xmm0
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm0[0],xmm1[0]
; AVX1-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm14[1],ymm13[1],ymm14[3],ymm13[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm6[1],xmm5[1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm11[1],ymm10[1],ymm11[3],ymm10[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX1-NEXT:    vmovaps %xmm15, 16(%rsi)
; AVX1-NEXT:    vmovaps %xmm12, 48(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm1, 32(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm1, (%rsi)
; AVX1-NEXT:    vmovaps %xmm2, 16(%rdx)
; AVX1-NEXT:    vmovaps %xmm3, 48(%rdx)
; AVX1-NEXT:    vmovaps %xmm8, (%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm1, 32(%rdx)
; AVX1-NEXT:    vmovaps %ymm7, 32(%rcx)
; AVX1-NEXT:    vmovaps %ymm4, (%rcx)
; AVX1-NEXT:    vmovaps %ymm0, 32(%r8)
; AVX1-NEXT:    vmovaps %ymm5, (%r8)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_i64_stride4_vf8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps 160(%rdi), %ymm9
; AVX2-NEXT:    vmovaps 128(%rdi), %ymm10
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm12
; AVX2-NEXT:    vmovaps (%rdi), %ymm13
; AVX2-NEXT:    vmovaps 96(%rdi), %ymm14
; AVX2-NEXT:    vmovaps 64(%rdi), %ymm15
; AVX2-NEXT:    vmovaps 224(%rdi), %xmm6
; AVX2-NEXT:    vmovaps 192(%rdi), %xmm7
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm6[0]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 160(%rdi), %xmm0
; AVX2-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX2-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 96(%rdi), %xmm2
; AVX2-NEXT:    vmovaps (%rdi), %xmm3
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm4
; AVX2-NEXT:    vmovaps 64(%rdi), %xmm5
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm6[1]
; AVX2-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm5[0],xmm2[0]
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm1[1],xmm0[1]
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm3[0],xmm4[0]
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm2[1]
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm15[0],ymm14[0],ymm15[2],ymm14[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm13[0],ymm12[0],ymm13[2],ymm12[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm5[2,3],ymm4[2,3]
; AVX2-NEXT:    vmovaps 224(%rdi), %ymm5
; AVX2-NEXT:    vmovaps 192(%rdi), %ymm0
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm11[2,3],ymm1[2,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm15[1],ymm14[1],ymm15[3],ymm14[3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm13[1],ymm12[1],ymm13[3],ymm12[3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm12[2,3],ymm11[2,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm5[1],ymm0[3],ymm5[3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm10[1],ymm9[1],ymm10[3],ymm9[3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm5[2,3],ymm0[2,3]
; AVX2-NEXT:    vmovaps %xmm7, 16(%rsi)
; AVX2-NEXT:    vmovaps %xmm6, (%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm5, 32(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm5, 48(%rsi)
; AVX2-NEXT:    vmovaps %xmm3, (%rdx)
; AVX2-NEXT:    vmovaps %xmm2, 16(%rdx)
; AVX2-NEXT:    vmovaps %xmm8, 32(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm2, 48(%rdx)
; AVX2-NEXT:    vmovaps %ymm1, 32(%rcx)
; AVX2-NEXT:    vmovaps %ymm4, (%rcx)
; AVX2-NEXT:    vmovaps %ymm0, 32(%r8)
; AVX2-NEXT:    vmovaps %ymm11, (%r8)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: load_i64_stride4_vf8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm0
; AVX512-NEXT:    vmovdqu64 64(%rdi), %zmm1
; AVX512-NEXT:    vmovdqu64 128(%rdi), %zmm2
; AVX512-NEXT:    vmovdqu64 192(%rdi), %zmm3
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,0,4,8,12]
; AVX512-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512-NEXT:    vpermt2q %zmm3, %zmm4, %zmm5
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,1,5,9,13]
; AVX512-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm6
; AVX512-NEXT:    vpermt2q %zmm3, %zmm5, %zmm6
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm5
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,2,6,10,14]
; AVX512-NEXT:    # zmm6 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm7
; AVX512-NEXT:    vpermt2q %zmm3, %zmm6, %zmm7
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm6
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7]
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,3,7,11,15]
; AVX512-NEXT:    # zmm7 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vpermt2q %zmm3, %zmm7, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
; AVX512-NEXT:    vmovdqu64 %zmm4, (%rsi)
; AVX512-NEXT:    vmovdqu64 %zmm5, (%rdx)
; AVX512-NEXT:    vmovdqu64 %zmm6, (%rcx)
; AVX512-NEXT:    vmovdqu64 %zmm0, (%r8)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <32 x i64>, ptr %in.vec, align 32
  %strided.vec0 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %strided.vec1 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %strided.vec2 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %strided.vec3 = shufflevector <32 x i64> %wide.vec, <32 x i64> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
  store <8 x i64> %strided.vec0, ptr %out.vec0, align 32
  store <8 x i64> %strided.vec1, ptr %out.vec1, align 32
  store <8 x i64> %strided.vec2, ptr %out.vec2, align 32
  store <8 x i64> %strided.vec3, ptr %out.vec3, align 32
  ret void
}

define void @load_i64_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) nounwind {
; SSE-LABEL: load_i64_stride4_vf16:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $152, %rsp
; SSE-NEXT:    movaps (%rdi), %xmm8
; SSE-NEXT:    movaps 416(%rdi), %xmm13
; SSE-NEXT:    movaps 384(%rdi), %xmm7
; SSE-NEXT:    movaps 288(%rdi), %xmm15
; SSE-NEXT:    movaps 256(%rdi), %xmm9
; SSE-NEXT:    movaps 160(%rdi), %xmm2
; SSE-NEXT:    movaps 128(%rdi), %xmm10
; SSE-NEXT:    movaps 480(%rdi), %xmm3
; SSE-NEXT:    movaps 448(%rdi), %xmm11
; SSE-NEXT:    movaps 352(%rdi), %xmm4
; SSE-NEXT:    movaps 320(%rdi), %xmm12
; SSE-NEXT:    movaps 224(%rdi), %xmm5
; SSE-NEXT:    movaps 192(%rdi), %xmm14
; SSE-NEXT:    movaps 96(%rdi), %xmm6
; SSE-NEXT:    movaps 64(%rdi), %xmm0
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm6[0]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm14, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm5[1]
; SSE-NEXT:    movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm12, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm4[1]
; SSE-NEXT:    movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm11, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm3[1]
; SSE-NEXT:    movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm10, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm2[1]
; SSE-NEXT:    movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm9, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm15[0]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm15[1]
; SSE-NEXT:    movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps %xmm7, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm13[0]
; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm13[1]
; SSE-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps 32(%rdi), %xmm0
; SSE-NEXT:    movaps %xmm8, %xmm15
; SSE-NEXT:    movlhps {{.*#+}} xmm15 = xmm15[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm0[1]
; SSE-NEXT:    movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps 112(%rdi), %xmm0
; SSE-NEXT:    movaps 80(%rdi), %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movaps 240(%rdi), %xmm0
; SSE-NEXT:    movaps 208(%rdi), %xmm12
; SSE-NEXT:    movaps %xmm12, %xmm14
; SSE-NEXT:    movlhps {{.*#+}} xmm14 = xmm14[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1]
; SSE-NEXT:    movaps 368(%rdi), %xmm0
; SSE-NEXT:    movaps 336(%rdi), %xmm10
; SSE-NEXT:    movaps %xmm10, %xmm13
; SSE-NEXT:    movlhps {{.*#+}} xmm13 = xmm13[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm0[1]
; SSE-NEXT:    movaps 496(%rdi), %xmm0
; SSE-NEXT:    movaps 464(%rdi), %xmm9
; SSE-NEXT:    movaps %xmm9, %xmm11
; SSE-NEXT:    movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1]
; SSE-NEXT:    movaps 176(%rdi), %xmm0
; SSE-NEXT:    movaps 144(%rdi), %xmm7
; SSE-NEXT:    movaps %xmm7, %xmm8
; SSE-NEXT:    movlhps {{.*#+}} xmm8 = xmm8[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm0[1]
; SSE-NEXT:    movaps 304(%rdi), %xmm0
; SSE-NEXT:    movaps 272(%rdi), %xmm5
; SSE-NEXT:    movaps %xmm5, %xmm6
; SSE-NEXT:    movlhps {{.*#+}} xmm6 = xmm6[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT:    movaps 432(%rdi), %xmm0
; SSE-NEXT:    movaps 400(%rdi), %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm3
; SSE-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    movaps 16(%rdi), %xmm2
; SSE-NEXT:    movaps 48(%rdi), %xmm0
; SSE-NEXT:    movaps %xmm2, %xmm4
; SSE-NEXT:    movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%rsi)
; SSE-NEXT:    movaps %xmm15, (%rsi)
; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%rsi)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, (%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%rdx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%rdx)
; SSE-NEXT:    movaps %xmm3, 96(%rcx)
; SSE-NEXT:    movaps %xmm6, 64(%rcx)
; SSE-NEXT:    movaps %xmm8, 32(%rcx)
; SSE-NEXT:    movaps %xmm4, (%rcx)
; SSE-NEXT:    movaps %xmm11, 112(%rcx)
; SSE-NEXT:    movaps %xmm13, 80(%rcx)
; SSE-NEXT:    movaps %xmm14, 48(%rcx)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%rcx)
; SSE-NEXT:    movaps %xmm1, 96(%r8)
; SSE-NEXT:    movaps %xmm5, 64(%r8)
; SSE-NEXT:    movaps %xmm7, 32(%r8)
; SSE-NEXT:    movaps %xmm2, (%r8)
; SSE-NEXT:    movaps %xmm9, 112(%r8)
; SSE-NEXT:    movaps %xmm10, 80(%r8)
; SSE-NEXT:    movaps %xmm12, 48(%r8)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%r8)
; SSE-NEXT:    addq $152, %rsp
; SSE-NEXT:    retq
;
; AVX1-LABEL: load_i64_stride4_vf16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    subq $296, %rsp # imm = 0x128
; AVX1-NEXT:    vmovaps 224(%rdi), %xmm8
; AVX1-NEXT:    vmovaps 192(%rdi), %xmm9
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm9[0],xmm8[0]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 96(%rdi), %xmm10
; AVX1-NEXT:    vmovaps 64(%rdi), %xmm11
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm11[0],xmm10[0]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 352(%rdi), %xmm4
; AVX1-NEXT:    vmovaps 320(%rdi), %xmm5
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm4[0]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 160(%rdi), %xmm12
; AVX1-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm12[0]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 32(%rdi), %xmm2
; AVX1-NEXT:    vmovaps (%rdi), %xmm3
; AVX1-NEXT:    vmovaps 288(%rdi), %xmm6
; AVX1-NEXT:    vmovaps 256(%rdi), %xmm0
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm4[1]
; AVX1-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm6[0]
; AVX1-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm2[0]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm9[1],xmm8[1]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 416(%rdi), %xmm0
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm12[1]
; AVX1-NEXT:    vmovaps %xmm1, (%rsp) # 16-byte Spill
; AVX1-NEXT:    vmovaps 480(%rdi), %xmm1
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm11[1],xmm10[1]
; AVX1-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 448(%rdi), %xmm4
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1]
; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm4[0],xmm1[0]
; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm1[1]
; AVX1-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 384(%rdi), %xmm1
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX1-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT:    vmovaps 224(%rdi), %ymm13
; AVX1-NEXT:    vmovaps 192(%rdi), %ymm12
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm12[0],ymm13[0],ymm12[2],ymm13[2]
; AVX1-NEXT:    vmovaps 176(%rdi), %xmm11
; AVX1-NEXT:    vmovaps 144(%rdi), %xmm10
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm10[0],xmm11[0]
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT:    vmovaps 352(%rdi), %ymm0
; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT:    vmovaps 320(%rdi), %ymm1
; AVX1-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1-NEXT:    vmovaps 304(%rdi), %xmm15
; AVX1-NEXT:    vmovaps 272(%rdi), %xmm14
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm14[0],xmm15[0]
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT:    vmovaps 480(%rdi), %ymm8
; AVX1-NEXT:    vmovaps 448(%rdi), %ymm7
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX1-NEXT:    vmovaps 432(%rdi), %xmm5
; AVX1-NEXT:    vmovaps 400(%rdi), %xmm4
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm4[0],xmm5[0]
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT:    vmovaps 96(%rdi), %ymm3
; AVX1-NEXT:    vmovaps 64(%rdi), %ymm2
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX1-NEXT:    vmovaps 48(%rdi), %xmm1
; AVX1-NEXT:    vmovaps 16(%rdi), %xmm0
; AVX1-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm0[0],xmm1[0]
; AVX1-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm10[1],xmm11[1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm9[4,5,6,7]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm4[1],xmm5[1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX1-NEXT:    vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX1-NEXT:    # ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm14[1],xmm15[1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 112(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 96(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 64(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, (%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 32(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 80(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 16(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 48(%rsi)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 96(%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 112(%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, (%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 16(%rdx)
; AVX1-NEXT:    vmovaps (%rsp), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 32(%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 48(%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 64(%rdx)
; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT:    vmovaps %xmm3, 80(%rdx)
; AVX1-NEXT:    vmovaps %ymm6, (%rcx)
; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-NEXT:    vmovaps %ymm3, 96(%rcx)
; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-NEXT:    vmovaps %ymm3, 64(%rcx)
; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX1-NEXT:    vmovaps %ymm3, 32(%rcx)
; AVX1-NEXT:    vmovaps %ymm2, 64(%r8)
; AVX1-NEXT:    vmovaps %ymm1, 96(%r8)
; AVX1-NEXT:    vmovaps %ymm0, (%r8)
; AVX1-NEXT:    vmovaps %ymm10, 32(%r8)
; AVX1-NEXT:    addq $296, %rsp # imm = 0x128
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_i64_stride4_vf16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    subq $296, %rsp # imm = 0x128
; AVX2-NEXT:    vmovaps 224(%rdi), %xmm11
; AVX2-NEXT:    vmovaps 192(%rdi), %xmm15
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm15[0],xmm11[0]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 96(%rdi), %xmm7
; AVX2-NEXT:    vmovaps (%rdi), %xmm9
; AVX2-NEXT:    vmovaps 64(%rdi), %xmm4
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm4[0],xmm7[0]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 352(%rdi), %xmm8
; AVX2-NEXT:    vmovaps 320(%rdi), %xmm6
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm8[0]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 160(%rdi), %xmm3
; AVX2-NEXT:    vmovaps 128(%rdi), %xmm2
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm2[0],xmm3[0]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 288(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 256(%rdi), %xmm5
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm1[0]
; AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm6[1],xmm8[1]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 416(%rdi), %xmm0
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm1[1]
; AVX2-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 384(%rdi), %xmm1
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm15[1],xmm11[1]
; AVX2-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 480(%rdi), %xmm11
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
; AVX2-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 448(%rdi), %xmm2
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm4[1],xmm7[1]
; AVX2-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm2[0],xmm11[0]
; AVX2-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm11[1]
; AVX2-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
; AVX2-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm0
; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm9[0],xmm0[0]
; AVX2-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm9[1],xmm0[1]
; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT:    vmovaps 160(%rdi), %ymm9
; AVX2-NEXT:    vmovaps 128(%rdi), %ymm8
; AVX2-NEXT:    vmovaps 224(%rdi), %ymm5
; AVX2-NEXT:    vmovaps 192(%rdi), %ymm4
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm8[0],ymm9[0],ymm8[2],ymm9[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm11[2,3],ymm0[2,3]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 288(%rdi), %ymm0
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 256(%rdi), %ymm1
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 352(%rdi), %ymm14
; AVX2-NEXT:    vmovaps 320(%rdi), %ymm13
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm13[0],ymm14[0],ymm13[2],ymm14[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm11[2,3]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 416(%rdi), %ymm10
; AVX2-NEXT:    vmovaps 384(%rdi), %ymm7
; AVX2-NEXT:    vmovaps 480(%rdi), %ymm6
; AVX2-NEXT:    vmovaps 448(%rdi), %ymm3
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm3[0],ymm6[0],ymm3[2],ymm6[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm11 = ymm7[0],ymm10[0],ymm7[2],ymm10[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm11[2,3],ymm15[2,3]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm11
; AVX2-NEXT:    vmovaps (%rdi), %ymm2
; AVX2-NEXT:    vmovaps 96(%rdi), %ymm1
; AVX2-NEXT:    vmovaps 64(%rdi), %ymm0
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm2[0],ymm11[0],ymm2[2],ymm11[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm15[2,3],ymm12[2,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm8[1],ymm9[1],ymm8[3],ymm9[3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm4[2,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm2[1],ymm11[1],ymm2[3],ymm11[3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm6[1],ymm3[3],ymm6[3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm7[1],ymm10[1],ymm7[3],ymm10[3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm13[1],ymm14[1],ymm13[3],ymm14[3]
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-NEXT:    vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm3[2,3],ymm2[2,3]
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 112(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 96(%rsi)
; AVX2-NEXT:    vmovaps (%rsp), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 64(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, (%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 32(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 80(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 16(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 48(%rsi)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 96(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 112(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, (%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 16(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 32(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 48(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 64(%rdx)
; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX2-NEXT:    vmovaps %xmm3, 80(%rdx)
; AVX2-NEXT:    vmovaps %ymm12, (%rcx)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm3, 96(%rcx)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm3, 64(%rcx)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm3, 32(%rcx)
; AVX2-NEXT:    vmovaps %ymm2, 64(%r8)
; AVX2-NEXT:    vmovaps %ymm1, 96(%r8)
; AVX2-NEXT:    vmovaps %ymm0, (%r8)
; AVX2-NEXT:    vmovaps %ymm5, 32(%r8)
; AVX2-NEXT:    addq $296, %rsp # imm = 0x128
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: load_i64_stride4_vf16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqu64 (%rdi), %zmm0
; AVX512-NEXT:    vmovdqu64 64(%rdi), %zmm1
; AVX512-NEXT:    vmovdqu64 128(%rdi), %zmm2
; AVX512-NEXT:    vmovdqu64 192(%rdi), %zmm3
; AVX512-NEXT:    vmovdqu64 320(%rdi), %zmm4
; AVX512-NEXT:    vmovdqu64 256(%rdi), %zmm5
; AVX512-NEXT:    vmovdqu64 448(%rdi), %zmm6
; AVX512-NEXT:    vmovdqu64 384(%rdi), %zmm7
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm8 = [0,4,8,12,0,4,8,12]
; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm9
; AVX512-NEXT:    vpermt2q %zmm6, %zmm8, %zmm9
; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm10
; AVX512-NEXT:    vpermt2q %zmm4, %zmm8, %zmm10
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm9[4,5,6,7]
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm10
; AVX512-NEXT:    vpermt2q %zmm3, %zmm8, %zmm10
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm8
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm10[4,5,6,7]
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [1,5,9,13,1,5,9,13]
; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm11
; AVX512-NEXT:    vpermt2q %zmm6, %zmm10, %zmm11
; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512-NEXT:    vpermt2q %zmm4, %zmm10, %zmm12
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm12[0,1,2,3],zmm11[4,5,6,7]
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512-NEXT:    vpermt2q %zmm3, %zmm10, %zmm12
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm10
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm12[4,5,6,7]
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm12 = [2,6,10,14,2,6,10,14]
; AVX512-NEXT:    # zmm12 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm13
; AVX512-NEXT:    vpermt2q %zmm6, %zmm12, %zmm13
; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm14
; AVX512-NEXT:    vpermt2q %zmm4, %zmm12, %zmm14
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm14[0,1,2,3],zmm13[4,5,6,7]
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm14
; AVX512-NEXT:    vpermt2q %zmm3, %zmm12, %zmm14
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm12
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],zmm14[4,5,6,7]
; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm14 = [3,7,11,15,3,7,11,15]
; AVX512-NEXT:    # zmm14 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vpermt2q %zmm6, %zmm14, %zmm7
; AVX512-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm7[4,5,6,7]
; AVX512-NEXT:    vpermt2q %zmm3, %zmm14, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm0
; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
; AVX512-NEXT:    vmovdqu64 %zmm9, 64(%rsi)
; AVX512-NEXT:    vmovdqu64 %zmm8, (%rsi)
; AVX512-NEXT:    vmovdqu64 %zmm11, 64(%rdx)
; AVX512-NEXT:    vmovdqu64 %zmm10, (%rdx)
; AVX512-NEXT:    vmovdqu64 %zmm13, 64(%rcx)
; AVX512-NEXT:    vmovdqu64 %zmm12, (%rcx)
; AVX512-NEXT:    vmovdqu64 %zmm4, 64(%r8)
; AVX512-NEXT:    vmovdqu64 %zmm0, (%r8)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %wide.vec = load <64 x i64>, ptr %in.vec, align 32
  %strided.vec0 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  %strided.vec1 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
  %strided.vec2 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
  %strided.vec3 = shufflevector <64 x i64> %wide.vec, <64 x i64> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
  store <16 x i64> %strided.vec0, ptr %out.vec0, align 32
  store <16 x i64> %strided.vec1, ptr %out.vec1, align 32
  store <16 x i64> %strided.vec2, ptr %out.vec2, align 32
  store <16 x i64> %strided.vec3, ptr %out.vec3, align 32
  ret void
}
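
; A minimal sketch (illustration only, not part of the generated checks) of the
; kind of scalar source loop whose stride-4 interleaved loads the LoopVectorizer
; turns into the wide load + shufflevector deinterleave patterns tested above;
; the array names and element count are hypothetical:
;
;   for (int i = 0; i != n; ++i) {
;     out0[i] = in[4*i + 0];
;     out1[i] = in[4*i + 1];
;     out2[i] = in[4*i + 2];
;     out3[i] = in[4*i + 3];
;   }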