# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=ALL,NO_AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=ALL,AVX512ALL,AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=ALL,AVX512ALL,AVX512VL

--- |
  define <8 x i32> @test_load_v8i32_noalign(<8 x i32>* %p1) {
    %r = load <8 x i32>, <8 x i32>* %p1, align 1
    ret <8 x i32> %r
  }

  define <8 x i32> @test_load_v8i32_align(<8 x i32>* %p1) {
    %r = load <8 x i32>, <8 x i32>* %p1, align 32
    ret <8 x i32> %r
  }

  define void @test_store_v8i32_noalign(<8 x i32> %val, <8 x i32>* %p1) {
    store <8 x i32> %val, <8 x i32>* %p1, align 1
    ret void
  }

  define void @test_store_v8i32_align(<8 x i32> %val, <8 x i32>* %p1) {
    store <8 x i32> %val, <8 x i32>* %p1, align 32
    ret void
  }

...
---
name:            test_load_v8i32_noalign
# ALL-LABEL: name: test_load_v8i32_noalign
alignment:       16
legalized:       true
regBankSelected: true
# NO_AVX512F:      registers:
# NO_AVX512F-NEXT:   - { id: 0, class: gr64, preferred-register: '' }
# NO_AVX512F-NEXT:   - { id: 1, class: vr256, preferred-register: '' }
#
# AVX512ALL:       registers:
# AVX512ALL-NEXT:    - { id: 0, class: gr64, preferred-register: '' }
# AVX512ALL-NEXT:    - { id: 1, class: vr256x, preferred-register: '' }
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vecr }
# NO_AVX512F:      %0:gr64 = COPY $rdi
# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, $noreg, 0, $noreg :: (load (<8 x s32>) from %ir.p1, align 1)
# NO_AVX512F-NEXT: $ymm0 = COPY %1
# NO_AVX512F-NEXT: RET 0, implicit $ymm0
#
# AVX512F:         %0:gr64 = COPY $rdi
# AVX512F-NEXT:    %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load (<8 x s32>) from %ir.p1, align 1)
# AVX512F-NEXT:    $ymm0 = COPY %1
# AVX512F-NEXT:    RET 0, implicit $ymm0
#
# AVX512VL:        %0:gr64 = COPY $rdi
# AVX512VL-NEXT:   %1:vr256x = VMOVUPSZ256rm %0, 1, $noreg, 0, $noreg :: (load (<8 x s32>) from %ir.p1, align 1)
# AVX512VL-NEXT:   $ymm0 = COPY %1
# AVX512VL-NEXT:   RET 0, implicit $ymm0
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    %0(p0) = COPY $rdi
    %1(<8 x s32>) = G_LOAD %0(p0) :: (load (<8 x s32>) from %ir.p1, align 1)
    $ymm0 = COPY %1(<8 x s32>)
    RET 0, implicit $ymm0

...
---
name:            test_load_v8i32_align
# ALL-LABEL: name: test_load_v8i32_align
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vecr }
# NO_AVX512F:      %0:gr64 = COPY $rdi
# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, $noreg, 0, $noreg :: (load (<8 x s32>) from %ir.p1)
# NO_AVX512F-NEXT: $ymm0 = COPY %1
# NO_AVX512F-NEXT: RET 0, implicit $ymm0
#
# AVX512F:         %0:gr64 = COPY $rdi
# AVX512F-NEXT:    %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load (<8 x s32>) from %ir.p1)
# AVX512F-NEXT:    $ymm0 = COPY %1
# AVX512F-NEXT:    RET 0, implicit $ymm0
#
# AVX512VL:        %0:gr64 = COPY $rdi
# AVX512VL-NEXT:   %1:vr256x = VMOVAPSZ256rm %0, 1, $noreg, 0, $noreg :: (load (<8 x s32>) from %ir.p1)
# AVX512VL-NEXT:   $ymm0 = COPY %1
# AVX512VL-NEXT:   RET 0, implicit $ymm0
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi

    %0(p0) = COPY $rdi
    %1(<8 x s32>) = G_LOAD %0(p0) :: (load (<8 x s32>) from %ir.p1)
    $ymm0 = COPY %1(<8 x s32>)
    RET 0, implicit $ymm0

...
---
name:            test_store_v8i32_noalign
# ALL-LABEL: name: test_store_v8i32_noalign
alignment:       16
legalized:       true
regBankSelected: true
# NO_AVX512F:      registers:
# NO_AVX512F-NEXT:   - { id: 0, class: vr256, preferred-register: '' }
# NO_AVX512F-NEXT:   - { id: 1, class: gr64, preferred-register: '' }
#
# AVX512ALL:       registers:
# AVX512ALL-NEXT:    - { id: 0, class: vr256x, preferred-register: '' }
# AVX512ALL-NEXT:    - { id: 1, class: gr64, preferred-register: '' }
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: gpr }
# NO_AVX512F:      %0:vr256 = COPY $ymm0
# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store (<8 x s32>) into %ir.p1, align 1)
# NO_AVX512F-NEXT: RET 0
#
# AVX512F:         %0:vr256x = COPY $ymm0
# AVX512F-NEXT:    %1:gr64 = COPY $rdi
# AVX512F-NEXT:    VMOVUPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store (<8 x s32>) into %ir.p1, align 1)
# AVX512F-NEXT:    RET 0
#
# AVX512VL:        %0:vr256x = COPY $ymm0
# AVX512VL-NEXT:   %1:gr64 = COPY $rdi
# AVX512VL-NEXT:   VMOVUPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store (<8 x s32>) into %ir.p1, align 1)
# AVX512VL-NEXT:   RET 0
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $ymm0

    %0(<8 x s32>) = COPY $ymm0
    %1(p0) = COPY $rdi
    G_STORE %0(<8 x s32>), %1(p0) :: (store (<8 x s32>) into %ir.p1, align 1)
    RET 0

...
---
name:            test_store_v8i32_align
# ALL-LABEL: name: test_store_v8i32_align
alignment:       16
legalized:       true
regBankSelected: true
# NO_AVX512F:      registers:
# NO_AVX512F-NEXT:   - { id: 0, class: vr256, preferred-register: '' }
# NO_AVX512F-NEXT:   - { id: 1, class: gr64, preferred-register: '' }
#
# AVX512ALL:       registers:
# AVX512ALL-NEXT:    - { id: 0, class: vr256x, preferred-register: '' }
# AVX512ALL-NEXT:    - { id: 1, class: gr64, preferred-register: '' }
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: gpr }
# NO_AVX512F:      %0:vr256 = COPY $ymm0
# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store (<8 x s32>) into %ir.p1)
# NO_AVX512F-NEXT: RET 0
#
# AVX512F:         %0:vr256x = COPY $ymm0
# AVX512F-NEXT:    %1:gr64 = COPY $rdi
# AVX512F-NEXT:    VMOVAPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store (<8 x s32>) into %ir.p1)
# AVX512F-NEXT:    RET 0
#
# AVX512VL:        %0:vr256x = COPY $ymm0
# AVX512VL-NEXT:   %1:gr64 = COPY $rdi
# AVX512VL-NEXT:   VMOVAPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store (<8 x s32>) into %ir.p1)
# AVX512VL-NEXT:   RET 0
body:             |
  bb.1 (%ir-block.0):
    liveins: $rdi, $ymm0

    %0(<8 x s32>) = COPY $ymm0
    %1(p0) = COPY $rdi
    G_STORE %0(<8 x s32>), %1(p0) :: (store (<8 x s32>) into %ir.p1)
    RET 0

...