# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-- -run-pass=fastpretileconfig -o - %s | FileCheck %s
#
#       bb.0
#      def %0
#      /    \
#   bb.1    bb.2 <----------
#  def %1   %2=phi(%0, %3) |
#      \    /              |
#       bb.3 ----------------
#   %3=phi(%1, %2)
#
# This case tests tile PHIs that depend on each other while their def
# block has not been visited yet.
---
name:            foo
alignment:       16
tracksRegLiveness: true
registers:
  - { id: 1, class: tile }
  - { id: 2, class: tile }
  - { id: 3, class: tile }
  - { id: 4, class: tile }
  - { id: 12, class: gr32 }
  - { id: 13, class: gr32 }
  - { id: 16, class: gr8 }
  - { id: 17, class: gr16 }
  - { id: 18, class: gr16 }
  - { id: 19, class: gr64_nosp }
  - { id: 22, class: gr32 }
  - { id: 23, class: gr16 }
  - { id: 24, class: gr16 }
liveins:
  - { reg: '$edi', virtual-reg: '%12' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: foo
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $edi
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[V_SET0_:%[0-9]+]]:vr128 = V_SET0
  ; CHECK-NEXT:   MOVUPSmr %stack.1, 1, $noreg, 0, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1, align 4)
  ; CHECK-NEXT:   MOVUPSmr %stack.1, 1, $noreg, 16, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 16, align 4)
  ; CHECK-NEXT:   MOVUPSmr %stack.1, 1, $noreg, 32, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 32, align 4)
  ; CHECK-NEXT:   MOVUPSmr %stack.1, 1, $noreg, 48, $noreg, [[V_SET0_]] :: (store (s512) into %stack.1 + 48, align 4)
  ; CHECK-NEXT:   MOV8mi %stack.1, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.1, align 4)
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr32 = COPY $edi
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr32 = COPY killed [[COPY]]
  ; CHECK-NEXT:   %r0:gr16 = MOV16ri 64
  ; CHECK-NEXT:   %c0:gr16 = MOV16ri 16
  ; CHECK-NEXT:   PLDTILECFGV %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.1, align 4)
  ; CHECK-NEXT:   [[LEA64r:%[0-9]+]]:gr64_nosp = LEA64r %stack.0, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   %t0:tile = PTILEZEROV %r0, %c0
  ; CHECK-NEXT:   [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.0, 1, killed [[MOV64ri]], 0, $noreg, %t0 :: (store (s8192) into %stack.0)
  ; CHECK-NEXT:   CMP32ri8 [[COPY1]], 0, implicit-def $eflags
  ; CHECK-NEXT:   [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
  ; CHECK-NEXT:   TEST8ri [[SETCCr]], 1, implicit-def $eflags
  ; CHECK-NEXT:   JCC_1 %bb.2, 5, implicit $eflags
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 64
  ; CHECK-NEXT:   [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 16
  ; CHECK-NEXT:   PLDTILECFGV %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.1, align 4)
  ; CHECK-NEXT:   [[LEA64r1:%[0-9]+]]:gr64_nosp = LEA64r %stack.2, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILEZEROV:%[0-9]+]]:tile = PTILEZEROV [[MOV16ri1]], [[MOV16ri]]
  ; CHECK-NEXT:   [[MOV64ri1:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.2, 1, killed [[MOV64ri1]], 0, $noreg, [[PTILEZEROV]] :: (store (s8192) into %stack.2)
  ; CHECK-NEXT:   JMP_1 %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gr16 = PHI %c0, %bb.0, %24, %bb.3
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gr16 = PHI %r0, %bb.0, %23, %bb.3
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:gr64_nosp = PHI [[LEA64r]], %bb.0, %22, %bb.3
  ; CHECK-NEXT:   PLDTILECFGV %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.1, align 4)
  ; CHECK-NEXT:   [[MOV64ri2:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[PHI1]], [[PHI]], [[PHI2]], 1, killed [[MOV64ri2]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri3:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.3, 1, killed [[MOV64ri3]], 0, $noreg, [[PTILELOADDV]] :: (store (s8192) into %stack.3)
  ; CHECK-NEXT:   [[MOV32ri64_:%[0-9]+]]:gr64_nosp = MOV32ri64 32
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:gr16 = PHI [[MOV16ri]], %bb.1, [[PHI]], %bb.2
  ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:gr16 = PHI [[MOV16ri1]], %bb.1, [[PHI1]], %bb.2
  ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:gr64_nosp = PHI [[LEA64r1]], %bb.1, [[PHI2]], %bb.2
  ; CHECK-NEXT:   PLDTILECFGV %stack.1, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.1, align 4)
  ; CHECK-NEXT:   [[MOV64ri4:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[PHI4]], [[PHI3]], [[PHI5]], 1, killed [[MOV64ri4]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri5:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.4, 1, killed [[MOV64ri5]], 0, $noreg, [[PTILELOADDV1]] :: (store (s8192) into %stack.4)
  ; CHECK-NEXT:   [[MOV16ri2:%[0-9]+]]:gr16 = MOV16ri 64
  ; CHECK-NEXT:   [[MOV16ri3:%[0-9]+]]:gr16 = MOV16ri 16
  ; CHECK-NEXT:   JMP_1 %bb.2
  bb.0.entry:
    liveins: $edi

    %12:gr32 = COPY $edi
    %13:gr32 = COPY killed %12
    %r0:gr16 = MOV16ri 64
    %c0:gr16 = MOV16ri 16
    %t0:tile = PTILEZEROV killed %r0, killed %c0
    CMP32ri8 %13, 0, implicit-def $eflags
    %16:gr8 = SETCCr 4, implicit $eflags
    TEST8ri %16, 1, implicit-def $eflags
    JCC_1 %bb.2, 5, implicit $eflags

  bb.1:
    %17:gr16 = MOV16ri 64
    %18:gr16 = MOV16ri 16
    %1:tile = PTILEZEROV killed %18, killed %17
    JMP_1 %bb.3

  bb.2:
    %2:tile = PHI %t0, %bb.0, %3, %bb.3
    %19:gr64_nosp = MOV32ri64 32

  bb.3:
    %3:tile = PHI %1, %bb.1, %2, %bb.2
    %23:gr16 = MOV16ri 64
    %24:gr16 = MOV16ri 16
    JMP_1 %bb.2