# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-- -run-pass=fastpretileconfig -o - %s | FileCheck %s
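#
# Test that fastpretileconfig materializes a zeroed tile configuration in a
# stack slot (%stack.3), sets the palette byte to 1, and reloads it with
# PLDTILECFGV in every basic block that defines tile registers. Tile values
# that are live across the x86_amx PHIs in if.end are spilled with TILESTORED
# in the predecessors and reloaded with PTILELOADDV through dedicated stack
# slots, with the PHIs rewritten to carry the tile shapes and slot addresses.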

--- |
  target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
  target triple = "x86_64-unknown-unknown"

  @buf = dso_local global [1024 x i8] zeroinitializer, align 16
  @buf2 = dso_local global [1024 x i8] zeroinitializer, align 16

  define dso_local void @test_api(i32 %cond, i16 signext %row, i16 signext %col) local_unnamed_addr #0 {
  entry:
    %tobool.not = icmp eq i32 %cond, 0
    br i1 %tobool.not, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %0 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
    %1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
    %2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
    br label %if.end

  if.else:                                          ; preds = %entry
    %3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
    %4 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
    %5 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %a.sroa.1094.0.in = phi x86_amx [ %3, %if.else ], [ %0, %if.then ]
    %b.sroa.1069.0.in = phi x86_amx [ %4, %if.else ], [ %1, %if.then ]
    %c.sroa.1044.0.in = phi x86_amx [ %5, %if.else ], [ %2, %if.then ]
    %6 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %row, i16 %col, i16 8, x86_amx %c.sroa.1044.0.in, x86_amx %a.sroa.1094.0.in, x86_amx %b.sroa.1069.0.in)
    tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx %6)
    ret void
  }

  declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64) #1
  declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) #1
  declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx) #1

  attributes #0 = { "target-features"="+amx-int8,+avx512f" }
  attributes #1 = { nounwind "target-features"="+amx-int8,+avx512f" }

...
---
name:            test_api
alignment:       16
tracksRegLiveness: true
registers:
  - { id: 0, class: tile }
  - { id: 1, class: tile }
  - { id: 2, class: tile }
  - { id: 3, class: tile }
  - { id: 4, class: tile }
  - { id: 5, class: tile }
  - { id: 6, class: tile }
  - { id: 7, class: tile }
  - { id: 8, class: tile }
  - { id: 9, class: gr32 }
  - { id: 10, class: gr32 }
  - { id: 11, class: gr32 }
  - { id: 12, class: gr16 }
  - { id: 13, class: gr16 }
  - { id: 14, class: gr64 }
  - { id: 15, class: gr64_nosp }
  - { id: 16, class: gr16 }
  - { id: 17, class: gr64 }
  - { id: 18, class: gr64_nosp }
  - { id: 19, class: gr16 }
  - { id: 20, class: gr16 }
  - { id: 21, class: tile }
  - { id: 22, class: gr64 }
  - { id: 23, class: gr64_nosp }
liveins:
  - { reg: '$edi', virtual-reg: '%9' }
  - { reg: '$esi', virtual-reg: '%10' }
  - { reg: '$edx', virtual-reg: '%11' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: test_api
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $edi, $esi, $edx
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[AVX512_512_SET0_:%[0-9]+]]:vr512 = AVX512_512_SET0
  ; CHECK-NEXT:   VMOVUPSZmr %stack.3, 1, $noreg, 0, $noreg, [[AVX512_512_SET0_]] :: (store (s512) into %stack.3, align 4)
  ; CHECK-NEXT:   MOV8mi %stack.3, 1, $noreg, 0, $noreg, 1 :: (store (s512) into %stack.3, align 4)
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr32 = COPY killed $edx
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr32 = COPY killed $esi
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gr32 = COPY killed $edi
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gr16 = COPY killed [[COPY]].sub_16bit
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gr16 = COPY killed [[COPY1]].sub_16bit
  ; CHECK-NEXT:   TEST32rr killed [[COPY2]], [[COPY2]], implicit-def $eflags
  ; CHECK-NEXT:   JCC_1 %bb.2, 4, implicit killed $eflags
  ; CHECK-NEXT:   JMP_1 %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[MOV32ri64_:%[0-9]+]]:gr64 = MOV32ri64 @buf
  ; CHECK-NEXT:   [[MOV32ri64_1:%[0-9]+]]:gr64_nosp = MOV32ri64 32
  ; CHECK-NEXT:   [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 8
  ; CHECK-NEXT:   PLDTILECFGV %stack.3, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.3, align 4)
  ; CHECK-NEXT:   [[LEA64r:%[0-9]+]]:gr64_nosp = LEA64r %stack.2, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILELOADDV:%[0-9]+]]:tile = PTILELOADDV [[COPY4]], [[MOV16ri]], [[MOV32ri64_]], 1, [[MOV32ri64_1]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.2, 1, killed [[MOV64ri]], 0, $noreg, [[PTILELOADDV]] :: (store (s8192) into %stack.2)
  ; CHECK-NEXT:   [[LEA64r1:%[0-9]+]]:gr64_nosp = LEA64r %stack.1, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILELOADDV1:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri]], [[COPY3]], [[MOV32ri64_]], 1, [[MOV32ri64_1]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri1:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.1, 1, killed [[MOV64ri1]], 0, $noreg, [[PTILELOADDV1]] :: (store (s8192) into %stack.1)
  ; CHECK-NEXT:   [[LEA64r2:%[0-9]+]]:gr64_nosp = LEA64r %stack.0, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILELOADDV2:%[0-9]+]]:tile = PTILELOADDV [[COPY4]], [[COPY3]], killed [[MOV32ri64_]], 1, killed [[MOV32ri64_1]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri2:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.0, 1, killed [[MOV64ri2]], 0, $noreg, [[PTILELOADDV2]] :: (store (s8192) into %stack.0)
  ; CHECK-NEXT:   JMP_1 %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[MOV32ri64_2:%[0-9]+]]:gr64 = MOV32ri64 @buf2
  ; CHECK-NEXT:   [[MOV32ri64_3:%[0-9]+]]:gr64_nosp = MOV32ri64 32
  ; CHECK-NEXT:   [[MOV16ri1:%[0-9]+]]:gr16 = MOV16ri 8
  ; CHECK-NEXT:   PLDTILECFGV %stack.3, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.3, align 4)
  ; CHECK-NEXT:   [[LEA64r3:%[0-9]+]]:gr64_nosp = LEA64r %stack.6, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILELOADDV3:%[0-9]+]]:tile = PTILELOADDV [[COPY4]], [[MOV16ri1]], [[MOV32ri64_2]], 1, [[MOV32ri64_3]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri3:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.6, 1, killed [[MOV64ri3]], 0, $noreg, [[PTILELOADDV3]] :: (store (s8192) into %stack.6)
  ; CHECK-NEXT:   [[LEA64r4:%[0-9]+]]:gr64_nosp = LEA64r %stack.5, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILELOADDV4:%[0-9]+]]:tile = PTILELOADDV [[MOV16ri1]], [[COPY3]], [[MOV32ri64_2]], 1, [[MOV32ri64_3]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri4:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.5, 1, killed [[MOV64ri4]], 0, $noreg, [[PTILELOADDV4]] :: (store (s8192) into %stack.5)
  ; CHECK-NEXT:   [[LEA64r5:%[0-9]+]]:gr64_nosp = LEA64r %stack.4, 1, $noreg, 0, $noreg
  ; CHECK-NEXT:   [[PTILELOADDV5:%[0-9]+]]:tile = PTILELOADDV [[COPY4]], [[COPY3]], killed [[MOV32ri64_2]], 1, killed [[MOV32ri64_3]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri5:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   TILESTORED %stack.4, 1, killed [[MOV64ri5]], 0, $noreg, [[PTILELOADDV5]] :: (store (s8192) into %stack.4)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gr16 = PHI [[MOV16ri]], %bb.1, [[MOV16ri1]], %bb.2
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gr16 = PHI [[COPY4]], %bb.1, [[COPY4]], %bb.2
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:gr64_nosp = PHI [[LEA64r]], %bb.1, [[LEA64r3]], %bb.2
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:gr16 = PHI [[COPY3]], %bb.1, [[COPY3]], %bb.2
  ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:gr16 = PHI [[MOV16ri]], %bb.1, [[MOV16ri1]], %bb.2
  ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:gr64_nosp = PHI [[LEA64r1]], %bb.1, [[LEA64r4]], %bb.2
  ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:gr16 = PHI [[COPY3]], %bb.1, [[COPY3]], %bb.2
  ; CHECK-NEXT:   [[PHI7:%[0-9]+]]:gr16 = PHI [[COPY4]], %bb.1, [[COPY4]], %bb.2
  ; CHECK-NEXT:   [[PHI8:%[0-9]+]]:gr64_nosp = PHI [[LEA64r2]], %bb.1, [[LEA64r5]], %bb.2
  ; CHECK-NEXT:   PLDTILECFGV %stack.3, 1, $noreg, 0, $noreg, implicit-def $tmm0, implicit-def $tmm1, implicit-def $tmm2, implicit-def $tmm3, implicit-def $tmm4, implicit-def $tmm5, implicit-def $tmm6, implicit-def $tmm7 :: (load (s512) from %stack.3, align 4)
  ; CHECK-NEXT:   [[MOV64ri6:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   [[PTILELOADDV6:%[0-9]+]]:tile = PTILELOADDV [[PHI1]], [[PHI]], [[PHI2]], 1, killed [[MOV64ri6]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri7:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   [[PTILELOADDV7:%[0-9]+]]:tile = PTILELOADDV [[PHI4]], [[PHI3]], [[PHI5]], 1, killed [[MOV64ri7]], 0, $noreg
  ; CHECK-NEXT:   [[MOV64ri8:%[0-9]+]]:gr64_nosp = MOV64ri 64
  ; CHECK-NEXT:   [[PTILELOADDV8:%[0-9]+]]:tile = PTILELOADDV [[PHI7]], [[PHI6]], [[PHI8]], 1, killed [[MOV64ri8]], 0, $noreg
  ; CHECK-NEXT:   [[MOV16ri2:%[0-9]+]]:gr16 = MOV16ri 8
  ; CHECK-NEXT:   [[PTDPBSSDV:%[0-9]+]]:tile = PTDPBSSDV [[COPY4]], [[COPY3]], killed [[MOV16ri2]], killed [[PTILELOADDV8]], killed [[PTILELOADDV6]], killed [[PTILELOADDV7]]
  ; CHECK-NEXT:   [[MOV32ri64_4:%[0-9]+]]:gr64 = MOV32ri64 @buf
  ; CHECK-NEXT:   [[MOV32ri64_5:%[0-9]+]]:gr64_nosp = MOV32ri64 32
  ; CHECK-NEXT:   PTILESTOREDV killed [[COPY4]], killed [[COPY3]], killed [[MOV32ri64_4]], 1, killed [[MOV32ri64_5]], 0, $noreg, killed [[PTDPBSSDV]]
  ; CHECK-NEXT:   RET 0
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $edi, $esi, $edx


    %11:gr32 = COPY killed $edx
    %10:gr32 = COPY killed $esi
    %9:gr32 = COPY killed $edi
    %13:gr16 = COPY killed %11.sub_16bit
    %12:gr16 = COPY killed %10.sub_16bit
    TEST32rr killed %9, %9, implicit-def $eflags
    JCC_1 %bb.2, 4, implicit killed $eflags
    JMP_1 %bb.1

  bb.1.if.then:
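    ; Load the a, b and c tiles from @buf. The pass is expected to insert a
    ; PLDTILECFGV of the config in %stack.3 before the first tile load.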
    %14:gr64 = MOV32ri64 @buf
    %15:gr64_nosp = MOV32ri64 32
    %16:gr16 = MOV16ri 8
    %0:tile = PTILELOADDV %12, %16, %14, 1, %15, 0, $noreg
    %1:tile = PTILELOADDV killed %16, %13, %14, 1, %15, 0, $noreg
    %2:tile = PTILELOADDV %12, %13, killed %14, 1, killed %15, 0, $noreg
    JMP_1 %bb.3

  bb.2.if.else:
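    ; Same as if.then, but the tiles are loaded from @buf2.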
    %17:gr64 = MOV32ri64 @buf2
    %18:gr64_nosp = MOV32ri64 32
    %19:gr16 = MOV16ri 8
    %3:tile = PTILELOADDV %12, %19, %17, 1, %18, 0, $noreg
    %4:tile = PTILELOADDV killed %19, %13, %17, 1, %18, 0, $noreg
    %5:tile = PTILELOADDV %12, %13, killed %17, 1, killed %18, 0, $noreg

  bb.3.if.end:
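    ; The x86_amx PHIs force the incoming tiles to be spilled in the
    ; predecessors and reloaded here after a fresh PLDTILECFGV.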


    %6:tile = PHI %0, %bb.1, %3, %bb.2
    %7:tile = PHI %1, %bb.1, %4, %bb.2
    %8:tile = PHI %2, %bb.1, %5, %bb.2
    %20:gr16 = MOV16ri 8
    %21:tile = PTDPBSSDV %12, %13, killed %20, killed %8, killed %6, killed %7
    %22:gr64 = MOV32ri64 @buf
    %23:gr64_nosp = MOV32ri64 32
    PTILESTOREDV killed %12, killed %13, killed %22, 1, killed %23, 0, $noreg, killed %21
    RET 0

...