# RUN: llc -mtriple=arm-none-eabi -run-pass=arm-cp-islands %s -o - | FileCheck %s
#
# This checks alignment of a block when a CPE is placed before/after a
# block (as opposed to, e.g., splitting up a block), and also makes sure
# we don't decrease alignment.
#
--- |
  ; ModuleID = '<stdin>'
  source_filename = "<stdin>"
  target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
  target triple = "arm-arm--eabi"

  declare i32 @llvm.arm.space(i32, i32) #0
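  ; llvm.arm.space(size, value) reserves 'size' bytes of code in the generated
  ; function; the calls below use it to pad the LA and END blocks.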

  define dso_local i32 @CP() #1 {
  entry:
    %res = alloca half, align 2
    store half 0xH706B, half* %res, align 2
    %0 = load half, half* %res, align 2
    %tobool = fcmp une half %0, 0xH0000
    br i1 %tobool, label %LA, label %END

  LA:                                               ; preds = %entry
    %1 = call i32 @llvm.arm.space(i32 1000, i32 undef)
    br label %END

  END:                                              ; preds = %LA, %entry
    %2 = call i32 @llvm.arm.space(i32 100, i32 undef)
    ret i32 42
  }

  ; Function Attrs: nounwind
  declare void @llvm.stackprotector(i8*, i8**) #2

  attributes #0 = { nounwind "target-features"="+v8.2a,+fullfp16" }
  attributes #1 = { "target-features"="+v8.2a,+fullfp16" }
  attributes #2 = { nounwind }

...
---
name:            CP
alignment:       4
exposesReturnsTwice: false
legalized:       false
regBankSelected: false
selected:        false
tracksRegLiveness: true
registers:
liveins:
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap:     false
  hasPatchPoint:   false
  stackSize:       4
  offsetAdjustment: 0
  maxAlignment:    2
  adjustsStack:    false
  hasCalls:        false
  stackProtector:  ''
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart:      false
  hasMustTailInVarArgFunc: false
  savePoint:       ''
  restorePoint:    ''
fixedStack:
stack:
  - { id: 0, name: res, type: default, offset: -2, size: 2, alignment: 2,
      stack-id: default, callee-saved-register: '', callee-saved-restored: true,
      local-offset: -2, debug-info-variable: '', debug-info-expression: '',
      debug-info-location: '' }
constants:
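  # %const.0 is the half constant loaded in bb.0.entry; the constant islands
  # pass materializes it as an inline CONSTPOOL_ENTRY near its use.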
  - id:              0
    value:           half 0xH706B
    alignment:       2
    isTargetSpecific: false


#CHECK:  bb.{{.*}} (align 2):
#CHECK:    successors:
#CHECK:    CONSTPOOL_ENTRY 1, %const{{.*}}, 2
#
# We don't want to decrease alignment if the block has already been
# aligned; this can, e.g., be an existing CPE that has been carefully
# aligned. Here bb.1.LA already has an 8-byte alignment, and we check
# that we don't set it to 4:
#
#CHECK:  bb.{{.*}}.LA (align 8):

body:             |
  bb.0.entry:
    successors: %bb.1(0x50000000), %bb.2(0x30000000)

    $sp = frame-setup SUBri $sp, 4, 14, $noreg, $noreg
    frame-setup CFI_INSTRUCTION def_cfa_offset 4
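    ; The load of %const.0 is pc-relative with a limited offset range, so the
    ; constant islands pass must place a CONSTPOOL_ENTRY within reach of it.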
    renamable $s0 = VLDRH %const.0, 0, 14, $noreg :: (load (s16) from constant-pool)
    VCMPZH renamable $s0, 14, $noreg, implicit-def $fpscr_nzcv
    VSTRH killed renamable $s0, $sp, 1, 14, $noreg :: (store (s16) into %ir.res)
    FMSTAT 14, $noreg, implicit-def $cpsr, implicit killed $fpscr_nzcv
    Bcc %bb.2, 0, killed $cpsr

  bb.1.LA (align 8):
    successors: %bb.2(0x80000000)
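    ; SPACE emits the requested number of padding bytes; 1000 bytes of padding
    ; here push the end of the function out of range of the VLDRH above, so
    ; the CPE has to be placed before/after a nearby block instead.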

    dead renamable $r0 = SPACE 1000, undef renamable $r0

  bb.2.END:
    dead renamable $r0 = SPACE 100, undef renamable $r0
    $r0 = MOVi 42, 14, $noreg, $noreg
    $sp = ADDri $sp, 4, 14, $noreg, $noreg
    BX_RET 14, $noreg, implicit killed $r0

...