; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone | FileCheck %s -check-prefixes=CHECK,NOOUTLINE
; RUN: llc < %s -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -mattr=+outline-atomics | FileCheck %s -check-prefixes=CHECK,OUTLINE
; RUN: llc < %s -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -mattr=+lse | FileCheck %s -check-prefixes=CHECK,LSE
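
; This test exercises lowering of 128-bit atomics in three configurations: the
; default LL/SC expansion (NOOUTLINE), outlined atomics, which call the
; __aarch64_cas16_* helpers for cmpxchg (OUTLINE), and LSE, which selects the
; CASP family for cmpxchg (LSE). Operations other than cmpxchg are expanded to
; LL/SC loops in every configuration and share the common CHECK prefix.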

@var = global i128 0

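; i128 cmpxchg with acquire ordering. The LL/SC expansion uses LDAXP/STXP;
; outlined atomics call __aarch64_cas16_acq; LSE uses CASPA.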
define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
; NOOUTLINE-LABEL: val_compare_and_swap:
; NOOUTLINE:       // %bb.0:
; NOOUTLINE-NEXT:  .LBB0_1: // =>This Inner Loop Header: Depth=1
; NOOUTLINE-NEXT:    ldaxp x8, x1, [x0]
; NOOUTLINE-NEXT:    cmp x8, x2
; NOOUTLINE-NEXT:    cset w9, ne
; NOOUTLINE-NEXT:    cmp x1, x3
; NOOUTLINE-NEXT:    cinc w9, w9, ne
; NOOUTLINE-NEXT:    cbz w9, .LBB0_3
; NOOUTLINE-NEXT:  // %bb.2: // in Loop: Header=BB0_1 Depth=1
; NOOUTLINE-NEXT:    stxp w9, x8, x1, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB0_1
; NOOUTLINE-NEXT:    b .LBB0_4
; NOOUTLINE-NEXT:  .LBB0_3: // in Loop: Header=BB0_1 Depth=1
; NOOUTLINE-NEXT:    stxp w9, x4, x5, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB0_1
; NOOUTLINE-NEXT:  .LBB0_4:
; NOOUTLINE-NEXT:    mov x0, x8
; NOOUTLINE-NEXT:    ret
;
; OUTLINE-LABEL: val_compare_and_swap:
; OUTLINE:       // %bb.0:
; OUTLINE-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE-NEXT:    .cfi_def_cfa_offset 16
; OUTLINE-NEXT:    .cfi_offset w30, -16
; OUTLINE-NEXT:    mov x1, x3
; OUTLINE-NEXT:    mov x8, x0
; OUTLINE-NEXT:    mov x0, x2
; OUTLINE-NEXT:    mov x2, x4
; OUTLINE-NEXT:    mov x3, x5
; OUTLINE-NEXT:    mov x4, x8
; OUTLINE-NEXT:    bl __aarch64_cas16_acq
; OUTLINE-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE-NEXT:    ret
;
; LSE-LABEL: val_compare_and_swap:
; LSE:       // %bb.0:
; LSE-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
; LSE-NEXT:    // kill: def $x4 killed $x4 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3
; LSE-NEXT:    caspa x2, x3, x4, x5, [x0]
; LSE-NEXT:    mov x0, x2
; LSE-NEXT:    mov x1, x3
; LSE-NEXT:    ret
  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
  %val = extractvalue { i128, i1 } %pair, 0
  ret i128 %val
}

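; i128 cmpxchg with seq_cst ordering. The LL/SC expansion uses LDAXP/STLXP;
; outlined atomics call __aarch64_cas16_acq_rel; LSE uses CASPAL.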
define i128 @val_compare_and_swap_seqcst(i128* %p, i128 %oldval, i128 %newval) {
; NOOUTLINE-LABEL: val_compare_and_swap_seqcst:
; NOOUTLINE:       // %bb.0:
; NOOUTLINE-NEXT:  .LBB1_1: // =>This Inner Loop Header: Depth=1
; NOOUTLINE-NEXT:    ldaxp x8, x1, [x0]
; NOOUTLINE-NEXT:    cmp x8, x2
; NOOUTLINE-NEXT:    cset w9, ne
; NOOUTLINE-NEXT:    cmp x1, x3
; NOOUTLINE-NEXT:    cinc w9, w9, ne
; NOOUTLINE-NEXT:    cbz w9, .LBB1_3
; NOOUTLINE-NEXT:  // %bb.2: // in Loop: Header=BB1_1 Depth=1
; NOOUTLINE-NEXT:    stlxp w9, x8, x1, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB1_1
; NOOUTLINE-NEXT:    b .LBB1_4
; NOOUTLINE-NEXT:  .LBB1_3: // in Loop: Header=BB1_1 Depth=1
; NOOUTLINE-NEXT:    stlxp w9, x4, x5, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB1_1
; NOOUTLINE-NEXT:  .LBB1_4:
; NOOUTLINE-NEXT:    mov x0, x8
; NOOUTLINE-NEXT:    ret
;
; OUTLINE-LABEL: val_compare_and_swap_seqcst:
; OUTLINE:       // %bb.0:
; OUTLINE-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE-NEXT:    .cfi_def_cfa_offset 16
; OUTLINE-NEXT:    .cfi_offset w30, -16
; OUTLINE-NEXT:    mov x1, x3
; OUTLINE-NEXT:    mov x8, x0
; OUTLINE-NEXT:    mov x0, x2
; OUTLINE-NEXT:    mov x2, x4
; OUTLINE-NEXT:    mov x3, x5
; OUTLINE-NEXT:    mov x4, x8
; OUTLINE-NEXT:    bl __aarch64_cas16_acq_rel
; OUTLINE-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE-NEXT:    ret
;
; LSE-LABEL: val_compare_and_swap_seqcst:
; LSE:       // %bb.0:
; LSE-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
; LSE-NEXT:    // kill: def $x4 killed $x4 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3
; LSE-NEXT:    caspal x2, x3, x4, x5, [x0]
; LSE-NEXT:    mov x0, x2
; LSE-NEXT:    mov x1, x3
; LSE-NEXT:    ret
  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval seq_cst seq_cst
  %val = extractvalue { i128, i1 } %pair, 0
  ret i128 %val
}

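; i128 cmpxchg with release/monotonic ordering. The LL/SC expansion uses
; LDXP/STLXP; outlined atomics call __aarch64_cas16_rel; LSE uses CASPL.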
define i128 @val_compare_and_swap_release(i128* %p, i128 %oldval, i128 %newval) {
; NOOUTLINE-LABEL: val_compare_and_swap_release:
; NOOUTLINE:       // %bb.0:
; NOOUTLINE-NEXT:  .LBB2_1: // =>This Inner Loop Header: Depth=1
; NOOUTLINE-NEXT:    ldxp x8, x1, [x0]
; NOOUTLINE-NEXT:    cmp x8, x2
; NOOUTLINE-NEXT:    cset w9, ne
; NOOUTLINE-NEXT:    cmp x1, x3
; NOOUTLINE-NEXT:    cinc w9, w9, ne
; NOOUTLINE-NEXT:    cbz w9, .LBB2_3
; NOOUTLINE-NEXT:  // %bb.2: // in Loop: Header=BB2_1 Depth=1
; NOOUTLINE-NEXT:    stlxp w9, x8, x1, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB2_1
; NOOUTLINE-NEXT:    b .LBB2_4
; NOOUTLINE-NEXT:  .LBB2_3: // in Loop: Header=BB2_1 Depth=1
; NOOUTLINE-NEXT:    stlxp w9, x4, x5, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB2_1
; NOOUTLINE-NEXT:  .LBB2_4:
; NOOUTLINE-NEXT:    mov x0, x8
; NOOUTLINE-NEXT:    ret
;
; OUTLINE-LABEL: val_compare_and_swap_release:
; OUTLINE:       // %bb.0:
; OUTLINE-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE-NEXT:    .cfi_def_cfa_offset 16
; OUTLINE-NEXT:    .cfi_offset w30, -16
; OUTLINE-NEXT:    mov x1, x3
; OUTLINE-NEXT:    mov x8, x0
; OUTLINE-NEXT:    mov x0, x2
; OUTLINE-NEXT:    mov x2, x4
; OUTLINE-NEXT:    mov x3, x5
; OUTLINE-NEXT:    mov x4, x8
; OUTLINE-NEXT:    bl __aarch64_cas16_rel
; OUTLINE-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE-NEXT:    ret
;
; LSE-LABEL: val_compare_and_swap_release:
; LSE:       // %bb.0:
; LSE-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
; LSE-NEXT:    // kill: def $x4 killed $x4 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3
; LSE-NEXT:    caspl x2, x3, x4, x5, [x0]
; LSE-NEXT:    mov x0, x2
; LSE-NEXT:    mov x1, x3
; LSE-NEXT:    ret
  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release monotonic
  %val = extractvalue { i128, i1 } %pair, 0
  ret i128 %val
}

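; i128 cmpxchg with monotonic ordering. The LL/SC expansion uses LDXP/STXP;
; outlined atomics call __aarch64_cas16_relax; LSE uses CASP.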
define i128 @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval) {
; NOOUTLINE-LABEL: val_compare_and_swap_monotonic:
; NOOUTLINE:       // %bb.0:
; NOOUTLINE-NEXT:  .LBB3_1: // =>This Inner Loop Header: Depth=1
; NOOUTLINE-NEXT:    ldxp x8, x1, [x0]
; NOOUTLINE-NEXT:    cmp x8, x2
; NOOUTLINE-NEXT:    cset w9, ne
; NOOUTLINE-NEXT:    cmp x1, x3
; NOOUTLINE-NEXT:    cinc w9, w9, ne
; NOOUTLINE-NEXT:    cbz w9, .LBB3_3
; NOOUTLINE-NEXT:  // %bb.2: // in Loop: Header=BB3_1 Depth=1
; NOOUTLINE-NEXT:    stxp w9, x8, x1, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB3_1
; NOOUTLINE-NEXT:    b .LBB3_4
; NOOUTLINE-NEXT:  .LBB3_3: // in Loop: Header=BB3_1 Depth=1
; NOOUTLINE-NEXT:    stxp w9, x4, x5, [x0]
; NOOUTLINE-NEXT:    cbnz w9, .LBB3_1
; NOOUTLINE-NEXT:  .LBB3_4:
; NOOUTLINE-NEXT:    mov x0, x8
; NOOUTLINE-NEXT:    ret
;
; OUTLINE-LABEL: val_compare_and_swap_monotonic:
; OUTLINE:       // %bb.0:
; OUTLINE-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE-NEXT:    .cfi_def_cfa_offset 16
; OUTLINE-NEXT:    .cfi_offset w30, -16
; OUTLINE-NEXT:    mov x1, x3
; OUTLINE-NEXT:    mov x8, x0
; OUTLINE-NEXT:    mov x0, x2
; OUTLINE-NEXT:    mov x2, x4
; OUTLINE-NEXT:    mov x3, x5
; OUTLINE-NEXT:    mov x4, x8
; OUTLINE-NEXT:    bl __aarch64_cas16_relax
; OUTLINE-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE-NEXT:    ret
;
; LSE-LABEL: val_compare_and_swap_monotonic:
; LSE:       // %bb.0:
; LSE-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
; LSE-NEXT:    // kill: def $x4 killed $x4 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3
; LSE-NEXT:    casp x2, x3, x4, x5, [x0]
; LSE-NEXT:    mov x0, x2
; LSE-NEXT:    mov x1, x3
; LSE-NEXT:    ret
  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval monotonic monotonic
  %val = extractvalue { i128, i1 } %pair, 0
  ret i128 %val
}

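; i128 atomicrmw nand with release ordering: an LDXP/STLXP loop computing
; ~(old & bits); the old value is stored to @var.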
define void @fetch_and_nand(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_nand:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB4_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldxp x9, x8, [x0]
; CHECK-NEXT:    and x10, x9, x2
; CHECK-NEXT:    and x11, x8, x3
; CHECK-NEXT:    mvn x11, x11
; CHECK-NEXT:    mvn x10, x10
; CHECK-NEXT:    stlxp w12, x10, x11, [x0]
; CHECK-NEXT:    cbnz w12, .LBB4_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw nand i128* %p, i128 %bits release
  store i128 %val, i128* @var, align 16
  ret void
}

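; i128 atomicrmw or with seq_cst ordering: an LDAXP/STLXP loop applying ORR to
; both halves.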
define void @fetch_and_or(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_or:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB5_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    orr x10, x8, x3
; CHECK-NEXT:    orr x11, x9, x2
; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
; CHECK-NEXT:    cbnz w12, .LBB5_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw or i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

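; i128 atomicrmw add with seq_cst ordering: the addition is carried across the
; two halves with ADDS/ADC.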
define void @fetch_and_add(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB6_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    adds x10, x9, x2
; CHECK-NEXT:    adc x11, x8, x3
; CHECK-NEXT:    stlxp w12, x10, x11, [x0]
; CHECK-NEXT:    cbnz w12, .LBB6_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw add i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

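; i128 atomicrmw sub with seq_cst ordering: the subtraction borrows across the
; two halves with SUBS/SBC.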
define void @fetch_and_sub(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB7_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    subs x10, x9, x2
; CHECK-NEXT:    sbc x11, x8, x3
; CHECK-NEXT:    stlxp w12, x10, x11, [x0]
; CHECK-NEXT:    cbnz w12, .LBB7_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw sub i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

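; i128 atomicrmw min (signed) with seq_cst ordering: the 128-bit comparison is
; built from CMP/CSET on each half and CSELs that keep the smaller value.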
define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_min:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB8_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    cmp x9, x2
; CHECK-NEXT:    cset w10, ls
; CHECK-NEXT:    cmp x8, x3
; CHECK-NEXT:    cset w11, le
; CHECK-NEXT:    csel w10, w10, w11, eq
; CHECK-NEXT:    cmp w10, #0
; CHECK-NEXT:    csel x10, x8, x3, ne
; CHECK-NEXT:    csel x11, x9, x2, ne
; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
; CHECK-NEXT:    cbnz w12, .LBB8_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw min i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

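; i128 atomicrmw max (signed) with seq_cst ordering: same CMP/CSET/CSEL
; pattern, keeping the larger value.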
define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_max:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB9_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    cmp x9, x2
; CHECK-NEXT:    cset w10, hi
; CHECK-NEXT:    cmp x8, x3
; CHECK-NEXT:    cset w11, gt
; CHECK-NEXT:    csel w10, w10, w11, eq
; CHECK-NEXT:    cmp w10, #0
; CHECK-NEXT:    csel x10, x8, x3, ne
; CHECK-NEXT:    csel x11, x9, x2, ne
; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
; CHECK-NEXT:    cbnz w12, .LBB9_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw max i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

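; i128 atomicrmw umin (unsigned) with seq_cst ordering: both halves are
; compared unsigned before the conditional selects.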
define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_umin:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB10_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    cmp x9, x2
; CHECK-NEXT:    cset w10, ls
; CHECK-NEXT:    cmp x8, x3
; CHECK-NEXT:    cset w11, ls
; CHECK-NEXT:    csel w10, w10, w11, eq
; CHECK-NEXT:    cmp w10, #0
; CHECK-NEXT:    csel x10, x8, x3, ne
; CHECK-NEXT:    csel x11, x9, x2, ne
; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
; CHECK-NEXT:    cbnz w12, .LBB10_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw umin i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

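; i128 atomicrmw umax (unsigned) with seq_cst ordering: both halves are
; compared unsigned, keeping the larger value.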
define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK-LABEL: fetch_and_umax:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB11_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x9, x8, [x0]
; CHECK-NEXT:    cmp x9, x2
; CHECK-NEXT:    cset w10, hi
; CHECK-NEXT:    cmp x8, x3
; CHECK-NEXT:    cset w11, hi
; CHECK-NEXT:    csel w10, w10, w11, eq
; CHECK-NEXT:    cmp w10, #0
; CHECK-NEXT:    csel x10, x8, x3, ne
; CHECK-NEXT:    csel x11, x9, x2, ne
; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
; CHECK-NEXT:    cbnz w12, .LBB11_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    adrp x10, :got:var
; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var]
; CHECK-NEXT:    stp x9, x8, [x10]
; CHECK-NEXT:    ret

  %val = atomicrmw umax i128* %p, i128 %bits seq_cst
  store i128 %val, i128* @var, align 16
  ret void
}

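; Sequentially consistent i128 atomic load: expanded to an LDAXP/STLXP loop
; that writes the loaded value straight back.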
define i128 @atomic_load_seq_cst(i128* %p) {
; CHECK-LABEL: atomic_load_seq_cst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, x0
; CHECK-NEXT:  .LBB12_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp x0, x1, [x8]
; CHECK-NEXT:    stlxp w9, x0, x1, [x8]
; CHECK-NEXT:    cbnz w9, .LBB12_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    ret
  %r = load atomic i128, i128* %p seq_cst, align 16
  ret i128 %r
}

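; Monotonic i128 atomic load: the two unused i64 arguments force the pointer
; into x2; expanded to an LDXP/STXP loop.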
define i128 @atomic_load_relaxed(i64, i64, i128* %p) {
; CHECK-LABEL: atomic_load_relaxed:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB13_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldxp x0, x1, [x2]
; CHECK-NEXT:    stxp w8, x0, x1, [x2]
; CHECK-NEXT:    cbnz w8, .LBB13_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    ret
  %r = load atomic i128, i128* %p monotonic, align 16
  ret i128 %r
}


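; Sequentially consistent i128 atomic store: an LDAXP/STLXP loop in which the
; loaded value is discarded (low half into xzr).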
define void @atomic_store_seq_cst(i128 %in, i128* %p) {
; CHECK-LABEL: atomic_store_seq_cst:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB14_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldaxp xzr, x8, [x2]
; CHECK-NEXT:    stlxp w8, x0, x1, [x2]
; CHECK-NEXT:    cbnz w8, .LBB14_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    ret
  store atomic i128 %in, i128* %p seq_cst, align 16
  ret void
}

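; Release i128 atomic store: LDXP for the discarded load, STLXP for the
; release store.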
define void @atomic_store_release(i128 %in, i128* %p) {
; CHECK-LABEL: atomic_store_release:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB15_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldxp xzr, x8, [x2]
; CHECK-NEXT:    stlxp w8, x0, x1, [x2]
; CHECK-NEXT:    cbnz w8, .LBB15_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    ret
  store atomic i128 %in, i128* %p release, align 16
  ret void
}

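; Unordered i128 atomic store: an LDXP/STXP loop with no ordering on either
; access.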
define void @atomic_store_relaxed(i128 %in, i128* %p) {
; CHECK-LABEL: atomic_store_relaxed:
; CHECK:       // %bb.0:
; CHECK-NEXT:  .LBB16_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    ldxp xzr, x8, [x2]
; CHECK-NEXT:    stxp w8, x0, x1, [x2]
; CHECK-NEXT:    cbnz w8, .LBB16_1
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    ret
  store atomic i128 %in, i128* %p unordered, align 16
  ret void
}

; Since we store the original value back to ensure there is no tearing in the
; unsuccessful case, the registers used for that store must not be xzr.
define void @cmpxchg_dead(i128* %ptr, i128 %desired, i128 %new) {
; NOOUTLINE-LABEL: cmpxchg_dead:
; NOOUTLINE:       // %bb.0:
; NOOUTLINE-NEXT:  .LBB17_1: // =>This Inner Loop Header: Depth=1
; NOOUTLINE-NEXT:    ldxp x8, x9, [x0]
; NOOUTLINE-NEXT:    cmp x8, x2
; NOOUTLINE-NEXT:    cset w10, ne
; NOOUTLINE-NEXT:    cmp x9, x3
; NOOUTLINE-NEXT:    cinc w10, w10, ne
; NOOUTLINE-NEXT:    cbz w10, .LBB17_3
; NOOUTLINE-NEXT:  // %bb.2: // in Loop: Header=BB17_1 Depth=1
; NOOUTLINE-NEXT:    stxp w10, x8, x9, [x0]
; NOOUTLINE-NEXT:    cbnz w10, .LBB17_1
; NOOUTLINE-NEXT:    b .LBB17_4
; NOOUTLINE-NEXT:  .LBB17_3: // in Loop: Header=BB17_1 Depth=1
; NOOUTLINE-NEXT:    stxp w10, x4, x5, [x0]
; NOOUTLINE-NEXT:    cbnz w10, .LBB17_1
; NOOUTLINE-NEXT:  .LBB17_4:
; NOOUTLINE-NEXT:    ret
;
; OUTLINE-LABEL: cmpxchg_dead:
; OUTLINE:       // %bb.0:
; OUTLINE-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE-NEXT:    .cfi_def_cfa_offset 16
; OUTLINE-NEXT:    .cfi_offset w30, -16
; OUTLINE-NEXT:    mov x1, x3
; OUTLINE-NEXT:    mov x8, x0
; OUTLINE-NEXT:    mov x0, x2
; OUTLINE-NEXT:    mov x2, x4
; OUTLINE-NEXT:    mov x3, x5
; OUTLINE-NEXT:    mov x4, x8
; OUTLINE-NEXT:    bl __aarch64_cas16_relax
; OUTLINE-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE-NEXT:    ret
;
; LSE-LABEL: cmpxchg_dead:
; LSE:       // %bb.0:
; LSE-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
; LSE-NEXT:    // kill: def $x4 killed $x4 killed $x4_x5 def $x4_x5
; LSE-NEXT:    // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3
; LSE-NEXT:    casp x2, x3, x4, x5, [x0]
; LSE-NEXT:    ret
  cmpxchg i128* %ptr, i128 %desired, i128 %new monotonic monotonic
  ret void
}