// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
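// Check that __builtin_assume_aligned and __attribute__((assume_aligned)) are
// lowered to llvm.assume calls carrying "align" operand bundles (or to a plain
// `align` return attribute where that is sufficient).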

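// test1: the offset argument is a constant 0 of type unsigned long long; it is
// emitted as the third "align" bundle operand (i64 0) next to the i64 32 alignment.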
// CHECK-LABEL: @test1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test1(int *a) {
  a = __builtin_assume_aligned(a, 32, 0ull);
  return a[0];
}

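// test2: same as test1 but with a plain int 0 offset; the bundle operands are identical.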
// CHECK-LABEL: @test2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test2(int *a) {
  a = __builtin_assume_aligned(a, 32, 0);
  return a[0];
}

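// test3: with no offset argument, the "align" bundle carries only the pointer and the alignment.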
// CHECK-LABEL: @test3(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32) ]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test3(int *a) {
  a = __builtin_assume_aligned(a, 32);
  return a[0];
}

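// test4: a non-constant offset is sign-extended to i64 and passed as the third bundle operand.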
// CHECK-LABEL: @test4(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    store i32 [[B:%.*]], i32* [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP2]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 [[CONV]]) ]
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT:    store i32* [[TMP3]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 0
// CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test4(int *a, int b) {
  a = __builtin_assume_aligned(a, 32, b);
  return a[0];
}

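// test5: assume_aligned(64) with no offset can be expressed directly as an
// `align 64` attribute on the call's return value, so no llvm.assume is emitted.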
int *m1(void) __attribute__((assume_aligned(64)));

// CHECK-LABEL: @test5(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[CALL:%.*]] = call align 64 i32* @m1()
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP0]]
//
int test5(void) {
  return *m1();
}

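// test6: assume_aligned(64, 12) has a nonzero offset, which a return attribute
// cannot encode, so an llvm.assume with an "align" bundle follows the call.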
int *m2(void) __attribute__((assume_aligned(64, 12)));

// CHECK-LABEL: @test6(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2()
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 64, i64 12) ]
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP0]]
//
int test6(void) {
  return *m2();
}

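// pr43638: a requested alignment of 4294967296 (1ULL << 32) must survive as a
// full 64-bit value (i64 4294967296) in the "align" bundle, not be truncated.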
// CHECK-LABEL: @pr43638(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT:    store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 4294967296) ]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int pr43638(int *a) {
  a = __builtin_assume_aligned(a, 4294967296);
  return a[0];
}