; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Verify that the result of memcmp calls with past-the-end pointers
; don't cause trouble and are optimally folded.

declare i32 @memcmp(i8*, i8*, i64)

@a5 = constant [5 x i8] c"12345";

; Fold memcmp(a5, a5 + 5, n) to 0 on the assumption that n is 0 otherwise
; the call would be undefined.

define i32 @fold_memcmp_a5_a5p5_n(i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5_a5p5_n(
; CHECK-NEXT:    ret i32 0
;
  %pa5_p0 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 0
  %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
  %cmp = call i32 @memcmp(i8* %pa5_p0, i8* %pa5_p5, i64 %n)
  ret i32 %cmp
}

; Same as above but for memcmp(a5 + 5, a5 + 5, n).

define i32 @fold_memcmp_a5p5_a5p5_n(i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5p5_a5p5_n(
; CHECK-NEXT:    ret i32 0
;
  %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
  %qa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
  %cmp = call i32 @memcmp(i8* %pa5_p5, i8* %qa5_p5, i64 %n)
  ret i32 %cmp
}

; TODO: Likewise, fold memcmp(a5 + i, a5 + 5, n) to 0 on same basis.

define i32 @fold_memcmp_a5pi_a5p5_n(i32 %i, i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5pi_a5p5_n(
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[PA5_PI:%.*]] = getelementptr [5 x i8], [5 x i8]* @a5, i64 0, i64 [[TMP1]]
; CHECK-NEXT:    [[CMP:%.*]] = call i32 @memcmp(i8* [[PA5_PI]], i8* getelementptr inbounds ([5 x i8], [5 x i8]* @a5, i64 1, i64 0), i64 [[N:%.*]])
; CHECK-NEXT:    ret i32 [[CMP]]
;
  %pa5_pi = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 %i
  %pa5_p5 = getelementptr [5 x i8], [5 x i8]* @a5, i32 0, i32 5
  %cmp = call i32 @memcmp(i8* %pa5_pi, i8* %pa5_p5, i64 %n)
  ret i32 %cmp
}