; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=1 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=X64 --check-prefix=X64_1LD
; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=2 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=X64 --check-prefix=X64_2LD

declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)

define i32 @cmp2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp2(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
; X64-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
; X64-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32
; X64-NEXT: [[TMP8:%.*]] = zext i16 [[TMP6]] to i32
; X64-NEXT: [[TMP9:%.*]] = sub i32 [[TMP7]], [[TMP8]]
; X64-NEXT: ret i32 [[TMP9]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 2)
  ret i32 %call
}

define i32 @cmp2_align2(i8* nocapture readonly align 2 %x, i8* nocapture readonly align 2 %y) {
; X64-LABEL: @cmp2_align2(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
; X64-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
; X64-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
; X64-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
; X64-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32
; X64-NEXT: [[TMP8:%.*]] = zext i16 [[TMP6]] to i32
; X64-NEXT: [[TMP9:%.*]] = sub i32 [[TMP7]], [[TMP8]]
; X64-NEXT: ret i32 [[TMP9]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 2)
  ret i32 %call
}

define i32 @cmp3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp3(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[TMP1:%.*]] = icmp ult i16 [[TMP7:%.*]], [[TMP8:%.*]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64-NEXT: [[TMP5:%.*]] = load i16, i16* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i16, i16* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i16 @llvm.bswap.i16(i16 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i16 @llvm.bswap.i16(i16 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i16 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 2
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 2
; X64-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1
; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]], align 1
; X64-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
; X64-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
; X64-NEXT: [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
; X64-NEXT: br label [[ENDBLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP16]], [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
  ret i32 %call
}

define i32 @cmp4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp4(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
; X64-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]])
; X64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32
; X64-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
; X64-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]]
; X64-NEXT: ret i32 [[TMP11]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
  ret i32 %call
}

define i32 @cmp5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp5(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP7:%.*]], [[TMP8:%.*]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i32 @llvm.bswap.i32(i32 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i32 @llvm.bswap.i32(i32 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1
; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]], align 1
; X64-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
; X64-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
; X64-NEXT: [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
; X64-NEXT: br label [[ENDBLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP16]], [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
  ret i32 %call
}

define i32 @cmp6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp6(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i32 @llvm.bswap.i32(i32 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i32 @llvm.bswap.i32(i32 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i16*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i16*
; X64-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]], align 1
; X64-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
; X64-NEXT: [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
; X64-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i32
; X64-NEXT: [[TMP19]] = zext i16 [[TMP17]] to i32
; X64-NEXT: [[TMP20:%.*]] = icmp eq i32 [[TMP18]], [[TMP19]]
; X64-NEXT: br i1 [[TMP20]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
  ret i32 %call
}

define i32 @cmp7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp7(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i32 @llvm.bswap.i32(i32 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i32 @llvm.bswap.i32(i32 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 3
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 3
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i32*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i32*
; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 1
; X64-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i32 @llvm.bswap.i32(i32 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i32 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
  ret i32 %call
}

define i32 @cmp8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp8(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
; X64-NEXT: [[TMP6:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]])
; X64-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP5]], [[TMP6]]
; X64-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32
; X64-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
; X64-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]]
; X64-NEXT: ret i32 [[TMP11]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
  ret i32 %call
}

define i32 @cmp9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp9(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP7:%.*]], [[TMP8:%.*]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1
; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]], align 1
; X64-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
; X64-NEXT: [[TMP15:%.*]] = zext i8 [[TMP13]] to i32
; X64-NEXT: [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
; X64-NEXT: br label [[ENDBLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP16]], [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
  ret i32 %call
}

define i32 @cmp10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp10(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i16*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i16*
; X64-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]], align 1
; X64-NEXT: [[TMP16:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP14]])
; X64-NEXT: [[TMP17:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP15]])
; X64-NEXT: [[TMP18]] = zext i16 [[TMP16]] to i64
; X64-NEXT: [[TMP19]] = zext i16 [[TMP17]] to i64
; X64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[TMP18]], [[TMP19]]
; X64-NEXT: br i1 [[TMP20]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
  ret i32 %call
}

define i32 @cmp11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp11(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 3
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 3
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i64*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i64*
; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]], align 1
; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i64 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
  ret i32 %call
}

define i32 @cmp12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp12(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i32*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i32*
; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 1
; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
; X64-NEXT: [[TMP17:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP15]])
; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64
; X64-NEXT: [[TMP19]] = zext i32 [[TMP17]] to i64
; X64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[TMP18]], [[TMP19]]
; X64-NEXT: br i1 [[TMP20]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
  ret i32 %call
}

define i32 @cmp13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp13(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 5
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 5
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i64*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i64*
; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]], align 1
; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i64 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
  ret i32 %call
}

define i32 @cmp14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp14(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 6
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 6
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i64*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i64*
; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]], align 1
; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i64 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
  ret i32 %call
}

define i32 @cmp15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp15(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 7
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 7
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i64*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i64*
; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]], align 1
; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i64 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
  ret i32 %call
}

define i32 @cmp16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp16(
; X64-NEXT: br label [[LOADBB:%.*]]
; X64: res_block:
; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1:%.*]] ]
; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
; X64-NEXT: [[TMP1:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
; X64-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 1
; X64-NEXT: br label [[ENDBLOCK:%.*]]
; X64: loadbb:
; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]], align 1
; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]], align 1
; X64-NEXT: [[TMP7]] = call i64 @llvm.bswap.i64(i64 [[TMP5]])
; X64-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]])
; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]]
; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]]
; X64: loadbb1:
; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to i64*
; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP11]] to i64*
; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 1
; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]], align 1
; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
; X64-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]])
; X64-NEXT: [[TMP18:%.*]] = icmp eq i64 [[TMP16]], [[TMP17]]
; X64-NEXT: br i1 [[TMP18]], label [[ENDBLOCK]], label [[RES_BLOCK]]
; X64: endblock:
; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP2]], [[RES_BLOCK]] ]
; X64-NEXT: ret i32 [[PHI_RES]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
  ret i32 %call
}

define i32 @cmp_eq2(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp_eq2(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = icmp ne i16 [[TMP3]], [[TMP4]]
; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 2)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq3(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq3(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i16 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 2
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2
; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]]
; X64_1LD-NEXT: br i1 [[TMP10]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq3(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 2
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2
; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i16
; X64_2LD-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i16
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i16 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i16 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i16 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 3)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq4(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp_eq4(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq5(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq5(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]]
; X64_1LD-NEXT: br i1 [[TMP10]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq5(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i32
; X64_2LD-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i32
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 5)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq6(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq6(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i16*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i16*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq6(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i16*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i16*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i16 [[TMP10]] to i32
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i16 [[TMP11]] to i32
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i32 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i32 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq6_align4(i8* nocapture readonly align 4 %x, i8* nocapture readonly align 4 %y) {
; X64_1LD-LABEL: @cmp_eq6_align4(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
; X64_1LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i16*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i16*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 4
; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 4
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq6_align4(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i16*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i16*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 4
; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 4
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i16 [[TMP10]] to i32
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i16 [[TMP11]] to i32
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i32 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i32 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 6)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq7(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq7(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i32*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i32*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq7(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i32*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i32*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i32 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 7)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq8(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp_eq8(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 8)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq9(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]]
; X64_1LD-NEXT: br i1 [[TMP10]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq9(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i64
; X64_2LD-NEXT: [[TMP11:%.*]] = zext i8 [[TMP9]] to i64
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 9)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq10(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i16*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i16*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i16 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq10(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i16*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i16*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i16 [[TMP10]] to i64
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i16 [[TMP11]] to i64
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i64 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i64 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 10)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq11(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq11(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 11)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq12(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i32*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i32*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq12(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i32*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i32*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = zext i32 [[TMP10]] to i64
; X64_2LD-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
; X64_2LD-NEXT: [[TMP14:%.*]] = xor i64 [[TMP12]], [[TMP13]]
; X64_2LD-NEXT: [[TMP15:%.*]] = or i64 [[TMP5]], [[TMP14]]
; X64_2LD-NEXT: [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
; X64_2LD-NEXT: [[TMP17:%.*]] = zext i1 [[TMP16]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP17]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 12)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq13(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 5
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 5
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq13(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 5
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 5
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 13)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq14(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 6
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 6
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq14(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 6
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 6
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 14)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64_1LD-LABEL: @cmp_eq15(
; X64_1LD-NEXT: br label [[LOADBB:%.*]]
; X64_1LD: res_block:
; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
; X64_1LD: loadbb:
; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_1LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_1LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
; X64_1LD: loadbb1:
; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 7
; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 7
; X64_1LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i64 [[TMP10]], [[TMP11]]
; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
; X64_1LD: endblock:
; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_1LD-NEXT: ret i32 [[CONV]]
;
; X64_2LD-LABEL: @cmp_eq15(
; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 1
; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 7
; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 7
; X64_2LD-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP6]] to i64*
; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP7]] to i64*
; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 1
; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 1
; X64_2LD-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
; X64_2LD-NEXT: [[TMP13:%.*]] = or i64 [[TMP5]], [[TMP12]]
; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64_2LD-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 15)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @cmp_eq16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X64-LABEL: @cmp_eq16(
; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
; X64-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 1
; X64-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 1
; X64-NEXT: [[TMP5:%.*]] = icmp ne i128 [[TMP3]], [[TMP4]]
; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X64-NEXT: ret i32 [[CONV]]
;
  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
  %cmp = icmp eq i32 %call, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}