; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -S | FileCheck %s

; SLP-vectorizer regression test: the scalar chain of eight
; icmp slt/sgt + zext i1->i16 results summed by a tree of adds is expected
; to be rewritten into <8 x i16> vector compares (the slt and sgt lanes
; merged by a shufflevector) feeding @llvm.vector.reduce.add.v8i16, as the
; autogenerated CHECK lines below assert. Do not edit the CHECK lines by
; hand; regenerate with update_test_checks.py.

define i16 @test(i16 %call37) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = load i16, i16* undef, align 2
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> <i16 poison, i16 0, i16 0, i16 0, i16 poison, i16 0, i16 0, i16 0>, i16 [[CALL37:%.*]], i32 4
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[CALL]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> <i16 0, i16 0, i16 0, i16 poison, i16 0, i16 0, i16 poison, i16 0>, i16 [[CALL37]], i32 3
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[CALL37]], i32 6
; CHECK-NEXT: [[TMP4:%.*]] = icmp slt <8 x i16> [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <8 x i16> [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i1> [[TMP4]], <8 x i1> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 12, i32 5, i32 14, i32 7>
; CHECK-NEXT: [[TMP7:%.*]] = zext <8 x i1> [[TMP6]] to <8 x i16>
; CHECK-NEXT: [[TMP8:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP7]])
; CHECK-NEXT: [[OP_EXTRA:%.*]] = add i16 [[TMP8]], 0
; CHECK-NEXT: ret i16 [[OP_EXTRA]]
;
entry:
  %call = load i16, i16* undef, align 2
  ; Eight comparison results, each an i1 widened to i16. Several compare
  ; constant-zero operands; the interesting lanes involve %call and the
  ; %call37 argument.
  %0 = icmp slt i16 %call, 0
  %cond = zext i1 %0 to i16
  %1 = add i16 %cond, 0
  %2 = icmp slt i16 0, 0
  %cond32 = zext i1 %2 to i16
  %3 = add i16 %1, %cond32
  %.not = icmp sgt i16 0, %call37
  %cond55 = zext i1 %.not to i16
  %4 = icmp sgt i16 %call37, 0
  %cond76 = zext i1 %4 to i16
  %5 = icmp slt i16 0, 0
  %cond97 = zext i1 %5 to i16
  %.not206 = icmp sgt i16 0, %call37
  %cond120 = zext i1 %.not206 to i16
  %6 = icmp sgt i16 0, 0
  %cond141 = zext i1 %6 to i16
  %7 = icmp slt i16 0, 0
  %cond162 = zext i1 %7 to i16
  ; Reduction tree summing the widened flags; the vectorizer is expected to
  ; collapse this into the single vector.reduce.add checked above.
  %8 = add i16 %3, %cond97
  %9 = add i16 %8, %cond55
  %10 = add i16 %9, %cond76
  %11 = add i16 %10, %cond162
  %12 = add i16 %11, %cond120
  %13 = add i16 %12, %cond141
  ret i16 %13
}