// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o /dev/null %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

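// svdupq_lane_<t>(data, index) replicates the 128-bit quadword of 'data'
// selected by 'index' across every quadword of the result. Each form below is
// expected to lower to a single @llvm.aarch64.sve.dupq.lane call.
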
// CHECK-LABEL: @test_svdupq_lane_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdupq_lane_s8u10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svdupq_lane_s8(svint8_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s8,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_s16u11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svdupq_lane_s16(svint16_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s16,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_s32u11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svdupq_lane_s32(svint32_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s32,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_s64u11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svdupq_lane_s64(svint64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s64,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdupq_lane_u8u11__SVUint8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svdupq_lane_u8(svuint8_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u8,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_u16u12__SVUint16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svuint16_t test_svdupq_lane_u16(svuint16_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u16,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_u32u12__SVUint32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svdupq_lane_u32(svuint32_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u32,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_u64u12__SVUint64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svuint64_t test_svdupq_lane_u64(svuint64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u64,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_f16u13__SVFloat16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_svdupq_lane_f16(svfloat16_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_f16,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_f32u13__SVFloat32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_svdupq_lane_f32(svfloat32_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_f32,,)(data, index);
}

// CHECK-LABEL: @test_svdupq_lane_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svdupq_lane_f64u13__SVFloat64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_f64,,)(data, index);
}

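// svdupq[_n]_<t>(x0, ..., xN) builds one 128-bit quadword from scalar
// arguments and replicates it across the vector. The expected lowering
// assembles the scalars with insertelement, widens the fixed vector into a
// scalable one via @llvm.vector.insert, and broadcasts it with
// @llvm.aarch64.sve.dupq.lane at index 0.
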
// CHECK-LABEL: @test_svdupq_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
// CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
// CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
// CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
// CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
// CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
// CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
// CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
// CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
//
// CPP-CHECK-LABEL: @_Z16test_svdupq_n_s8aaaaaaaaaaaaaaaa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
// CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
// CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
// CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
// CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
// CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
// CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
// CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
// CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
//
svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
                          int8_t x4, int8_t x5, int8_t x6, int8_t x7,
                          int8_t x8, int8_t x9, int8_t x10, int8_t x11,
                          int8_t x12, int8_t x13, int8_t x14, int8_t x15)
{
  return SVE_ACLE_FUNC(svdupq,_n,_s8,)(x0, x1, x2, x3, x4, x5, x6, x7,
                                       x8, x9, x10, x11, x12, x13, x14, x15);
}

// CHECK-LABEL: @test_svdupq_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
// CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_s16ssssssss(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
// CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
//
svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
                            int16_t x4, int16_t x5, int16_t x6, int16_t x7)
{
  return SVE_ACLE_FUNC(svdupq,_n,_s16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

// CHECK-LABEL: @test_svdupq_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
// CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_s32iiii(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
{
  return SVE_ACLE_FUNC(svdupq,_n,_s32,)(x0, x1, x2, x3);
}

// CHECK-LABEL: @test_svdupq_n_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_s64ll(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_s64,)(x0, x1);
}

// CHECK-LABEL: @test_svdupq_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
// CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
// CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
// CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
// CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
// CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
// CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
// CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
// CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
//
// CPP-CHECK-LABEL: @_Z16test_svdupq_n_u8hhhhhhhhhhhhhhhh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
// CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
// CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
// CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
// CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
// CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
// CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
// CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
// CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
//
svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
                           uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7,
                           uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11,
                           uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15)
{
  return SVE_ACLE_FUNC(svdupq,_n,_u8,)(x0, x1, x2, x3, x4, x5, x6, x7,
                                       x8, x9, x10, x11, x12, x13, x14, x15);
}

// CHECK-LABEL: @test_svdupq_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
// CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_u16tttttttt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> undef, i16 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP7]], i64 0)
// CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
//
svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
                             uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7)
{
  return SVE_ACLE_FUNC(svdupq,_n,_u16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

// CHECK-LABEL: @test_svdupq_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
// CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_u32jjjj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> undef, i32 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP3]], i64 0)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
{
  return SVE_ACLE_FUNC(svdupq,_n,_u32,)(x0, x1, x2, x3);
}

// CHECK-LABEL: @test_svdupq_n_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_u64mm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP1]], i64 0)
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_u64,)(x0, x1);
}

// CHECK-LABEL: @test_svdupq_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x half> undef, half [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[X4:%.*]], i64 4
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7
// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[TMP7]], i64 0)
// CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP8]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP9]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_f16DhDhDhDhDhDhDhDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x half> undef, half [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[X4:%.*]], i64 4
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7
// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> [[TMP7]], i64 0)
// CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP8]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP9]]
//
svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3,
                              float16_t x4, float16_t x5, float16_t x6, float16_t x7)
{
  return SVE_ACLE_FUNC(svdupq,_n,_f16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

// CHECK-LABEL: @test_svdupq_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[TMP3]], i64 0)
// CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_f32ffff(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> undef, float [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> [[TMP3]], i64 0)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
//
svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3)
{
  return SVE_ACLE_FUNC(svdupq,_n,_f32,)(x0, x1, x2, x3);
}

// CHECK-LABEL: @test_svdupq_n_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP1]], i64 0)
// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_f64dd(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP1]], i64 0)
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
//
svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_f64,)(x0, x1);
}

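// The predicate forms svdupq[_n]_b8/_b16/_b32/_b64 take bool arguments. They
// reuse the integer lowering above and then materialise the predicate by
// comparing the duplicated vector with zero (cmpne.wide, or cmpne for the
// 64-bit form) under an all-true ptrue, converting the result to svbool_t for
// the forms whose element width is wider than 8 bits.
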
// CHECK-LABEL: @test_svdupq_n_b8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[X4:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL5:%.*]] = zext i1 [[X5:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL6:%.*]] = zext i1 [[X6:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[X7:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL8:%.*]] = zext i1 [[X8:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[X9:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL10:%.*]] = zext i1 [[X10:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL11:%.*]] = zext i1 [[X11:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL12:%.*]] = zext i1 [[X12:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[X13:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[X14:%.*]] to i8
// CHECK-NEXT: [[FROMBOOL15:%.*]] = zext i1 [[X15:%.*]] to i8
// CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[FROMBOOL]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[FROMBOOL1]], i64 1
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[FROMBOOL2]], i64 2
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[FROMBOOL3]], i64 3
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[FROMBOOL4]], i64 4
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[FROMBOOL5]], i64 5
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[FROMBOOL6]], i64 6
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[FROMBOOL7]], i64 7
// CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[FROMBOOL8]], i64 8
// CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[FROMBOOL9]], i64 9
// CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[FROMBOOL10]], i64 10
// CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[FROMBOOL11]], i64 11
// CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[FROMBOOL12]], i64 12
// CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[FROMBOOL13]], i64 13
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15
// CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
// CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
// CHECK-NEXT: [[TMP18:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP17]], i64 0)
// CHECK-NEXT: [[TMP19:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP19]]
//
// CPP-CHECK-LABEL: @_Z16test_svdupq_n_b8bbbbbbbbbbbbbbbb(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[X4:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL5:%.*]] = zext i1 [[X5:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL6:%.*]] = zext i1 [[X6:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[X7:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL8:%.*]] = zext i1 [[X8:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[X9:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL10:%.*]] = zext i1 [[X10:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL11:%.*]] = zext i1 [[X11:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL12:%.*]] = zext i1 [[X12:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[X13:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[X14:%.*]] to i8
// CPP-CHECK-NEXT: [[FROMBOOL15:%.*]] = zext i1 [[X15:%.*]] to i8
// CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> undef, i8 [[FROMBOOL]], i64 0
// CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[FROMBOOL1]], i64 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[FROMBOOL2]], i64 2
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[FROMBOOL3]], i64 3
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[FROMBOOL4]], i64 4
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[FROMBOOL5]], i64 5
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[FROMBOOL6]], i64 6
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[FROMBOOL7]], i64 7
// CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[FROMBOOL8]], i64 8
// CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[FROMBOOL9]], i64 9
// CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[FROMBOOL10]], i64 10
// CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[FROMBOOL11]], i64 11
// CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[FROMBOOL12]], i64 12
// CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[FROMBOOL13]], i64 13
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15
// CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
// CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[TMP15]], i64 0)
// CPP-CHECK-NEXT: [[TMP18:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP17]], i64 0)
// CPP-CHECK-NEXT: [[TMP19:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP19]]
//
svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
                          bool x4, bool x5, bool x6, bool x7,
                          bool x8, bool x9, bool x10, bool x11,
                          bool x12, bool x13, bool x14, bool x15)
{
  return SVE_ACLE_FUNC(svdupq,_n,_b8,)(x0, x1, x2, x3, x4, x5, x6, x7,
                                       x8, x9, x10, x11, x12, x13, x14, x15);
}

// CHECK-LABEL: @test_svdupq_n_b16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[X0:%.*]] to i16
// CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[X1:%.*]] to i16
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[X2:%.*]] to i16
// CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[X3:%.*]] to i16
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[X4:%.*]] to i16
// CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[X5:%.*]] to i16
// CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[X6:%.*]] to i16
// CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[X7:%.*]] to i16
// CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x i16> undef, i16 [[TMP0]], i64 0
// CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x i16> [[TMP8]], i16 [[TMP1]], i64 1
// CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x i16> [[TMP9]], i16 [[TMP2]], i64 2
// CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x i16> [[TMP10]], i16 [[TMP3]], i64 3
// CHECK-NEXT: [[TMP12:%.*]] = insertelement <8 x i16> [[TMP11]], i16 [[TMP4]], i64 4
// CHECK-NEXT: [[TMP13:%.*]] = insertelement <8 x i16> [[TMP12]], i16 [[TMP5]], i64 5
// CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6
// CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7
// CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
// CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP15]], i64 0)
// CHECK-NEXT: [[TMP18:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP17]], i64 0)
// CHECK-NEXT: [[TMP19:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> [[TMP16]], <vscale x 8 x i16> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: [[TMP20:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP19]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP20]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_b16bbbbbbbb(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[X0:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[X1:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[X2:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[X3:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[X4:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[X5:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[X6:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[X7:%.*]] to i16
// CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x i16> undef, i16 [[TMP0]], i64 0
// CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x i16> [[TMP8]], i16 [[TMP1]], i64 1
// CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x i16> [[TMP9]], i16 [[TMP2]], i64 2
// CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x i16> [[TMP10]], i16 [[TMP3]], i64 3
// CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <8 x i16> [[TMP11]], i16 [[TMP4]], i64 4
// CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <8 x i16> [[TMP12]], i16 [[TMP5]], i64 5
// CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6
// CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7
// CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
// CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> [[TMP15]], i64 0)
// CPP-CHECK-NEXT: [[TMP18:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP17]], i64 0)
// CPP-CHECK-NEXT: [[TMP19:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> [[TMP16]], <vscale x 8 x i16> [[TMP18]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP20:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP19]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP20]]
//
svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
                           bool x4, bool x5, bool x6, bool x7)
{
  return SVE_ACLE_FUNC(svdupq,_n,_b16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}

// CHECK-LABEL: @test_svdupq_n_b32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[X0:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[X1:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[X2:%.*]] to i32
// CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[X3:%.*]] to i32
// CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP0]], i64 0
// CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP1]], i64 1
// CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2
// CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3
// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP7]], i64 0)
// CHECK-NEXT: [[TMP10:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP9]], i64 0)
// CHECK-NEXT: [[TMP11:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> [[TMP8]], <vscale x 4 x i32> [[TMP10]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: [[TMP12:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP11]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP12]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_b32bbbb(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[X0:%.*]] to i32
// CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[X1:%.*]] to i32
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[X2:%.*]] to i32
// CPP-CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[X3:%.*]] to i32
// CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP0]], i64 0
// CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP1]], i64 1
// CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2
// CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3
// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> [[TMP7]], i64 0)
// CPP-CHECK-NEXT: [[TMP10:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP9]], i64 0)
// CPP-CHECK-NEXT: [[TMP11:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> [[TMP8]], <vscale x 4 x i32> [[TMP10]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP12:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP11]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP12]]
//
svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
{
  return SVE_ACLE_FUNC(svdupq,_n,_b32,)(x0, x1, x2, x3);
}

// CHECK-LABEL: @test_svdupq_n_b64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[X0:%.*]] to i64
// CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[X1:%.*]] to i64
// CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0
// CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1
// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP3]], i64 0)
// CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
// CHECK-NEXT: [[TMP7:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP4]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP7]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
//
// CPP-CHECK-LABEL: @_Z17test_svdupq_n_b64bb(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[X0:%.*]] to i64
// CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[X1:%.*]] to i64
// CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0
// CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1
// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP3]], i64 0)
// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP5]], i64 0)
// CPP-CHECK-NEXT: [[TMP7:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP4]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP7]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
//
svbool_t test_svdupq_n_b64(bool x0, bool x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_b64,)(x0, x1);
}