; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi soft %s -o - | FileCheck %s -check-prefix SOFT
; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi hard %s -o - | FileCheck %s -check-prefix HARD

define i64 @test_i64_f64(double %p) {
; SOFT-LABEL: test_i64_f64:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vadd.f64 d16, d16, d16
; SOFT-NEXT:    vmov r0, r2, d16
; SOFT-NEXT:    adds r1, r0, r0
; SOFT-NEXT:    adc r0, r2, r2
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_i64_f64:
; HARD:       @ %bb.0:
; HARD-NEXT:    vadd.f64 d16, d0, d0
; HARD-NEXT:    vmov r0, r2, d16
; HARD-NEXT:    adds r1, r0, r0
; HARD-NEXT:    adc r0, r2, r2
; HARD-NEXT:    bx lr
  %1 = fadd double %p, %p
  %2 = bitcast double %1 to i64
  %3 = add i64 %2, %2
  ret i64 %3
}

define i64 @test_i64_v1i64(<1 x i64> %p) {
; SOFT-LABEL: test_i64_v1i64:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vadd.i64 d16, d16, d16
; SOFT-NEXT:    vmov r0, r2, d16
; SOFT-NEXT:    adds r1, r0, r0
; SOFT-NEXT:    adc r0, r2, r2
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_i64_v1i64:
; HARD:       @ %bb.0:
; HARD-NEXT:    vadd.i64 d16, d0, d0
; HARD-NEXT:    vmov r0, r2, d16
; HARD-NEXT:    adds r1, r0, r0
; HARD-NEXT:    adc r0, r2, r2
; HARD-NEXT:    bx lr
  %1 = add <1 x i64> %p, %p
  %2 = bitcast <1 x i64> %1 to i64
  %3 = add i64 %2, %2
  ret i64 %3
}

define i64 @test_i64_v2f32(<2 x float> %p) {
; SOFT-LABEL: test_i64_v2f32:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vadd.f32 d16, d16, d16
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vmov r0, r2, d16
; SOFT-NEXT:    adds r1, r0, r0
; SOFT-NEXT:    adc r0, r2, r2
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_i64_v2f32:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.32 d16, d0
; HARD-NEXT:    vadd.f32 d16, d16, d16
; HARD-NEXT:    vrev64.32 d16, d16
; HARD-NEXT:    vmov r0, r2, d16
; HARD-NEXT:    adds r1, r0, r0
; HARD-NEXT:    adc r0, r2, r2
; HARD-NEXT:    bx lr
  %1 = fadd <2 x float> %p, %p
  %2 = bitcast <2 x float> %1 to i64
  %3 = add i64 %2, %2
  ret i64 %3
}

define i64 @test_i64_v2i32(<2 x i32> %p) {
; SOFT-LABEL: test_i64_v2i32:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vadd.i32 d16, d16, d16
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vmov r0, r2, d16
; SOFT-NEXT:    adds r1, r0, r0
; SOFT-NEXT:    adc r0, r2, r2
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_i64_v2i32:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.32 d16, d0
; HARD-NEXT:    vadd.i32 d16, d16, d16
; HARD-NEXT:    vrev64.32 d16, d16
; HARD-NEXT:    vmov r0, r2, d16
; HARD-NEXT:    adds r1, r0, r0
; HARD-NEXT:    adc r0, r2, r2
; HARD-NEXT:    bx lr
  %1 = add <2 x i32> %p, %p
  %2 = bitcast <2 x i32> %1 to i64
  %3 = add i64 %2, %2
  ret i64 %3
}

define i64 @test_i64_v4i16(<4 x i16> %p) {
; SOFT-LABEL: test_i64_v4i16:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.16 d16, d16
; SOFT-NEXT:    vadd.i16 d16, d16, d16
; SOFT-NEXT:    vrev64.16 d16, d16
; SOFT-NEXT:    vmov r0, r2, d16
; SOFT-NEXT:    adds r1, r0, r0
; SOFT-NEXT:    adc r0, r2, r2
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_i64_v4i16:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.16 d16, d0
; HARD-NEXT:    vadd.i16 d16, d16, d16
; HARD-NEXT:    vrev64.16 d16, d16
; HARD-NEXT:    vmov r0, r2, d16
; HARD-NEXT:    adds r1, r0, r0
; HARD-NEXT:    adc r0, r2, r2
; HARD-NEXT:    bx lr
  %1 = add <4 x i16> %p, %p
  %2 = bitcast <4 x i16> %1 to i64
  %3 = add i64 %2, %2
  ret i64 %3
}

define i64 @test_i64_v8i8(<8 x i8> %p) {
; SOFT-LABEL: test_i64_v8i8:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.8 d16, d16
; SOFT-NEXT:    vadd.i8 d16, d16, d16
; SOFT-NEXT:    vrev64.8 d16, d16
; SOFT-NEXT:    vmov r0, r2, d16
; SOFT-NEXT:    adds r1, r0, r0
; SOFT-NEXT:    adc r0, r2, r2
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_i64_v8i8:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.8 d16, d0
; HARD-NEXT:    vadd.i8 d16, d16, d16
; HARD-NEXT:    vrev64.8 d16, d16
; HARD-NEXT:    vmov r0, r2, d16
; HARD-NEXT:    adds r1, r0, r0
; HARD-NEXT:    adc r0, r2, r2
; HARD-NEXT:    bx lr
  %1 = add <8 x i8> %p, %p
  %2 = bitcast <8 x i8> %1 to i64
  %3 = add i64 %2, %2
  ret i64 %3
}

define double @test_f64_i64(i64 %p) {
; SOFT-LABEL: test_f64_i64:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    adds r1, r1, r1
; SOFT-NEXT:    adc r0, r0, r0
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vadd.f64 d16, d16, d16
; SOFT-NEXT:    vmov r1, r0, d16
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_f64_i64:
; HARD:       @ %bb.0:
; HARD-NEXT:    adds r1, r1, r1
; HARD-NEXT:    adc r0, r0, r0
; HARD-NEXT:    vmov d16, r1, r0
; HARD-NEXT:    vadd.f64 d0, d16, d16
; HARD-NEXT:    bx lr
  %1 = add i64 %p, %p
  %2 = bitcast i64 %1 to double
  %3 = fadd double %2, %2
  ret double %3
}

define double @test_f64_v1i64(<1 x i64> %p) {
; SOFT-LABEL: test_f64_v1i64:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vadd.i64 d16, d16, d16
; SOFT-NEXT:    vadd.f64 d16, d16, d16
; SOFT-NEXT:    vmov r1, r0, d16
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_f64_v1i64:
; HARD:       @ %bb.0:
; HARD-NEXT:    vadd.i64 d16, d0, d0
; HARD-NEXT:    vadd.f64 d0, d16, d16
; HARD-NEXT:    bx lr
  %1 = add <1 x i64> %p, %p
  %2 = bitcast <1 x i64> %1 to double
  %3 = fadd double %2, %2
  ret double %3
}

define double @test_f64_v2f32(<2 x float> %p) {
; SOFT-LABEL: test_f64_v2f32:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vadd.f32 d16, d16, d16
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vadd.f64 d16, d16, d16
; SOFT-NEXT:    vmov r1, r0, d16
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_f64_v2f32:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.32 d16, d0
; HARD-NEXT:    vadd.f32 d16, d16, d16
; HARD-NEXT:    vrev64.32 d16, d16
; HARD-NEXT:    vadd.f64 d0, d16, d16
; HARD-NEXT:    bx lr
  %1 = fadd <2 x float> %p, %p
  %2 = bitcast <2 x float> %1 to double
  %3 = fadd double %2, %2
  ret double %3
}

define double @test_f64_v2i32(<2 x i32> %p) {
; SOFT-LABEL: test_f64_v2i32:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vadd.i32 d16, d16, d16
; SOFT-NEXT:    vrev64.32 d16, d16
; SOFT-NEXT:    vadd.f64 d16, d16, d16
; SOFT-NEXT:    vmov r1, r0, d16
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_f64_v2i32:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.32 d16, d0
; HARD-NEXT:    vadd.i32 d16, d16, d16
; HARD-NEXT:    vrev64.32 d16, d16
; HARD-NEXT:    vadd.f64 d0, d16, d16
; HARD-NEXT:    bx lr
  %1 = add <2 x i32> %p, %p
  %2 = bitcast <2 x i32> %1 to double
  %3 = fadd double %2, %2
  ret double %3
}

define double @test_f64_v4i16(<4 x i16> %p) {
; SOFT-LABEL: test_f64_v4i16:
; SOFT:       @ %bb.0:
; SOFT-NEXT:    vmov d16, r1, r0
; SOFT-NEXT:    vrev64.16 d16, d16
; SOFT-NEXT:    vadd.i16 d16, d16, d16
; SOFT-NEXT:    vrev64.16 d16, d16
; SOFT-NEXT:    vadd.f64 d16, d16, d16
; SOFT-NEXT:    vmov r1, r0, d16
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test_f64_v4i16:
; HARD:       @ %bb.0:
; HARD-NEXT:    vrev64.16 d16, d0
; HARD-NEXT:    vadd.i16 d16, d16, d16
; HARD-NEXT:    vrev64.16 d16, d16
; HARD-NEXT:    vadd.f64 d0, d16, d16
; HARD-NEXT:    bx lr
  %1 = add <4 x i16> %p, %p
  %2 = bitcast <4 x i16> %1 to double
  %3 = fadd double %2, %2
  ret double %3
}

define double @test_f64_v8i8(<8 x i8> %p) { ; SOFT-LABEL: test_f64_v8i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8
d16, d16 ; SOFT-NEXT: vadd.f64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_f64_v8i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 d16, d0 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d16, d16 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <8 x i8> %p, %p %2 = bitcast <8 x i8> %1 to double %3 = fadd double %2, %2 ret double %3 } define <1 x i64> @test_v1i64_i64(i64 %p) { ; SOFT-LABEL: test_v1i64_i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: adds r1, r1, r1 ; SOFT-NEXT: adc r0, r0, r0 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v1i64_i64: ; HARD: @ %bb.0: ; HARD-NEXT: adds r1, r1, r1 ; HARD-NEXT: adc r0, r0, r0 ; HARD-NEXT: vmov d16, r1, r0 ; HARD-NEXT: vadd.i64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add i64 %p, %p %2 = bitcast i64 %1 to <1 x i64> %3 = add <1 x i64> %2, %2 ret <1 x i64> %3 } define <1 x i64> @test_v1i64_f64(double %p) { ; SOFT-LABEL: test_v1i64_f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.f64 d16, d16, d16 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v1i64_f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vadd.i64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = fadd double %p, %p %2 = bitcast double %1 to <1 x i64> %3 = add <1 x i64> %2, %2 ret <1 x i64> %3 } define <1 x i64> @test_v1i64_v2f32(<2 x float> %p) { ; SOFT-LABEL: test_v1i64_v2f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v1i64_v2f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.i64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = fadd <2 x float> %p, %p %2 = bitcast <2 x float> %1 to <1 x i64> %3 = add <1 x i64> %2, %2 ret <1 x i64> %3 } define <1 x i64> @test_v1i64_v2i32(<2 x i32> %p) { ; SOFT-LABEL: test_v1i64_v2i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v1i64_v2i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.i64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <2 x i32> %p, %p %2 = bitcast <2 x i32> %1 to <1 x i64> %3 = add <1 x i64> %2, %2 ret <1 x i64> %3 } define <1 x i64> @test_v1i64_v4i16(<4 x i16> %p) { ; SOFT-LABEL: test_v1i64_v4i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v1i64_v4i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 d16, d0 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d16, d16 ; HARD-NEXT: vadd.i64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <4 x i16> %p, %p %2 = bitcast <4 x i16> %1 to <1 x i64> %3 = add <1 x i64> %2, %2 ret <1 x i64> %3 } define <1 x i64> @test_v1i64_v8i8(<8 x i8> %p) { ; SOFT-LABEL: test_v1i64_v8i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; 
SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v1i64_v8i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 d16, d0 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d16, d16 ; HARD-NEXT: vadd.i64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <8 x i8> %p, %p %2 = bitcast <8 x i8> %1 to <1 x i64> %3 = add <1 x i64> %2, %2 ret <1 x i64> %3 } define <2 x float> @test_v2f32_i64(i64 %p) { ; SOFT-LABEL: test_v2f32_i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: adds r1, r1, r1 ; SOFT-NEXT: adc r0, r0, r0 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f32_i64: ; HARD: @ %bb.0: ; HARD-NEXT: adds r1, r1, r1 ; HARD-NEXT: adc r0, r0, r0 ; HARD-NEXT: vmov d16, r1, r0 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add i64 %p, %p %2 = bitcast i64 %1 to <2 x float> %3 = fadd <2 x float> %2, %2 ret <2 x float> %3 } define <2 x float> @test_v2f32_f64(double %p) { ; SOFT-LABEL: test_v2f32_f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.f64 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f32_f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = fadd double %p, %p %2 = bitcast double %1 to <2 x float> %3 = fadd <2 x float> %2, %2 ret <2 x float> %3 } define <2 x float> @test_v2f32_v1i64(<1 x i64> %p) { ; SOFT-LABEL: test_v2f32_v1i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f32_v1i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 d16, d0, d0 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add <1 x i64> %p, %p %2 = bitcast <1 x i64> %1 to <2 x float> %3 = fadd <2 x float> %2, %2 ret <2 x float> %3 } define <2 x float> @test_v2f32_v2i32(<2 x i32> %p) { ; SOFT-LABEL: test_v2f32_v2i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f32_v2i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add <2 x i32> %p, %p %2 = bitcast <2 x i32> %1 to <2 x float> %3 = fadd <2 x float> %2, %2 ret <2 x float> %3 } define <2 x float> @test_v2f32_v4i16(<4 x i16> %p) { ; SOFT-LABEL: test_v2f32_v4i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev32.16 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f32_v4i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 d16, d0 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev32.16 d16, d16 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: 
bx lr %1 = add <4 x i16> %p, %p %2 = bitcast <4 x i16> %1 to <2 x float> %3 = fadd <2 x float> %2, %2 ret <2 x float> %3 } define <2 x float> @test_v2f32_v8i8(<8 x i8> %p) { ; SOFT-LABEL: test_v2f32_v8i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev32.8 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f32_v8i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 d16, d0 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev32.8 d16, d16 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add <8 x i8> %p, %p %2 = bitcast <8 x i8> %1 to <2 x float> %3 = fadd <2 x float> %2, %2 ret <2 x float> %3 } define <2 x i32> @test_v2i32_i64(i64 %p) { ; SOFT-LABEL: test_v2i32_i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: adds r1, r1, r1 ; SOFT-NEXT: adc r0, r0, r0 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i32_i64: ; HARD: @ %bb.0: ; HARD-NEXT: adds r1, r1, r1 ; HARD-NEXT: adc r0, r0, r0 ; HARD-NEXT: vmov d16, r1, r0 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add i64 %p, %p %2 = bitcast i64 %1 to <2 x i32> %3 = add <2 x i32> %2, %2 ret <2 x i32> %3 } define <2 x i32> @test_v2i32_f64(double %p) { ; SOFT-LABEL: test_v2i32_f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.f64 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i32_f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = fadd double %p, %p %2 = bitcast double %1 to <2 x i32> %3 = add <2 x i32> %2, %2 ret <2 x i32> %3 } define <2 x i32> @test_v2i32_v1i64(<1 x i64> %p) { ; SOFT-LABEL: test_v2i32_v1i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i32_v1i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 d16, d0, d0 ; HARD-NEXT: vrev64.32 d16, d16 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add <1 x i64> %p, %p %2 = bitcast <1 x i64> %1 to <2 x i32> %3 = add <2 x i32> %2, %2 ret <2 x i32> %3 } define <2 x i32> @test_v2i32_v2f32(<2 x float> %p) { ; SOFT-LABEL: test_v2i32_v2f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i32_v2f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = fadd <2 x float> %p, %p %2 = bitcast <2 x float> %1 to <2 x i32> %3 = add <2 x i32> %2, %2 ret <2 x i32> %3 } define <2 x i32> @test_v2i32_v4i16(<4 x i16> %p) { ; SOFT-LABEL: test_v2i32_v4i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 
d16, d16, d16 ; SOFT-NEXT: vrev32.16 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i32_v4i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 d16, d0 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev32.16 d16, d16 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add <4 x i16> %p, %p %2 = bitcast <4 x i16> %1 to <2 x i32> %3 = add <2 x i32> %2, %2 ret <2 x i32> %3 } define <2 x i32> @test_v2i32_v8i8(<8 x i8> %p) { ; SOFT-LABEL: test_v2i32_v8i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev32.8 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i32_v8i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 d16, d0 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev32.8 d16, d16 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev64.32 d0, d16 ; HARD-NEXT: bx lr %1 = add <8 x i8> %p, %p %2 = bitcast <8 x i8> %1 to <2 x i32> %3 = add <2 x i32> %2, %2 ret <2 x i32> %3 } define <4 x i16> @test_v4i16_i64(i64 %p) { ; SOFT-LABEL: test_v4i16_i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: adds r1, r1, r1 ; SOFT-NEXT: adc r0, r0, r0 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i16_i64: ; HARD: @ %bb.0: ; HARD-NEXT: adds r1, r1, r1 ; HARD-NEXT: adc r0, r0, r0 ; HARD-NEXT: vmov d16, r1, r0 ; HARD-NEXT: vrev64.16 d16, d16 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d0, d16 ; HARD-NEXT: bx lr %1 = add i64 %p, %p %2 = bitcast i64 %1 to <4 x i16> %3 = add <4 x i16> %2, %2 ret <4 x i16> %3 } define <4 x i16> @test_v4i16_f64(double %p) { ; SOFT-LABEL: test_v4i16_f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.f64 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i16_f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.16 d16, d16 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d0, d16 ; HARD-NEXT: bx lr %1 = fadd double %p, %p %2 = bitcast double %1 to <4 x i16> %3 = add <4 x i16> %2, %2 ret <4 x i16> %3 } define <4 x i16> @test_v4i16_v1i64(<1 x i64> %p) { ; SOFT-LABEL: test_v4i16_v1i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i16_v1i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 d16, d0, d0 ; HARD-NEXT: vrev64.16 d16, d16 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d0, d16 ; HARD-NEXT: bx lr %1 = add <1 x i64> %p, %p %2 = bitcast <1 x i64> %1 to <4 x i16> %3 = add <4 x i16> %2, %2 ret <4 x i16> %3 } define <4 x i16> @test_v4i16_v2f32(<2 x float> %p) { ; SOFT-LABEL: test_v4i16_v2f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev32.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i16_v2f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: 
vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev32.16 d16, d16 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d0, d16 ; HARD-NEXT: bx lr %1 = fadd <2 x float> %p, %p %2 = bitcast <2 x float> %1 to <4 x i16> %3 = add <4 x i16> %2, %2 ret <4 x i16> %3 } define <4 x i16> @test_v4i16_v2i32(<2 x i32> %p) { ; SOFT-LABEL: test_v4i16_v2i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev32.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i16_v2i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev32.16 d16, d16 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d0, d16 ; HARD-NEXT: bx lr %1 = add <2 x i32> %p, %p %2 = bitcast <2 x i32> %1 to <4 x i16> %3 = add <4 x i16> %2, %2 ret <4 x i16> %3 } define <4 x i16> @test_v4i16_v8i8(<8 x i8> %p) { ; SOFT-LABEL: test_v4i16_v8i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev16.8 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i16_v8i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 d16, d0 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev16.8 d16, d16 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev64.16 d0, d16 ; HARD-NEXT: bx lr %1 = add <8 x i8> %p, %p %2 = bitcast <8 x i8> %1 to <4 x i16> %3 = add <4 x i16> %2, %2 ret <4 x i16> %3 } define <8 x i8> @test_v8i8_i64(i64 %p) { ; SOFT-LABEL: test_v8i8_i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: adds r1, r1, r1 ; SOFT-NEXT: adc r0, r0, r0 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i8_i64: ; HARD: @ %bb.0: ; HARD-NEXT: adds r1, r1, r1 ; HARD-NEXT: adc r0, r0, r0 ; HARD-NEXT: vmov d16, r1, r0 ; HARD-NEXT: vrev64.8 d16, d16 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d0, d16 ; HARD-NEXT: bx lr %1 = add i64 %p, %p %2 = bitcast i64 %1 to <8 x i8> %3 = add <8 x i8> %2, %2 ret <8 x i8> %3 } define <8 x i8> @test_v8i8_f64(double %p) { ; SOFT-LABEL: test_v8i8_f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.f64 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i8_f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.8 d16, d16 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d0, d16 ; HARD-NEXT: bx lr %1 = fadd double %p, %p %2 = bitcast double %1 to <8 x i8> %3 = add <8 x i8> %2, %2 ret <8 x i8> %3 } define <8 x i8> @test_v8i8_v1i64(<1 x i64> %p) { ; SOFT-LABEL: test_v8i8_v1i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i8_v1i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 d16, d0, d0 ; HARD-NEXT: vrev64.8 d16, d16 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d0, d16 ; HARD-NEXT: bx lr %1 = add <1 x i64> %p, %p %2 = bitcast <1 x i64> %1 to <8 x i8> %3 = add <8 x i8> %2, %2 ret <8 x i8> %3 } define <8 x i8> @test_v8i8_v2f32(<2 x 
float> %p) { ; SOFT-LABEL: test_v8i8_v2f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.f32 d16, d16, d16 ; SOFT-NEXT: vrev32.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i8_v2f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.f32 d16, d16, d16 ; HARD-NEXT: vrev32.8 d16, d16 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d0, d16 ; HARD-NEXT: bx lr %1 = fadd <2 x float> %p, %p %2 = bitcast <2 x float> %1 to <8 x i8> %3 = add <8 x i8> %2, %2 ret <8 x i8> %3 } define <8 x i8> @test_v8i8_v2i32(<2 x i32> %p) { ; SOFT-LABEL: test_v8i8_v2i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 d16, d16 ; SOFT-NEXT: vadd.i32 d16, d16, d16 ; SOFT-NEXT: vrev32.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i8_v2i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 d16, d0 ; HARD-NEXT: vadd.i32 d16, d16, d16 ; HARD-NEXT: vrev32.8 d16, d16 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d0, d16 ; HARD-NEXT: bx lr %1 = add <2 x i32> %p, %p %2 = bitcast <2 x i32> %1 to <8 x i8> %3 = add <8 x i8> %2, %2 ret <8 x i8> %3 } define <8 x i8> @test_v8i8_v4i16(<4 x i16> %p) { ; SOFT-LABEL: test_v8i8_v4i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 d16, d16 ; SOFT-NEXT: vadd.i16 d16, d16, d16 ; SOFT-NEXT: vrev16.8 d16, d16 ; SOFT-NEXT: vadd.i8 d16, d16, d16 ; SOFT-NEXT: vrev64.8 d16, d16 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i8_v4i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 d16, d0 ; HARD-NEXT: vadd.i16 d16, d16, d16 ; HARD-NEXT: vrev16.8 d16, d16 ; HARD-NEXT: vadd.i8 d16, d16, d16 ; HARD-NEXT: vrev64.8 d0, d16 ; HARD-NEXT: bx lr %1 = add <4 x i16> %p, %p %2 = bitcast <4 x i16> %1 to <8 x i8> %3 = add <8 x i8> %2, %2 ret <8 x i8> %3 } define fp128 @test_f128_v2f64(<2 x double> %p) { ; SOFT-LABEL: test_f128_v2f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: vmov d16, r3, r2 ; SOFT-NEXT: vmov d17, r1, r0 ; SOFT-NEXT: vadd.f64 d19, d16, d16 ; SOFT-NEXT: vadd.f64 d18, d17, d17 ; SOFT-NEXT: vrev64.32 q8, q9 ; SOFT-NEXT: vmov r2, r3, d17 ; SOFT-NEXT: vmov r0, r1, d16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_f128_v2f64: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: vadd.f64 d17, d1, d1 ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vmov r2, r3, d17 ; HARD-NEXT: vmov r0, r1, d16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd <2 x double> %p, %p %2 = bitcast <2 x double> %1 to fp128 %3 = fadd fp128 %2, %2 ret fp128 %3 } define fp128 @test_f128_v2i64(<2 x i64> %p) { ; SOFT-LABEL: test_f128_v2i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r2, r3, d17 ; SOFT-NEXT: vmov r0, r1, d16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; 
SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_f128_v2i64: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: vadd.i64 q8, q0, q0 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vmov r2, r3, d17 ; HARD-NEXT: vmov r0, r1, d16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = add <2 x i64> %p, %p %2 = bitcast <2 x i64> %1 to fp128 %3 = fadd fp128 %2, %2 ret fp128 %3 } define fp128 @test_f128_v4f32(<4 x float> %p) { ; SOFT-LABEL: test_f128_v4f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vmov r2, r3, d17 ; SOFT-NEXT: vmov r0, r1, d16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_f128_v4f32: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vmov r2, r3, d17 ; HARD-NEXT: vmov r0, r1, d16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd <4 x float> %p, %p %2 = bitcast <4 x float> %1 to fp128 %3 = fadd fp128 %2, %2 ret fp128 %3 } define fp128 @test_f128_v4i32(<4 x i32> %p) { ; SOFT-LABEL: test_f128_v4i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vmov r2, r3, d17 ; SOFT-NEXT: vmov r0, r1, d16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_f128_v4i32: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vmov r2, r3, d17 ; HARD-NEXT: vmov r0, r1, d16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = add <4 x i32> %p, %p %2 = bitcast <4 x i32> %1 to fp128 %3 = fadd fp128 %2, %2 ret fp128 %3 } define fp128 @test_f128_v8i16(<8 x i16> %p) { ; SOFT-LABEL: test_f128_v8i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev32.16 q8, q8 ; SOFT-NEXT: vmov r2, r3, d17 ; SOFT-NEXT: vmov r0, r1, d16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_f128_v8i16: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: vrev64.16 q8, q0 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev32.16 q8, q8 ; HARD-NEXT: vmov r2, r3, d17 ; HARD-NEXT: vmov r0, r1, d16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = add <8 x i16> %p, 
%p %2 = bitcast <8 x i16> %1 to fp128 %3 = fadd fp128 %2, %2 ret fp128 %3 } define fp128 @test_f128_v16i8(<16 x i8> %p) { ; SOFT-LABEL: test_f128_v16i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev32.8 q8, q8 ; SOFT-NEXT: vmov r2, r3, d17 ; SOFT-NEXT: vmov r0, r1, d16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_f128_v16i8: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: vrev64.8 q8, q0 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev32.8 q8, q8 ; HARD-NEXT: vmov r2, r3, d17 ; HARD-NEXT: vmov r0, r1, d16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = add <16 x i8> %p, %p %2 = bitcast <16 x i8> %1 to fp128 %3 = fadd fp128 %2, %2 ret fp128 %3 } define <2 x double> @test_v2f64_f128(fp128 %p) { ; SOFT-LABEL: test_v2f64_f128: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: vmov.32 d17[0], r2 ; SOFT-NEXT: vmov.32 d16[0], r0 ; SOFT-NEXT: vmov.32 d17[1], r3 ; SOFT-NEXT: vmov.32 d16[1], r1 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f64 d18, d16, d16 ; SOFT-NEXT: vadd.f64 d16, d17, d17 ; SOFT-NEXT: vmov r1, r0, d18 ; SOFT-NEXT: vmov r3, r2, d16 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_v2f64_f128: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: vmov.32 d17[0], r2 ; HARD-NEXT: vmov.32 d16[0], r0 ; HARD-NEXT: vmov.32 d17[1], r3 ; HARD-NEXT: vmov.32 d16[1], r1 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.f64 d1, d17, d17 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd fp128 %p, %p %2 = bitcast fp128 %1 to <2 x double> %3 = fadd <2 x double> %2, %2 ret <2 x double> %3 } define <2 x double> @test_v2f64_v2i64(<2 x i64> %p) { ; SOFT-LABEL: test_v2f64_v2i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vadd.f64 d18, d16, d16 ; SOFT-NEXT: vadd.f64 d16, d17, d17 ; SOFT-NEXT: vmov r1, r0, d18 ; SOFT-NEXT: vmov r3, r2, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f64_v2i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 q8, q0, q0 ; HARD-NEXT: vadd.f64 d1, d17, d17 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <2 x i64> %p, %p %2 = bitcast <2 x i64> %1 to <2 x double> %3 = fadd <2 x double> %2, %2 ret <2 x double> %3 } define <2 x double> @test_v2f64_v4f32(<4 x float> %p) { ; SOFT-LABEL: test_v2f64_v4f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f64 d18, d16, d16 ; SOFT-NEXT: vadd.f64 d16, d17, d17 ; SOFT-NEXT: vmov r1, r0, d18 ; SOFT-NEXT: vmov r3, r2, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f64_v4f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q8, 
q8 ; HARD-NEXT: vadd.f64 d1, d17, d17 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = fadd <4 x float> %p, %p %2 = bitcast <4 x float> %1 to <2 x double> %3 = fadd <2 x double> %2, %2 ret <2 x double> %3 } define <2 x double> @test_v2f64_v4i32(<4 x i32> %p) { ; SOFT-LABEL: test_v2f64_v4i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f64 d18, d16, d16 ; SOFT-NEXT: vadd.f64 d16, d17, d17 ; SOFT-NEXT: vmov r1, r0, d18 ; SOFT-NEXT: vmov r3, r2, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f64_v4i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.f64 d1, d17, d17 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <4 x i32> %p, %p %2 = bitcast <4 x i32> %1 to <2 x double> %3 = fadd <2 x double> %2, %2 ret <2 x double> %3 } define <2 x double> @test_v2f64_v8i16(<8 x i16> %p) { ; SOFT-LABEL: test_v2f64_v8i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.f64 d18, d16, d16 ; SOFT-NEXT: vadd.f64 d16, d17, d17 ; SOFT-NEXT: vmov r1, r0, d18 ; SOFT-NEXT: vmov r3, r2, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f64_v8i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 q8, q0 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q8, q8 ; HARD-NEXT: vadd.f64 d1, d17, d17 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <8 x i16> %p, %p %2 = bitcast <8 x i16> %1 to <2 x double> %3 = fadd <2 x double> %2, %2 ret <2 x double> %3 } define <2 x double> @test_v2f64_v16i8(<16 x i8> %p) { ; SOFT-LABEL: test_v2f64_v16i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.f64 d18, d16, d16 ; SOFT-NEXT: vadd.f64 d16, d17, d17 ; SOFT-NEXT: vmov r1, r0, d18 ; SOFT-NEXT: vmov r3, r2, d16 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2f64_v16i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 q8, q0 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q8, q8 ; HARD-NEXT: vadd.f64 d1, d17, d17 ; HARD-NEXT: vadd.f64 d0, d16, d16 ; HARD-NEXT: bx lr %1 = add <16 x i8> %p, %p %2 = bitcast <16 x i8> %1 to <2 x double> %3 = fadd <2 x double> %2, %2 ret <2 x double> %3 } define <2 x i64> @test_v2i64_f128(fp128 %p) { ; SOFT-LABEL: test_v2i64_f128: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: vmov.32 d17[0], r2 ; SOFT-NEXT: vmov.32 d16[0], r0 ; SOFT-NEXT: vmov.32 d17[1], r3 ; SOFT-NEXT: vmov.32 d16[1], r1 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_v2i64_f128: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: vmov.32 d17[0], r2 ; HARD-NEXT: vmov.32 d16[0], r0 ; HARD-NEXT: vmov.32 d17[1], r3 ; HARD-NEXT: vmov.32 d16[1], r1 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.i64 q0, q8, q8 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd fp128 %p, %p 
%2 = bitcast fp128 %1 to <2 x i64> %3 = add <2 x i64> %2, %2 ret <2 x i64> %3 } define <2 x i64> @test_v2i64_v2f64(<2 x double> %p) { ; SOFT-LABEL: test_v2i64_v2f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r3, r2 ; SOFT-NEXT: vmov d17, r1, r0 ; SOFT-NEXT: vadd.f64 d19, d16, d16 ; SOFT-NEXT: vadd.f64 d18, d17, d17 ; SOFT-NEXT: vadd.i64 q8, q9, q9 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i64_v2f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d17, d1, d1 ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vadd.i64 q0, q8, q8 ; HARD-NEXT: bx lr %1 = fadd <2 x double> %p, %p %2 = bitcast <2 x double> %1 to <2 x i64> %3 = add <2 x i64> %2, %2 ret <2 x i64> %3 } define <2 x i64> @test_v2i64_v4f32(<4 x float> %p) { ; SOFT-LABEL: test_v2i64_v4f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i64_v4f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.i64 q0, q8, q8 ; HARD-NEXT: bx lr %1 = fadd <4 x float> %p, %p %2 = bitcast <4 x float> %1 to <2 x i64> %3 = add <2 x i64> %2, %2 ret <2 x i64> %3 } define <2 x i64> @test_v2i64_v4i32(<4 x i32> %p) { ; SOFT-LABEL: test_v2i64_v4i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i64_v4i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.i64 q0, q8, q8 ; HARD-NEXT: bx lr %1 = add <4 x i32> %p, %p %2 = bitcast <4 x i32> %1 to <2 x i64> %3 = add <2 x i64> %2, %2 ret <2 x i64> %3 } define <2 x i64> @test_v2i64_v8i16(<8 x i16> %p) { ; SOFT-LABEL: test_v2i64_v8i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i64_v8i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 q8, q0 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q8, q8 ; HARD-NEXT: vadd.i64 q0, q8, q8 ; HARD-NEXT: bx lr %1 = add <8 x i16> %p, %p %2 = bitcast <8 x i16> %1 to <2 x i64> %3 = add <2 x i64> %2, %2 ret <2 x i64> %3 } define <2 x i64> @test_v2i64_v16i8(<16 x i8> %p) { ; SOFT-LABEL: test_v2i64_v16i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v2i64_v16i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 q8, q0 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q8, q8 ; HARD-NEXT: vadd.i64 q0, q8, q8 ; HARD-NEXT: bx lr %1 = add <16 x i8> %p, %p %2 = bitcast <16 x i8> %1 to <2 x i64> %3 = add <2 x i64> %2, %2 ret <2 x i64> %3 } define <4 x float> @test_v4f32_f128(fp128 %p) { ; SOFT-LABEL: test_v4f32_f128: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; 
SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: vmov.32 d17[0], r2 ; SOFT-NEXT: vmov.32 d16[0], r0 ; SOFT-NEXT: vmov.32 d17[1], r3 ; SOFT-NEXT: vmov.32 d16[1], r1 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_v4f32_f128: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: vmov.32 d17[0], r2 ; HARD-NEXT: vmov.32 d16[0], r0 ; HARD-NEXT: vmov.32 d17[1], r3 ; HARD-NEXT: vmov.32 d16[1], r1 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd fp128 %p, %p %2 = bitcast fp128 %1 to <4 x float> %3 = fadd <4 x float> %2, %2 ret <4 x float> %3 } define <4 x float> @test_v4f32_v2f64(<2 x double> %p) { ; SOFT-LABEL: test_v4f32_v2f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r3, r2 ; SOFT-NEXT: vmov d17, r1, r0 ; SOFT-NEXT: vadd.f64 d19, d16, d16 ; SOFT-NEXT: vadd.f64 d18, d17, d17 ; SOFT-NEXT: vrev64.32 q8, q9 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4f32_v2f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d17, d1, d1 ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <2 x double> %p, %p %2 = bitcast <2 x double> %1 to <4 x float> %3 = fadd <4 x float> %2, %2 ret <4 x float> %3 } define <4 x float> @test_v4f32_v2i64(<2 x i64> %p) { ; SOFT-LABEL: test_v4f32_v2i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4f32_v2i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 q8, q0, q0 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <2 x i64> %p, %p %2 = bitcast <2 x i64> %1 to <4 x float> %3 = fadd <4 x float> %2, %2 ret <4 x float> %3 } define <4 x float> @test_v4f32_v4i32(<4 x i32> %p) { ; SOFT-LABEL: test_v4f32_v4i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4f32_v4i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <4 x i32> %p, %p %2 = bitcast <4 x i32> %1 to <4 x float> %3 = fadd <4 x float> %2, %2 ret <4 x float> %3 } define <4 x float> @test_v4f32_v8i16(<8 x i16> %p) { ; SOFT-LABEL: test_v4f32_v8i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev32.16 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4f32_v8i16: ; 
HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 q8, q0 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev32.16 q8, q8 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <8 x i16> %p, %p %2 = bitcast <8 x i16> %1 to <4 x float> %3 = fadd <4 x float> %2, %2 ret <4 x float> %3 } define <4 x float> @test_v4f32_v16i8(<16 x i8> %p) { ; SOFT-LABEL: test_v4f32_v16i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev32.8 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4f32_v16i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 q8, q0 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev32.8 q8, q8 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <16 x i8> %p, %p %2 = bitcast <16 x i8> %1 to <4 x float> %3 = fadd <4 x float> %2, %2 ret <4 x float> %3 } define <4 x i32> @test_v4i32_f128(fp128 %p) { ; SOFT-LABEL: test_v4i32_f128: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: vmov.32 d17[0], r2 ; SOFT-NEXT: vmov.32 d16[0], r0 ; SOFT-NEXT: vmov.32 d17[1], r3 ; SOFT-NEXT: vmov.32 d16[1], r1 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_v4i32_f128: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: vmov.32 d17[0], r2 ; HARD-NEXT: vmov.32 d16[0], r0 ; HARD-NEXT: vmov.32 d17[1], r3 ; HARD-NEXT: vmov.32 d16[1], r1 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd fp128 %p, %p %2 = bitcast fp128 %1 to <4 x i32> %3 = add <4 x i32> %2, %2 ret <4 x i32> %3 } define <4 x i32> @test_v4i32_v2f64(<2 x double> %p) { ; SOFT-LABEL: test_v4i32_v2f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r3, r2 ; SOFT-NEXT: vmov d17, r1, r0 ; SOFT-NEXT: vadd.f64 d19, d16, d16 ; SOFT-NEXT: vadd.f64 d18, d17, d17 ; SOFT-NEXT: vrev64.32 q8, q9 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i32_v2f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d17, d1, d1 ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <2 x double> %p, %p %2 = bitcast <2 x double> %1 to <4 x i32> %3 = add <4 x i32> %2, %2 ret <4 x i32> %3 } define <4 x i32> @test_v4i32_v2i64(<2 x i64> %p) { ; SOFT-LABEL: test_v4i32_v2i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i32_v2i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 q8, q0, q0 ; HARD-NEXT: vrev64.32 q8, q8 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <2 x i64> %p, %p %2 = bitcast <2 x i64> %1 to 
<4 x i32> %3 = add <4 x i32> %2, %2 ret <4 x i32> %3 } define <4 x i32> @test_v4i32_v4f32(<4 x float> %p) { ; SOFT-LABEL: test_v4i32_v4f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i32_v4f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <4 x float> %p, %p %2 = bitcast <4 x float> %1 to <4 x i32> %3 = add <4 x i32> %2, %2 ret <4 x i32> %3 } define <4 x i32> @test_v4i32_v8i16(<8 x i16> %p) { ; SOFT-LABEL: test_v4i32_v8i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev32.16 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i32_v8i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 q8, q0 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev32.16 q8, q8 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <8 x i16> %p, %p %2 = bitcast <8 x i16> %1 to <4 x i32> %3 = add <4 x i32> %2, %2 ret <4 x i32> %3 } define <4 x i32> @test_v4i32_v16i8(<16 x i8> %p) { ; SOFT-LABEL: test_v4i32_v16i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev32.8 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v4i32_v16i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 q8, q0 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev32.8 q8, q8 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev64.32 q0, q8 ; HARD-NEXT: bx lr %1 = add <16 x i8> %p, %p %2 = bitcast <16 x i8> %1 to <4 x i32> %3 = add <4 x i32> %2, %2 ret <4 x i32> %3 } define <8 x i16> @test_v8i16_f128(fp128 %p) { ; SOFT-LABEL: test_v8i16_f128: ; SOFT: @ %bb.0: ; SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: vmov.32 d17[0], r2 ; SOFT-NEXT: vmov.32 d16[0], r0 ; SOFT-NEXT: vmov.32 d17[1], r3 ; SOFT-NEXT: vmov.32 d16[1], r1 ; SOFT-NEXT: vrev32.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_v8i16_f128: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: vmov.32 d17[0], r2 ; HARD-NEXT: vmov.32 d16[0], r0 ; HARD-NEXT: vmov.32 d17[1], r3 ; HARD-NEXT: vmov.32 d16[1], r1 ; HARD-NEXT: vrev32.16 q8, q8 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q0, q8 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd fp128 %p, %p %2 = bitcast fp128 %1 to <8 x i16> %3 = add <8 x i16> %2, %2 ret <8 x i16> %3 } define <8 x i16> @test_v8i16_v2f64(<2 x double> %p) { ; SOFT-LABEL: test_v8i16_v2f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r3, r2 ; SOFT-NEXT: vmov d17, r1, r0 
; SOFT-NEXT: vadd.f64 d19, d16, d16 ; SOFT-NEXT: vadd.f64 d18, d17, d17 ; SOFT-NEXT: vrev64.16 q8, q9 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i16_v2f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d17, d1, d1 ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.16 q8, q8 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <2 x double> %p, %p %2 = bitcast <2 x double> %1 to <8 x i16> %3 = add <8 x i16> %2, %2 ret <8 x i16> %3 } define <8 x i16> @test_v8i16_v2i64(<2 x i64> %p) { ; SOFT-LABEL: test_v8i16_v2i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i16_v2i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 q8, q0, q0 ; HARD-NEXT: vrev64.16 q8, q8 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q0, q8 ; HARD-NEXT: bx lr %1 = add <2 x i64> %p, %p %2 = bitcast <2 x i64> %1 to <8 x i16> %3 = add <8 x i16> %2, %2 ret <8 x i16> %3 } define <8 x i16> @test_v8i16_v4f32(<4 x float> %p) { ; SOFT-LABEL: test_v8i16_v4f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev32.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i16_v4f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev32.16 q8, q8 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <4 x float> %p, %p %2 = bitcast <4 x float> %1 to <8 x i16> %3 = add <8 x i16> %2, %2 ret <8 x i16> %3 } define <8 x i16> @test_v8i16_v4i32(<4 x i32> %p) { ; SOFT-LABEL: test_v8i16_v4i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev32.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i16_v4i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev32.16 q8, q8 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q0, q8 ; HARD-NEXT: bx lr %1 = add <4 x i32> %p, %p %2 = bitcast <4 x i32> %1 to <8 x i16> %3 = add <8 x i16> %2, %2 ret <8 x i16> %3 } define <8 x i16> @test_v8i16_v16i8(<16 x i8> %p) { ; SOFT-LABEL: test_v8i16_v16i8: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev16.8 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v8i16_v16i8: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.8 q8, q0 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev16.8 q8, q8 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev64.16 q0, q8 ; HARD-NEXT: bx lr %1 = add <16 x i8> %p, %p %2 = bitcast <16 x i8> %1 to <8 x i16> %3 = add <8 x i16> %2, %2 ret <8 x i16> %3 } define <16 x i8> @test_v16i8_f128(fp128 %p) { ; SOFT-LABEL: test_v16i8_f128: ; SOFT: @ %bb.0: ; 
SOFT-NEXT: .save {r11, lr} ; SOFT-NEXT: push {r11, lr} ; SOFT-NEXT: .pad #16 ; SOFT-NEXT: sub sp, sp, #16 ; SOFT-NEXT: stm sp, {r0, r1, r2, r3} ; SOFT-NEXT: bl __addtf3 ; SOFT-NEXT: vmov.32 d17[0], r2 ; SOFT-NEXT: vmov.32 d16[0], r0 ; SOFT-NEXT: vmov.32 d17[1], r3 ; SOFT-NEXT: vmov.32 d16[1], r1 ; SOFT-NEXT: vrev32.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: add sp, sp, #16 ; SOFT-NEXT: pop {r11, pc} ; ; HARD-LABEL: test_v16i8_f128: ; HARD: @ %bb.0: ; HARD-NEXT: .save {r11, lr} ; HARD-NEXT: push {r11, lr} ; HARD-NEXT: .pad #16 ; HARD-NEXT: sub sp, sp, #16 ; HARD-NEXT: stm sp, {r0, r1, r2, r3} ; HARD-NEXT: bl __addtf3 ; HARD-NEXT: vmov.32 d17[0], r2 ; HARD-NEXT: vmov.32 d16[0], r0 ; HARD-NEXT: vmov.32 d17[1], r3 ; HARD-NEXT: vmov.32 d16[1], r1 ; HARD-NEXT: vrev32.8 q8, q8 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q0, q8 ; HARD-NEXT: add sp, sp, #16 ; HARD-NEXT: pop {r11, pc} %1 = fadd fp128 %p, %p %2 = bitcast fp128 %1 to <16 x i8> %3 = add <16 x i8> %2, %2 ret <16 x i8> %3 } define <16 x i8> @test_v16i8_v2f64(<2 x double> %p) { ; SOFT-LABEL: test_v16i8_v2f64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d16, r3, r2 ; SOFT-NEXT: vmov d17, r1, r0 ; SOFT-NEXT: vadd.f64 d19, d16, d16 ; SOFT-NEXT: vadd.f64 d18, d17, d17 ; SOFT-NEXT: vrev64.8 q8, q9 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v16i8_v2f64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.f64 d17, d1, d1 ; HARD-NEXT: vadd.f64 d16, d0, d0 ; HARD-NEXT: vrev64.8 q8, q8 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <2 x double> %p, %p %2 = bitcast <2 x double> %1 to <16 x i8> %3 = add <16 x i8> %2, %2 ret <16 x i8> %3 } define <16 x i8> @test_v16i8_v2i64(<2 x i64> %p) { ; SOFT-LABEL: test_v16i8_v2i64: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vadd.i64 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v16i8_v2i64: ; HARD: @ %bb.0: ; HARD-NEXT: vadd.i64 q8, q0, q0 ; HARD-NEXT: vrev64.8 q8, q8 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q0, q8 ; HARD-NEXT: bx lr %1 = add <2 x i64> %p, %p %2 = bitcast <2 x i64> %1 to <16 x i8> %3 = add <16 x i8> %2, %2 ret <16 x i8> %3 } define <16 x i8> @test_v16i8_v4f32(<4 x float> %p) { ; SOFT-LABEL: test_v16i8_v4f32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.f32 q8, q8, q8 ; SOFT-NEXT: vrev32.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v16i8_v4f32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.f32 q8, q8, q8 ; HARD-NEXT: vrev32.8 q8, q8 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q0, q8 ; HARD-NEXT: bx lr %1 = fadd <4 x float> %p, %p %2 = bitcast <4 x float> %1 to <16 x i8> %3 = add <16 x i8> %2, %2 ret <16 x i8> %3 } define <16 x i8> @test_v16i8_v4i32(<4 x i32> %p) { ; SOFT-LABEL: test_v16i8_v4i32: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.32 q8, q8 ; SOFT-NEXT: vadd.i32 q8, q8, q8 ; SOFT-NEXT: vrev32.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 
q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v16i8_v4i32: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.32 q8, q0 ; HARD-NEXT: vadd.i32 q8, q8, q8 ; HARD-NEXT: vrev32.8 q8, q8 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q0, q8 ; HARD-NEXT: bx lr %1 = add <4 x i32> %p, %p %2 = bitcast <4 x i32> %1 to <16 x i8> %3 = add <16 x i8> %2, %2 ret <16 x i8> %3 } define <16 x i8> @test_v16i8_v8i16(<8 x i16> %p) { ; SOFT-LABEL: test_v16i8_v8i16: ; SOFT: @ %bb.0: ; SOFT-NEXT: vmov d17, r3, r2 ; SOFT-NEXT: vmov d16, r1, r0 ; SOFT-NEXT: vrev64.16 q8, q8 ; SOFT-NEXT: vadd.i16 q8, q8, q8 ; SOFT-NEXT: vrev16.8 q8, q8 ; SOFT-NEXT: vadd.i8 q8, q8, q8 ; SOFT-NEXT: vrev64.8 q8, q8 ; SOFT-NEXT: vmov r1, r0, d16 ; SOFT-NEXT: vmov r3, r2, d17 ; SOFT-NEXT: bx lr ; ; HARD-LABEL: test_v16i8_v8i16: ; HARD: @ %bb.0: ; HARD-NEXT: vrev64.16 q8, q0 ; HARD-NEXT: vadd.i16 q8, q8, q8 ; HARD-NEXT: vrev16.8 q8, q8 ; HARD-NEXT: vadd.i8 q8, q8, q8 ; HARD-NEXT: vrev64.8 q0, q8 ; HARD-NEXT: bx lr %1 = add <8 x i16> %p, %p %2 = bitcast <8 x i16> %1 to <16 x i8> %3 = add <16 x i8> %2, %2 ret <16 x i8> %3 }