# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600

# Test that the legalizer lowers the MSA floating-point arithmetic builtins
# (llvm.mips.fadd/fsub/fmul/fdiv on <4 x float> and <2 x double>) to the
# generic G_FADD/G_FSUB/G_FMUL/G_FDIV opcodes, as checked by the P5600 lines.
--- |

  declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
  define void @fadd_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
  entry:
    ret void
  }

  declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>)
  define void @fadd_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
  entry:
    ret void
  }

  declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
  define void @fsub_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
  entry:
    ret void
  }

  declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>)
  define void @fsub_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
  entry:
    ret void
  }

  declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>)
  define void @fmul_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
  entry:
    ret void
  }

  declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>)
  define void @fmul_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
  entry:
    ret void
  }

  declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>)
  define void @fdiv_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
  entry:
    ret void
  }

  declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>)
  define void @fdiv_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
  entry:
    ret void
  }

...
---
name:            fadd_v4f32_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fadd_v4f32_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<4 x s32>) from %ir.b)
    ; P5600: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FADD]](<4 x s32>), [[COPY2]](p0) :: (store (<4 x s32>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>) from %ir.a)
    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load (<4 x s32>) from %ir.b)
    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.fadd.w), %3(<4 x s32>), %4(<4 x s32>)
    G_STORE %5(<4 x s32>), %2(p0) :: (store (<4 x s32>) into %ir.c)
    RetRA

...
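# The remaining test cases repeat the same load/intrinsic/store pattern for
# the <2 x s64> variant and for fsub, fmul, and fdiv: each MSA builtin is
# expected to legalize to the corresponding generic opcode.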
---
name:            fadd_v2f64_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fadd_v2f64_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<2 x s64>) from %ir.b)
    ; P5600: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FADD]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>) from %ir.a)
    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load (<2 x s64>) from %ir.b)
    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.fadd.d), %3(<2 x s64>), %4(<2 x s64>)
    G_STORE %5(<2 x s64>), %2(p0) :: (store (<2 x s64>) into %ir.c)
    RetRA

...
---
name:            fsub_v4f32_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fsub_v4f32_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<4 x s32>) from %ir.b)
    ; P5600: [[FSUB:%[0-9]+]]:_(<4 x s32>) = G_FSUB [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FSUB]](<4 x s32>), [[COPY2]](p0) :: (store (<4 x s32>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>) from %ir.a)
    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load (<4 x s32>) from %ir.b)
    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.fsub.w), %3(<4 x s32>), %4(<4 x s32>)
    G_STORE %5(<4 x s32>), %2(p0) :: (store (<4 x s32>) into %ir.c)
    RetRA

...
---
name:            fsub_v2f64_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fsub_v2f64_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<2 x s64>) from %ir.b)
    ; P5600: [[FSUB:%[0-9]+]]:_(<2 x s64>) = G_FSUB [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FSUB]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>) from %ir.a)
    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load (<2 x s64>) from %ir.b)
    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.fsub.d), %3(<2 x s64>), %4(<2 x s64>)
    G_STORE %5(<2 x s64>), %2(p0) :: (store (<2 x s64>) into %ir.c)
    RetRA

...
---
name:            fmul_v4f32_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fmul_v4f32_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<4 x s32>) from %ir.b)
    ; P5600: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FMUL]](<4 x s32>), [[COPY2]](p0) :: (store (<4 x s32>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>) from %ir.a)
    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load (<4 x s32>) from %ir.b)
    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.fmul.w), %3(<4 x s32>), %4(<4 x s32>)
    G_STORE %5(<4 x s32>), %2(p0) :: (store (<4 x s32>) into %ir.c)
    RetRA

...
---
name:            fmul_v2f64_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fmul_v2f64_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<2 x s64>) from %ir.b)
    ; P5600: [[FMUL:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FMUL]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>) from %ir.a)
    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load (<2 x s64>) from %ir.b)
    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.fmul.d), %3(<2 x s64>), %4(<2 x s64>)
    G_STORE %5(<2 x s64>), %2(p0) :: (store (<2 x s64>) into %ir.c)
    RetRA

...
---
name:            fdiv_v4f32_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fdiv_v4f32_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<4 x s32>) from %ir.b)
    ; P5600: [[FDIV:%[0-9]+]]:_(<4 x s32>) = G_FDIV [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FDIV]](<4 x s32>), [[COPY2]](p0) :: (store (<4 x s32>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>) from %ir.a)
    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load (<4 x s32>) from %ir.b)
    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.fdiv.w), %3(<4 x s32>), %4(<4 x s32>)
    G_STORE %5(<4 x s32>), %2(p0) :: (store (<4 x s32>) into %ir.c)
    RetRA

...
---
name:            fdiv_v2f64_builtin
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $a0, $a1, $a2

    ; P5600-LABEL: name: fdiv_v2f64_builtin
    ; P5600: liveins: $a0, $a1, $a2
    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>) from %ir.a)
    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<2 x s64>) from %ir.b)
    ; P5600: [[FDIV:%[0-9]+]]:_(<2 x s64>) = G_FDIV [[LOAD]], [[LOAD1]]
    ; P5600: G_STORE [[FDIV]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>) into %ir.c)
    ; P5600: RetRA
    %0:_(p0) = COPY $a0
    %1:_(p0) = COPY $a1
    %2:_(p0) = COPY $a2
    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>) from %ir.a)
    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load (<2 x s64>) from %ir.b)
    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.fdiv.d), %3(<2 x s64>), %4(<2 x s64>)
    G_STORE %5(<2 x s64>), %2(p0) :: (store (<2 x s64>) into %ir.c)
    RetRA

...