# Compiler projects using llvm
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu                                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
--- |

  ; Scalar single-precision fsub; instruction selection for this function is
  ; exercised by the MIR document of the same name below.
  define float @test_fsub_float(float %arg1, float %arg2) {
    %ret = fsub float %arg1, %arg2
    ret float %ret
  }

  ; Scalar double-precision fsub; instruction selection for this function is
  ; exercised by the MIR document of the same name below.
  define double @test_fsub_double(double %arg1, double %arg2) {
    %ret = fsub double %arg1, %arg2
    ret double %ret
  }

...
---
# Checks that a vecr-bank G_FSUB (s32) is selected to the scalar SSE subtract:
# SUBSSrr (SSE), VSUBSSrr (AVX), VSUBSSZrr (AVX512F and AVX512VL), each carrying
# nofpexcept and an implicit $mxcsr use.
name:            test_fsub_float
alignment:       16
legalized:       true
regBankSelected: true
#
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
#
#
body:             |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fsub_float
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; SSE: %4:fr32 = nofpexcept SUBSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fsub_float
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; AVX: %4:fr32 = nofpexcept VSUBSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fsub_float
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512F: %4:fr32x = nofpexcept VSUBSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fsub_float
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512VL: %4:fr32x = nofpexcept VSUBSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
    ; Input MIR: s32 operands are carried in s128 xmm registers, so each arg is
    ; truncated to s32, subtracted, and the result anyext'ed back to s128.
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s32) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s32) = G_TRUNC %3(s128)
    %4:vecr(s32) = G_FSUB %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s32)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...
---
# Checks that a vecr-bank G_FSUB (s64) is selected to the scalar SSE subtract:
# SUBSDrr (SSE), VSUBSDrr (AVX), VSUBSDZrr (AVX512F and AVX512VL), each carrying
# nofpexcept and an implicit $mxcsr use.
name:            test_fsub_double
alignment:       16
legalized:       true
regBankSelected: true
#
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
#
#
body:             |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fsub_double
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; SSE: %4:fr64 = nofpexcept SUBSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fsub_double
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; AVX: %4:fr64 = nofpexcept VSUBSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fsub_double
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512F: %4:fr64x = nofpexcept VSUBSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fsub_double
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512VL: %4:fr64x = nofpexcept VSUBSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
    ; Input MIR: s64 operands are carried in s128 xmm registers, so each arg is
    ; truncated to s64, subtracted, and the result anyext'ed back to s128.
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s64) = G_TRUNC %3(s128)
    %4:vecr(s64) = G_FSUB %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s64)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...