# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# Test that G_UADDO is selected to ADDS{W,X}r{r,i,s,x} + CSINCWr (overflow flag)
# by the AArch64 GlobalISel instruction selector.
# Fix: triple was misspelled "aarch64-unknown-uknown".
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel-abort=1 -run-pass=instruction-select %s -o - | FileCheck %s
...
---
name:            uaddo_s32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2

    ; CHECK-LABEL: name: uaddo_s32
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $w0 = COPY [[ADDSWrr]]
    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %3:gpr(s32), %4:gpr(s32) = G_UADDO %0, %1
    $w0 = COPY %3
    $w1 = COPY %4
    RET_ReallyLR implicit $w0, implicit $w1

...
---
name:            uaddo_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1, $x2

    ; CHECK-LABEL: name: uaddo_s64
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $x0 = COPY [[ADDSXrr]]
    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %3:gpr(s64), %4:gpr(s32) = G_UADDO %0, %1
    $x0 = COPY %3
    $w1 = COPY %4
    RET_ReallyLR implicit $x0, implicit $w1

...
---
name:            uaddo_s32_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWri when we can fold in a constant.
    ;
    ; CHECK-LABEL: name: uaddo_s32_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:gpr32sp = COPY $w0
    ; CHECK-NEXT: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK-NEXT: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 16
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_shifted
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWrs when we can fold in a shift.
    ;
    ; CHECK-LABEL: name: uaddo_s32_shifted
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy1:gpr32 = COPY $w0
    ; CHECK-NEXT: %copy2:gpr32 = COPY $w1
    ; CHECK-NEXT: %add:gpr32 = ADDSWrs %copy1, %copy2, 16, implicit-def $nzcv
    ; CHECK-NEXT: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %constant:gpr(s32) = G_CONSTANT i32 16
    %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy1, %shift
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_s32_neg_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get SUBSWri when we can fold in a negative constant.
    ;
    ; CHECK-LABEL: name: uaddo_s32_neg_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:gpr32sp = COPY $w0
    ; CHECK-NEXT: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK-NEXT: %overflow:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 -16
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            uaddo_arith_extended
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $x0
    ; Check that we get ADDSXrx.
    ; CHECK-LABEL: name: uaddo_arith_extended
    ; CHECK: liveins: $w0, $x0
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
    ; CHECK-NEXT: %reg1:gpr32 = COPY $w0
    ; CHECK-NEXT: %add:gpr64 = ADDSXrx %reg0, %reg1, 18, implicit-def $nzcv
    ; CHECK-NEXT: %flags:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $x0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %reg0:gpr(s64) = COPY $x0
    %reg1:gpr(s32) = COPY $w0
    %ext:gpr(s64) = G_ZEXT %reg1(s32)
    %cst:gpr(s64) = G_CONSTANT i64 2
    %shift:gpr(s64) = G_SHL %ext, %cst(s64)
    %add:gpr(s64), %flags:gpr(s32) = G_UADDO %reg0, %shift
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0