; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -enable-legalize-types-checking < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64ZBB
; NOTE: -enable-legalize-types-checking is on one command line due to a previous
; assertion failure on an expensive checks build for @rotr_32_mask_multiple.

; These IR sequences are idioms for rotates. If rotate instructions are
; supported, they will be turned into ISD::ROTL or ISD::ROTR.
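
; As a sketch of the idiom (taken from @rotl_32 below): a 32-bit rotate left
; by a variable amount is typically written as
;   %z = sub i32 32, %y
;   %b = shl i32 %x, %y
;   %c = lshr i32 %x, %z
;   %d = or i32 %b, %c
; With Zbb this should collapse to a single rol/rolw (ror/rorw for the
; rotate-right idiom); without it, the shift/or expansion is emitted.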

define i32 @rotl_32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotl_32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a2, a1
; RV32I-NEXT:    sll a1, a0, a1
; RV32I-NEXT:    srl a0, a0, a2
; RV32I-NEXT:    or a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    negw a2, a1
; RV64I-NEXT:    sllw a1, a0, a1
; RV64I-NEXT:    srlw a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rol a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rolw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i32 32, %y
  %b = shl i32 %x, %y
  %c = lshr i32 %x, %z
  %d = or i32 %b, %c
  ret i32 %d
}

define i32 @rotr_32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotr_32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a2, a1
; RV32I-NEXT:    srl a1, a0, a1
; RV32I-NEXT:    sll a0, a0, a2
; RV32I-NEXT:    or a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    negw a2, a1
; RV64I-NEXT:    srlw a1, a0, a1
; RV64I-NEXT:    sllw a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ror a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rorw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i32 32, %y
  %b = lshr i32 %x, %y
  %c = shl i32 %x, %z
  %d = or i32 %b, %c
  ret i32 %d
}
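
; Note that Zbb only provides XLEN-wide rotates, so on RV32 the i64 rotates
; below are legalized into multi-instruction shift/branch sequences even with
; Zbb enabled; only RV64ZBB can select a single rol/ror for them.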

define i64 @rotl_64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotl_64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a5, a2, -32
; RV32I-NEXT:    mv a4, a1
; RV32I-NEXT:    bltz a5, .LBB2_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sll a1, a0, a5
; RV32I-NEXT:    j .LBB2_3
; RV32I-NEXT:  .LBB2_2:
; RV32I-NEXT:    sll a1, a4, a2
; RV32I-NEXT:    xori a3, a2, 31
; RV32I-NEXT:    srli a6, a0, 1
; RV32I-NEXT:    srl a3, a6, a3
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:  .LBB2_3:
; RV32I-NEXT:    neg a6, a2
; RV32I-NEXT:    li a3, 32
; RV32I-NEXT:    sub a7, a3, a2
; RV32I-NEXT:    srl a3, a4, a6
; RV32I-NEXT:    bltz a7, .LBB2_6
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    bltz a5, .LBB2_7
; RV32I-NEXT:  .LBB2_5:
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB2_6:
; RV32I-NEXT:    srl a6, a0, a6
; RV32I-NEXT:    li a7, 64
; RV32I-NEXT:    sub a7, a7, a2
; RV32I-NEXT:    xori a7, a7, 31
; RV32I-NEXT:    slli a4, a4, 1
; RV32I-NEXT:    sll a4, a4, a7
; RV32I-NEXT:    or a4, a6, a4
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    mv a3, a4
; RV32I-NEXT:    bgez a5, .LBB2_5
; RV32I-NEXT:  .LBB2_7:
; RV32I-NEXT:    sll a0, a0, a2
; RV32I-NEXT:    or a3, a3, a0
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    negw a2, a1
; RV64I-NEXT:    sll a1, a0, a1
; RV64I-NEXT:    srl a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    addi a5, a2, -32
; RV32ZBB-NEXT:    mv a4, a1
; RV32ZBB-NEXT:    bltz a5, .LBB2_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    sll a1, a0, a5
; RV32ZBB-NEXT:    j .LBB2_3
; RV32ZBB-NEXT:  .LBB2_2:
; RV32ZBB-NEXT:    sll a1, a4, a2
; RV32ZBB-NEXT:    xori a3, a2, 31
; RV32ZBB-NEXT:    srli a6, a0, 1
; RV32ZBB-NEXT:    srl a3, a6, a3
; RV32ZBB-NEXT:    or a1, a1, a3
; RV32ZBB-NEXT:  .LBB2_3:
; RV32ZBB-NEXT:    neg a6, a2
; RV32ZBB-NEXT:    li a3, 32
; RV32ZBB-NEXT:    sub a7, a3, a2
; RV32ZBB-NEXT:    srl a3, a4, a6
; RV32ZBB-NEXT:    bltz a7, .LBB2_6
; RV32ZBB-NEXT:  # %bb.4:
; RV32ZBB-NEXT:    bltz a5, .LBB2_7
; RV32ZBB-NEXT:  .LBB2_5:
; RV32ZBB-NEXT:    mv a0, a3
; RV32ZBB-NEXT:    ret
; RV32ZBB-NEXT:  .LBB2_6:
; RV32ZBB-NEXT:    srl a6, a0, a6
; RV32ZBB-NEXT:    li a7, 64
; RV32ZBB-NEXT:    sub a7, a7, a2
; RV32ZBB-NEXT:    xori a7, a7, 31
; RV32ZBB-NEXT:    slli a4, a4, 1
; RV32ZBB-NEXT:    sll a4, a4, a7
; RV32ZBB-NEXT:    or a4, a6, a4
; RV32ZBB-NEXT:    or a1, a1, a3
; RV32ZBB-NEXT:    mv a3, a4
; RV32ZBB-NEXT:    bgez a5, .LBB2_5
; RV32ZBB-NEXT:  .LBB2_7:
; RV32ZBB-NEXT:    sll a0, a0, a2
; RV32ZBB-NEXT:    or a3, a3, a0
; RV32ZBB-NEXT:    mv a0, a3
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rol a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i64 64, %y
  %b = shl i64 %x, %y
  %c = lshr i64 %x, %z
  %d = or i64 %b, %c
  ret i64 %d
}

define i64 @rotr_64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotr_64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a5, a2, -32
; RV32I-NEXT:    mv a4, a0
; RV32I-NEXT:    bltz a5, .LBB3_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    srl a0, a1, a5
; RV32I-NEXT:    j .LBB3_3
; RV32I-NEXT:  .LBB3_2:
; RV32I-NEXT:    srl a0, a4, a2
; RV32I-NEXT:    xori a3, a2, 31
; RV32I-NEXT:    slli a6, a1, 1
; RV32I-NEXT:    sll a3, a6, a3
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:  .LBB3_3:
; RV32I-NEXT:    neg a6, a2
; RV32I-NEXT:    li a3, 32
; RV32I-NEXT:    sub a7, a3, a2
; RV32I-NEXT:    sll a3, a4, a6
; RV32I-NEXT:    bltz a7, .LBB3_6
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    bltz a5, .LBB3_7
; RV32I-NEXT:  .LBB3_5:
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB3_6:
; RV32I-NEXT:    sll a6, a1, a6
; RV32I-NEXT:    li a7, 64
; RV32I-NEXT:    sub a7, a7, a2
; RV32I-NEXT:    xori a7, a7, 31
; RV32I-NEXT:    srli a4, a4, 1
; RV32I-NEXT:    srl a4, a4, a7
; RV32I-NEXT:    or a4, a6, a4
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    mv a3, a4
; RV32I-NEXT:    bgez a5, .LBB3_5
; RV32I-NEXT:  .LBB3_7:
; RV32I-NEXT:    srl a1, a1, a2
; RV32I-NEXT:    or a3, a3, a1
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    negw a2, a1
; RV64I-NEXT:    srl a1, a0, a1
; RV64I-NEXT:    sll a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    addi a5, a2, -32
; RV32ZBB-NEXT:    mv a4, a0
; RV32ZBB-NEXT:    bltz a5, .LBB3_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    srl a0, a1, a5
; RV32ZBB-NEXT:    j .LBB3_3
; RV32ZBB-NEXT:  .LBB3_2:
; RV32ZBB-NEXT:    srl a0, a4, a2
; RV32ZBB-NEXT:    xori a3, a2, 31
; RV32ZBB-NEXT:    slli a6, a1, 1
; RV32ZBB-NEXT:    sll a3, a6, a3
; RV32ZBB-NEXT:    or a0, a0, a3
; RV32ZBB-NEXT:  .LBB3_3:
; RV32ZBB-NEXT:    neg a6, a2
; RV32ZBB-NEXT:    li a3, 32
; RV32ZBB-NEXT:    sub a7, a3, a2
; RV32ZBB-NEXT:    sll a3, a4, a6
; RV32ZBB-NEXT:    bltz a7, .LBB3_6
; RV32ZBB-NEXT:  # %bb.4:
; RV32ZBB-NEXT:    bltz a5, .LBB3_7
; RV32ZBB-NEXT:  .LBB3_5:
; RV32ZBB-NEXT:    mv a1, a3
; RV32ZBB-NEXT:    ret
; RV32ZBB-NEXT:  .LBB3_6:
; RV32ZBB-NEXT:    sll a6, a1, a6
; RV32ZBB-NEXT:    li a7, 64
; RV32ZBB-NEXT:    sub a7, a7, a2
; RV32ZBB-NEXT:    xori a7, a7, 31
; RV32ZBB-NEXT:    srli a4, a4, 1
; RV32ZBB-NEXT:    srl a4, a4, a7
; RV32ZBB-NEXT:    or a4, a6, a4
; RV32ZBB-NEXT:    or a0, a0, a3
; RV32ZBB-NEXT:    mv a3, a4
; RV32ZBB-NEXT:    bgez a5, .LBB3_5
; RV32ZBB-NEXT:  .LBB3_7:
; RV32ZBB-NEXT:    srl a1, a1, a2
; RV32ZBB-NEXT:    or a3, a3, a1
; RV32ZBB-NEXT:    mv a1, a3
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    ror a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i64 64, %y
  %b = lshr i64 %x, %y
  %c = shl i64 %x, %z
  %d = or i64 %b, %c
  ret i64 %d
}
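
; The *_mask variants compute the opposing shift amount as (0 - %y) & (width - 1)
; rather than (width - %y). Modulo the bit width these agree (for example, with
; %y = 5 and width 32: (0 - 5) & 31 == 27 == 32 - 5), so the sequence is still
; recognizable as a rotate.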

define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotl_32_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a2, a1
; RV32I-NEXT:    sll a1, a0, a1
; RV32I-NEXT:    srl a0, a0, a2
; RV32I-NEXT:    or a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_32_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    negw a2, a1
; RV64I-NEXT:    sllw a1, a0, a1
; RV64I-NEXT:    srlw a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_32_mask:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rol a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_32_mask:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rolw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i32 0, %y
  %and = and i32 %z, 31
  %b = shl i32 %x, %y
  %c = lshr i32 %x, %and
  %d = or i32 %b, %c
  ret i32 %d
}

define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotl_32_mask_and_63_and_31:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a2, a0, a1
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_32_mask_and_63_and_31:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a2, a0, a1
; RV64I-NEXT:    negw a1, a1
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_32_mask_and_63_and_31:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rol a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_32_mask_and_63_and_31:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rolw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = and i32 %y, 63
  %b = shl i32 %x, %a
  %c = sub i32 0, %y
  %d = and i32 %c, 31
  %e = lshr i32 %x, %d
  %f = or i32 %b, %e
  ret i32 %f
}

define i32 @rotl_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotl_32_mask_or_64_or_32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    ori a2, a1, 64
; RV32I-NEXT:    sll a2, a0, a2
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    ori a1, a1, 32
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_32_mask_or_64_or_32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a2, a0, a1
; RV64I-NEXT:    negw a1, a1
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_32_mask_or_64_or_32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rol a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_32_mask_or_64_or_32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rolw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = or i32 %y, 64
  %b = shl i32 %x, %a
  %c = sub i32 0, %y
  %d = or i32 %c, 32
  %e = lshr i32 %x, %d
  %f = or i32 %b, %e
  ret i32 %f
}

define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotr_32_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    neg a2, a1
; RV32I-NEXT:    srl a1, a0, a1
; RV32I-NEXT:    sll a0, a0, a2
; RV32I-NEXT:    or a0, a1, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_32_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    negw a2, a1
; RV64I-NEXT:    srlw a1, a0, a1
; RV64I-NEXT:    sllw a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_32_mask:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ror a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_32_mask:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rorw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i32 0, %y
  %and = and i32 %z, 31
  %b = lshr i32 %x, %y
  %c = shl i32 %x, %and
  %d = or i32 %b, %c
  ret i32 %d
}

define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotr_32_mask_and_63_and_31:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srl a2, a0, a1
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_32_mask_and_63_and_31:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srlw a2, a0, a1
; RV64I-NEXT:    negw a1, a1
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_32_mask_and_63_and_31:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ror a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_32_mask_and_63_and_31:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rorw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = and i32 %y, 63
  %b = lshr i32 %x, %a
  %c = sub i32 0, %y
  %d = and i32 %c, 31
  %e = shl i32 %x, %d
  %f = or i32 %b, %e
  ret i32 %f
}

define i32 @rotr_32_mask_or_64_or_32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: rotr_32_mask_or_64_or_32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    ori a2, a1, 64
; RV32I-NEXT:    srl a2, a0, a2
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    ori a1, a1, 32
; RV32I-NEXT:    sll a0, a0, a1
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_32_mask_or_64_or_32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srlw a2, a0, a1
; RV64I-NEXT:    negw a1, a1
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_32_mask_or_64_or_32:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ror a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_32_mask_or_64_or_32:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rorw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = or i32 %y, 64
  %b = lshr i32 %x, %a
  %c = sub i32 0, %y
  %d = or i32 %c, 32
  %e = shl i32 %x, %d
  %f = or i32 %b, %e
  ret i32 %f
}

define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotl_64_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a5, a2, -32
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    bltz a5, .LBB10_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sll a1, a0, a5
; RV32I-NEXT:    j .LBB10_3
; RV32I-NEXT:  .LBB10_2:
; RV32I-NEXT:    sll a1, a3, a2
; RV32I-NEXT:    xori a4, a2, 31
; RV32I-NEXT:    srli a6, a0, 1
; RV32I-NEXT:    srl a4, a6, a4
; RV32I-NEXT:    or a1, a1, a4
; RV32I-NEXT:  .LBB10_3:
; RV32I-NEXT:    neg a6, a2
; RV32I-NEXT:    andi a4, a6, 63
; RV32I-NEXT:    addi a7, a4, -32
; RV32I-NEXT:    bltz a7, .LBB10_5
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    srl a4, a3, a7
; RV32I-NEXT:    bltz a5, .LBB10_6
; RV32I-NEXT:    j .LBB10_7
; RV32I-NEXT:  .LBB10_5:
; RV32I-NEXT:    srl a7, a0, a6
; RV32I-NEXT:    xori a4, a4, 31
; RV32I-NEXT:    slli t0, a3, 1
; RV32I-NEXT:    sll a4, t0, a4
; RV32I-NEXT:    or a4, a7, a4
; RV32I-NEXT:    srl a3, a3, a6
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    bgez a5, .LBB10_7
; RV32I-NEXT:  .LBB10_6:
; RV32I-NEXT:    sll a0, a0, a2
; RV32I-NEXT:    or a4, a4, a0
; RV32I-NEXT:  .LBB10_7:
; RV32I-NEXT:    mv a0, a4
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_64_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    neg a2, a1
; RV64I-NEXT:    sll a1, a0, a1
; RV64I-NEXT:    srl a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_64_mask:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    addi a5, a2, -32
; RV32ZBB-NEXT:    mv a3, a1
; RV32ZBB-NEXT:    bltz a5, .LBB10_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    sll a1, a0, a5
; RV32ZBB-NEXT:    j .LBB10_3
; RV32ZBB-NEXT:  .LBB10_2:
; RV32ZBB-NEXT:    sll a1, a3, a2
; RV32ZBB-NEXT:    xori a4, a2, 31
; RV32ZBB-NEXT:    srli a6, a0, 1
; RV32ZBB-NEXT:    srl a4, a6, a4
; RV32ZBB-NEXT:    or a1, a1, a4
; RV32ZBB-NEXT:  .LBB10_3:
; RV32ZBB-NEXT:    neg a6, a2
; RV32ZBB-NEXT:    andi a4, a6, 63
; RV32ZBB-NEXT:    addi a7, a4, -32
; RV32ZBB-NEXT:    bltz a7, .LBB10_5
; RV32ZBB-NEXT:  # %bb.4:
; RV32ZBB-NEXT:    srl a4, a3, a7
; RV32ZBB-NEXT:    bltz a5, .LBB10_6
; RV32ZBB-NEXT:    j .LBB10_7
; RV32ZBB-NEXT:  .LBB10_5:
; RV32ZBB-NEXT:    srl a7, a0, a6
; RV32ZBB-NEXT:    xori a4, a4, 31
; RV32ZBB-NEXT:    slli t0, a3, 1
; RV32ZBB-NEXT:    sll a4, t0, a4
; RV32ZBB-NEXT:    or a4, a7, a4
; RV32ZBB-NEXT:    srl a3, a3, a6
; RV32ZBB-NEXT:    or a1, a1, a3
; RV32ZBB-NEXT:    bgez a5, .LBB10_7
; RV32ZBB-NEXT:  .LBB10_6:
; RV32ZBB-NEXT:    sll a0, a0, a2
; RV32ZBB-NEXT:    or a4, a4, a0
; RV32ZBB-NEXT:  .LBB10_7:
; RV32ZBB-NEXT:    mv a0, a4
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_64_mask:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rol a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i64 0, %y
  %and = and i64 %z, 63
  %b = shl i64 %x, %y
  %c = lshr i64 %x, %and
  %d = or i64 %b, %c
  ret i64 %d
}

define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotl_64_mask_and_127_and_63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a4, a2, 127
; RV32I-NEXT:    addi a5, a4, -32
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    bltz a5, .LBB11_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sll a1, a0, a5
; RV32I-NEXT:    j .LBB11_3
; RV32I-NEXT:  .LBB11_2:
; RV32I-NEXT:    sll a1, a3, a2
; RV32I-NEXT:    srli a6, a0, 1
; RV32I-NEXT:    xori a4, a4, 31
; RV32I-NEXT:    srl a4, a6, a4
; RV32I-NEXT:    or a1, a1, a4
; RV32I-NEXT:  .LBB11_3:
; RV32I-NEXT:    neg a6, a2
; RV32I-NEXT:    andi a4, a6, 63
; RV32I-NEXT:    addi a7, a4, -32
; RV32I-NEXT:    bltz a7, .LBB11_5
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    srl a4, a3, a7
; RV32I-NEXT:    bltz a5, .LBB11_6
; RV32I-NEXT:    j .LBB11_7
; RV32I-NEXT:  .LBB11_5:
; RV32I-NEXT:    srl a7, a0, a6
; RV32I-NEXT:    xori a4, a4, 31
; RV32I-NEXT:    slli t0, a3, 1
; RV32I-NEXT:    sll a4, t0, a4
; RV32I-NEXT:    or a4, a7, a4
; RV32I-NEXT:    srl a3, a3, a6
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    bgez a5, .LBB11_7
; RV32I-NEXT:  .LBB11_6:
; RV32I-NEXT:    sll a0, a0, a2
; RV32I-NEXT:    or a4, a4, a0
; RV32I-NEXT:  .LBB11_7:
; RV32I-NEXT:    mv a0, a4
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_64_mask_and_127_and_63:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sll a2, a0, a1
; RV64I-NEXT:    neg a1, a1
; RV64I-NEXT:    srl a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_64_mask_and_127_and_63:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    andi a4, a2, 127
; RV32ZBB-NEXT:    addi a5, a4, -32
; RV32ZBB-NEXT:    mv a3, a1
; RV32ZBB-NEXT:    bltz a5, .LBB11_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    sll a1, a0, a5
; RV32ZBB-NEXT:    j .LBB11_3
; RV32ZBB-NEXT:  .LBB11_2:
; RV32ZBB-NEXT:    sll a1, a3, a2
; RV32ZBB-NEXT:    srli a6, a0, 1
; RV32ZBB-NEXT:    xori a4, a4, 31
; RV32ZBB-NEXT:    srl a4, a6, a4
; RV32ZBB-NEXT:    or a1, a1, a4
; RV32ZBB-NEXT:  .LBB11_3:
; RV32ZBB-NEXT:    neg a6, a2
; RV32ZBB-NEXT:    andi a4, a6, 63
; RV32ZBB-NEXT:    addi a7, a4, -32
; RV32ZBB-NEXT:    bltz a7, .LBB11_5
; RV32ZBB-NEXT:  # %bb.4:
; RV32ZBB-NEXT:    srl a4, a3, a7
; RV32ZBB-NEXT:    bltz a5, .LBB11_6
; RV32ZBB-NEXT:    j .LBB11_7
; RV32ZBB-NEXT:  .LBB11_5:
; RV32ZBB-NEXT:    srl a7, a0, a6
; RV32ZBB-NEXT:    xori a4, a4, 31
; RV32ZBB-NEXT:    slli t0, a3, 1
; RV32ZBB-NEXT:    sll a4, t0, a4
; RV32ZBB-NEXT:    or a4, a7, a4
; RV32ZBB-NEXT:    srl a3, a3, a6
; RV32ZBB-NEXT:    or a1, a1, a3
; RV32ZBB-NEXT:    bgez a5, .LBB11_7
; RV32ZBB-NEXT:  .LBB11_6:
; RV32ZBB-NEXT:    sll a0, a0, a2
; RV32ZBB-NEXT:    or a4, a4, a0
; RV32ZBB-NEXT:  .LBB11_7:
; RV32ZBB-NEXT:    mv a0, a4
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_64_mask_and_127_and_63:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rol a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = and i64 %y, 127
  %b = shl i64 %x, %a
  %c = sub i64 0, %y
  %d = and i64 %c, 63
  %e = lshr i64 %x, %d
  %f = or i64 %b, %e
  ret i64 %f
}

define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotl_64_mask_or_128_or_64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a3, a0, a2
; RV32I-NEXT:    neg a0, a2
; RV32I-NEXT:    srl a0, a1, a0
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_64_mask_or_128_or_64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ori a2, a1, 128
; RV64I-NEXT:    sll a2, a0, a2
; RV64I-NEXT:    neg a1, a1
; RV64I-NEXT:    ori a1, a1, 64
; RV64I-NEXT:    srl a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_64_mask_or_128_or_64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    sll a3, a0, a2
; RV32ZBB-NEXT:    neg a0, a2
; RV32ZBB-NEXT:    srl a0, a1, a0
; RV32ZBB-NEXT:    mv a1, a3
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_64_mask_or_128_or_64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rol a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = or i64 %y, 128
  %b = shl i64 %x, %a
  %c = sub i64 0, %y
  %d = or i64 %c, 64
  %e = lshr i64 %x, %d
  %f = or i64 %b, %e
  ret i64 %f
}

define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotr_64_mask:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi a5, a2, -32
; RV32I-NEXT:    mv a3, a0
; RV32I-NEXT:    bltz a5, .LBB13_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    srl a0, a1, a5
; RV32I-NEXT:    j .LBB13_3
; RV32I-NEXT:  .LBB13_2:
; RV32I-NEXT:    srl a0, a3, a2
; RV32I-NEXT:    xori a4, a2, 31
; RV32I-NEXT:    slli a6, a1, 1
; RV32I-NEXT:    sll a4, a6, a4
; RV32I-NEXT:    or a0, a0, a4
; RV32I-NEXT:  .LBB13_3:
; RV32I-NEXT:    neg a6, a2
; RV32I-NEXT:    andi a4, a6, 63
; RV32I-NEXT:    addi a7, a4, -32
; RV32I-NEXT:    bltz a7, .LBB13_5
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    sll a4, a3, a7
; RV32I-NEXT:    bltz a5, .LBB13_6
; RV32I-NEXT:    j .LBB13_7
; RV32I-NEXT:  .LBB13_5:
; RV32I-NEXT:    sll a7, a1, a6
; RV32I-NEXT:    xori a4, a4, 31
; RV32I-NEXT:    srli t0, a3, 1
; RV32I-NEXT:    srl a4, t0, a4
; RV32I-NEXT:    or a4, a7, a4
; RV32I-NEXT:    sll a3, a3, a6
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    bgez a5, .LBB13_7
; RV32I-NEXT:  .LBB13_6:
; RV32I-NEXT:    srl a1, a1, a2
; RV32I-NEXT:    or a4, a4, a1
; RV32I-NEXT:  .LBB13_7:
; RV32I-NEXT:    mv a1, a4
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_64_mask:
; RV64I:       # %bb.0:
; RV64I-NEXT:    neg a2, a1
; RV64I-NEXT:    srl a1, a0, a1
; RV64I-NEXT:    sll a0, a0, a2
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_64_mask:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    addi a5, a2, -32
; RV32ZBB-NEXT:    mv a3, a0
; RV32ZBB-NEXT:    bltz a5, .LBB13_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    srl a0, a1, a5
; RV32ZBB-NEXT:    j .LBB13_3
; RV32ZBB-NEXT:  .LBB13_2:
; RV32ZBB-NEXT:    srl a0, a3, a2
; RV32ZBB-NEXT:    xori a4, a2, 31
; RV32ZBB-NEXT:    slli a6, a1, 1
; RV32ZBB-NEXT:    sll a4, a6, a4
; RV32ZBB-NEXT:    or a0, a0, a4
; RV32ZBB-NEXT:  .LBB13_3:
; RV32ZBB-NEXT:    neg a6, a2
; RV32ZBB-NEXT:    andi a4, a6, 63
; RV32ZBB-NEXT:    addi a7, a4, -32
; RV32ZBB-NEXT:    bltz a7, .LBB13_5
; RV32ZBB-NEXT:  # %bb.4:
; RV32ZBB-NEXT:    sll a4, a3, a7
; RV32ZBB-NEXT:    bltz a5, .LBB13_6
; RV32ZBB-NEXT:    j .LBB13_7
; RV32ZBB-NEXT:  .LBB13_5:
; RV32ZBB-NEXT:    sll a7, a1, a6
; RV32ZBB-NEXT:    xori a4, a4, 31
; RV32ZBB-NEXT:    srli t0, a3, 1
; RV32ZBB-NEXT:    srl a4, t0, a4
; RV32ZBB-NEXT:    or a4, a7, a4
; RV32ZBB-NEXT:    sll a3, a3, a6
; RV32ZBB-NEXT:    or a0, a0, a3
; RV32ZBB-NEXT:    bgez a5, .LBB13_7
; RV32ZBB-NEXT:  .LBB13_6:
; RV32ZBB-NEXT:    srl a1, a1, a2
; RV32ZBB-NEXT:    or a4, a4, a1
; RV32ZBB-NEXT:  .LBB13_7:
; RV32ZBB-NEXT:    mv a1, a4
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_64_mask:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    ror a0, a0, a1
; RV64ZBB-NEXT:    ret
  %z = sub i64 0, %y
  %and = and i64 %z, 63
  %b = lshr i64 %x, %y
  %c = shl i64 %x, %and
  %d = or i64 %b, %c
  ret i64 %d
}

define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotr_64_mask_and_127_and_63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a4, a2, 127
; RV32I-NEXT:    addi a5, a4, -32
; RV32I-NEXT:    mv a3, a0
; RV32I-NEXT:    bltz a5, .LBB14_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    srl a0, a1, a5
; RV32I-NEXT:    j .LBB14_3
; RV32I-NEXT:  .LBB14_2:
; RV32I-NEXT:    srl a0, a3, a2
; RV32I-NEXT:    slli a6, a1, 1
; RV32I-NEXT:    xori a4, a4, 31
; RV32I-NEXT:    sll a4, a6, a4
; RV32I-NEXT:    or a0, a0, a4
; RV32I-NEXT:  .LBB14_3:
; RV32I-NEXT:    neg a6, a2
; RV32I-NEXT:    andi a4, a6, 63
; RV32I-NEXT:    addi a7, a4, -32
; RV32I-NEXT:    bltz a7, .LBB14_5
; RV32I-NEXT:  # %bb.4:
; RV32I-NEXT:    sll a4, a3, a7
; RV32I-NEXT:    bltz a5, .LBB14_6
; RV32I-NEXT:    j .LBB14_7
; RV32I-NEXT:  .LBB14_5:
; RV32I-NEXT:    sll a7, a1, a6
; RV32I-NEXT:    xori a4, a4, 31
; RV32I-NEXT:    srli t0, a3, 1
; RV32I-NEXT:    srl a4, t0, a4
; RV32I-NEXT:    or a4, a7, a4
; RV32I-NEXT:    sll a3, a3, a6
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    bgez a5, .LBB14_7
; RV32I-NEXT:  .LBB14_6:
; RV32I-NEXT:    srl a1, a1, a2
; RV32I-NEXT:    or a4, a4, a1
; RV32I-NEXT:  .LBB14_7:
; RV32I-NEXT:    mv a1, a4
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_64_mask_and_127_and_63:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srl a2, a0, a1
; RV64I-NEXT:    neg a1, a1
; RV64I-NEXT:    sll a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_64_mask_and_127_and_63:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    andi a4, a2, 127
; RV32ZBB-NEXT:    addi a5, a4, -32
; RV32ZBB-NEXT:    mv a3, a0
; RV32ZBB-NEXT:    bltz a5, .LBB14_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    srl a0, a1, a5
; RV32ZBB-NEXT:    j .LBB14_3
; RV32ZBB-NEXT:  .LBB14_2:
; RV32ZBB-NEXT:    srl a0, a3, a2
; RV32ZBB-NEXT:    slli a6, a1, 1
; RV32ZBB-NEXT:    xori a4, a4, 31
; RV32ZBB-NEXT:    sll a4, a6, a4
; RV32ZBB-NEXT:    or a0, a0, a4
; RV32ZBB-NEXT:  .LBB14_3:
; RV32ZBB-NEXT:    neg a6, a2
; RV32ZBB-NEXT:    andi a4, a6, 63
; RV32ZBB-NEXT:    addi a7, a4, -32
; RV32ZBB-NEXT:    bltz a7, .LBB14_5
; RV32ZBB-NEXT:  # %bb.4:
; RV32ZBB-NEXT:    sll a4, a3, a7
; RV32ZBB-NEXT:    bltz a5, .LBB14_6
; RV32ZBB-NEXT:    j .LBB14_7
; RV32ZBB-NEXT:  .LBB14_5:
; RV32ZBB-NEXT:    sll a7, a1, a6
; RV32ZBB-NEXT:    xori a4, a4, 31
; RV32ZBB-NEXT:    srli t0, a3, 1
; RV32ZBB-NEXT:    srl a4, t0, a4
; RV32ZBB-NEXT:    or a4, a7, a4
; RV32ZBB-NEXT:    sll a3, a3, a6
; RV32ZBB-NEXT:    or a0, a0, a3
; RV32ZBB-NEXT:    bgez a5, .LBB14_7
; RV32ZBB-NEXT:  .LBB14_6:
; RV32ZBB-NEXT:    srl a1, a1, a2
; RV32ZBB-NEXT:    or a4, a4, a1
; RV32ZBB-NEXT:  .LBB14_7:
; RV32ZBB-NEXT:    mv a1, a4
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_64_mask_and_127_and_63:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    ror a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = and i64 %y, 127
  %b = lshr i64 %x, %a
  %c = sub i64 0, %y
  %d = and i64 %c, 63
  %e = shl i64 %x, %d
  %f = or i64 %b, %e
  ret i64 %f
}

define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: rotr_64_mask_or_128_or_64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srl a3, a1, a2
; RV32I-NEXT:    neg a1, a2
; RV32I-NEXT:    sll a1, a0, a1
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_64_mask_or_128_or_64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ori a2, a1, 128
; RV64I-NEXT:    srl a2, a0, a2
; RV64I-NEXT:    neg a1, a1
; RV64I-NEXT:    ori a1, a1, 64
; RV64I-NEXT:    sll a0, a0, a1
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_64_mask_or_128_or_64:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    srl a3, a1, a2
; RV32ZBB-NEXT:    neg a1, a2
; RV32ZBB-NEXT:    sll a1, a0, a1
; RV32ZBB-NEXT:    mv a0, a3
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_64_mask_or_128_or_64:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    ror a0, a0, a1
; RV64ZBB-NEXT:    ret
  %a = or i64 %y, 128
  %b = lshr i64 %x, %a
  %c = sub i64 0, %y
  %d = or i64 %c, 64
  %e = shl i64 %x, %d
  %f = or i64 %b, %e
  ret i64 %f
}

; Test that we're able to remove a mask on the rotate amount that has more than
; one use.
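; Since llvm.fshl/llvm.fshr interpret their shift amount modulo the bit width,
; an 'and' with width-1 on the amount is redundant for the rotate itself and
; should be folded away even when the masked value also feeds another shift.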

define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; RV32I-LABEL: rotl_32_mask_shared:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a3, a0, a2
; RV32I-NEXT:    neg a4, a2
; RV32I-NEXT:    srl a0, a0, a4
; RV32I-NEXT:    or a0, a3, a0
; RV32I-NEXT:    sll a1, a1, a2
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_32_mask_shared:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a3, a0, a2
; RV64I-NEXT:    negw a4, a2
; RV64I-NEXT:    srlw a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    sllw a1, a1, a2
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_32_mask_shared:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rol a0, a0, a2
; RV32ZBB-NEXT:    sll a1, a1, a2
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_32_mask_shared:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rolw a0, a0, a2
; RV64ZBB-NEXT:    sllw a1, a1, a2
; RV64ZBB-NEXT:    addw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = shl i32 %b, %maskedamt
  %3 = add i32 %1, %2
  ret i32 %3
}

declare i32 @llvm.fshl.i32(i32, i32, i32)

define signext i64 @rotl_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind {
; RV32I-LABEL: rotl_64_mask_shared:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a5, a4, 26
; RV32I-NEXT:    srli a5, a5, 31
; RV32I-NEXT:    mv a7, a0
; RV32I-NEXT:    bnez a5, .LBB17_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a7, a1
; RV32I-NEXT:  .LBB17_2:
; RV32I-NEXT:    andi a6, a4, 63
; RV32I-NEXT:    sll t0, a7, a4
; RV32I-NEXT:    bnez a5, .LBB17_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:  .LBB17_4:
; RV32I-NEXT:    srli a0, a1, 1
; RV32I-NEXT:    not t1, a4
; RV32I-NEXT:    srl a0, a0, t1
; RV32I-NEXT:    or a5, t0, a0
; RV32I-NEXT:    sll a1, a1, a4
; RV32I-NEXT:    srli a0, a7, 1
; RV32I-NEXT:    srl a7, a0, t1
; RV32I-NEXT:    addi a0, a6, -32
; RV32I-NEXT:    or a1, a1, a7
; RV32I-NEXT:    bltz a0, .LBB17_6
; RV32I-NEXT:  # %bb.5:
; RV32I-NEXT:    sll a3, a2, a0
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    j .LBB17_7
; RV32I-NEXT:  .LBB17_6:
; RV32I-NEXT:    sll a0, a3, a4
; RV32I-NEXT:    srli a3, a2, 1
; RV32I-NEXT:    xori a6, a6, 31
; RV32I-NEXT:    srl a3, a3, a6
; RV32I-NEXT:    or a3, a0, a3
; RV32I-NEXT:    sll a0, a2, a4
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:  .LBB17_7:
; RV32I-NEXT:    sltu a1, a0, a1
; RV32I-NEXT:    add a2, a5, a3
; RV32I-NEXT:    add a1, a2, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_64_mask_shared:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sll a3, a0, a2
; RV64I-NEXT:    neg a4, a2
; RV64I-NEXT:    srl a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    sll a1, a1, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_64_mask_shared:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    slli a5, a4, 26
; RV32ZBB-NEXT:    srli a5, a5, 31
; RV32ZBB-NEXT:    mv a7, a0
; RV32ZBB-NEXT:    bnez a5, .LBB17_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    mv a7, a1
; RV32ZBB-NEXT:  .LBB17_2:
; RV32ZBB-NEXT:    andi a6, a4, 63
; RV32ZBB-NEXT:    sll t0, a7, a4
; RV32ZBB-NEXT:    bnez a5, .LBB17_4
; RV32ZBB-NEXT:  # %bb.3:
; RV32ZBB-NEXT:    mv a1, a0
; RV32ZBB-NEXT:  .LBB17_4:
; RV32ZBB-NEXT:    srli a0, a1, 1
; RV32ZBB-NEXT:    not t1, a4
; RV32ZBB-NEXT:    srl a0, a0, t1
; RV32ZBB-NEXT:    or a5, t0, a0
; RV32ZBB-NEXT:    sll a1, a1, a4
; RV32ZBB-NEXT:    srli a0, a7, 1
; RV32ZBB-NEXT:    srl a7, a0, t1
; RV32ZBB-NEXT:    addi a0, a6, -32
; RV32ZBB-NEXT:    or a1, a1, a7
; RV32ZBB-NEXT:    bltz a0, .LBB17_6
; RV32ZBB-NEXT:  # %bb.5:
; RV32ZBB-NEXT:    sll a3, a2, a0
; RV32ZBB-NEXT:    mv a0, a1
; RV32ZBB-NEXT:    j .LBB17_7
; RV32ZBB-NEXT:  .LBB17_6:
; RV32ZBB-NEXT:    sll a0, a3, a4
; RV32ZBB-NEXT:    srli a3, a2, 1
; RV32ZBB-NEXT:    xori a6, a6, 31
; RV32ZBB-NEXT:    srl a3, a3, a6
; RV32ZBB-NEXT:    or a3, a0, a3
; RV32ZBB-NEXT:    sll a0, a2, a4
; RV32ZBB-NEXT:    add a0, a1, a0
; RV32ZBB-NEXT:  .LBB17_7:
; RV32ZBB-NEXT:    sltu a1, a0, a1
; RV32ZBB-NEXT:    add a2, a5, a3
; RV32ZBB-NEXT:    add a1, a2, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_64_mask_shared:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rol a0, a0, a2
; RV64ZBB-NEXT:    sll a1, a1, a2
; RV64ZBB-NEXT:    add a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i64 %amt, 63
  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %maskedamt)
  %2 = shl i64 %b, %maskedamt
  %3 = add i64 %1, %2
  ret i64 %3
}

declare i64 @llvm.fshl.i64(i64, i64, i64)

define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; RV32I-LABEL: rotr_32_mask_shared:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srl a3, a0, a2
; RV32I-NEXT:    neg a4, a2
; RV32I-NEXT:    sll a0, a0, a4
; RV32I-NEXT:    or a0, a3, a0
; RV32I-NEXT:    sll a1, a1, a2
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_32_mask_shared:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srlw a3, a0, a2
; RV64I-NEXT:    negw a4, a2
; RV64I-NEXT:    sllw a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    sllw a1, a1, a2
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_32_mask_shared:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ror a0, a0, a2
; RV32ZBB-NEXT:    sll a1, a1, a2
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_32_mask_shared:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rorw a0, a0, a2
; RV64ZBB-NEXT:    sllw a1, a1, a2
; RV64ZBB-NEXT:    addw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = shl i32 %b, %maskedamt
  %3 = add i32 %1, %2
  ret i32 %3
}

declare i32 @llvm.fshr.i32(i32, i32, i32)

define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 signext %amt) nounwind {
; RV32I-LABEL: rotr_64_mask_shared:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a7, a4, 32
; RV32I-NEXT:    mv a6, a1
; RV32I-NEXT:    beqz a7, .LBB19_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a6, a0
; RV32I-NEXT:  .LBB19_2:
; RV32I-NEXT:    andi a5, a4, 63
; RV32I-NEXT:    srl t0, a6, a4
; RV32I-NEXT:    beqz a7, .LBB19_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB19_4:
; RV32I-NEXT:    slli a1, a0, 1
; RV32I-NEXT:    not a7, a4
; RV32I-NEXT:    sll a1, a1, a7
; RV32I-NEXT:    or a1, a1, t0
; RV32I-NEXT:    srl t0, a0, a4
; RV32I-NEXT:    slli a0, a6, 1
; RV32I-NEXT:    sll a6, a0, a7
; RV32I-NEXT:    addi a0, a5, -32
; RV32I-NEXT:    or a6, a6, t0
; RV32I-NEXT:    bltz a0, .LBB19_6
; RV32I-NEXT:  # %bb.5:
; RV32I-NEXT:    sll a3, a2, a0
; RV32I-NEXT:    mv a0, a6
; RV32I-NEXT:    j .LBB19_7
; RV32I-NEXT:  .LBB19_6:
; RV32I-NEXT:    sll a0, a3, a4
; RV32I-NEXT:    srli a3, a2, 1
; RV32I-NEXT:    xori a5, a5, 31
; RV32I-NEXT:    srl a3, a3, a5
; RV32I-NEXT:    or a3, a0, a3
; RV32I-NEXT:    sll a0, a2, a4
; RV32I-NEXT:    add a0, a6, a0
; RV32I-NEXT:  .LBB19_7:
; RV32I-NEXT:    sltu a2, a0, a6
; RV32I-NEXT:    add a1, a1, a3
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_64_mask_shared:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srl a3, a0, a2
; RV64I-NEXT:    neg a4, a2
; RV64I-NEXT:    sll a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    sll a1, a1, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_64_mask_shared:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    andi a7, a4, 32
; RV32ZBB-NEXT:    mv a6, a1
; RV32ZBB-NEXT:    beqz a7, .LBB19_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    mv a6, a0
; RV32ZBB-NEXT:  .LBB19_2:
; RV32ZBB-NEXT:    andi a5, a4, 63
; RV32ZBB-NEXT:    srl t0, a6, a4
; RV32ZBB-NEXT:    beqz a7, .LBB19_4
; RV32ZBB-NEXT:  # %bb.3:
; RV32ZBB-NEXT:    mv a0, a1
; RV32ZBB-NEXT:  .LBB19_4:
; RV32ZBB-NEXT:    slli a1, a0, 1
; RV32ZBB-NEXT:    not a7, a4
; RV32ZBB-NEXT:    sll a1, a1, a7
; RV32ZBB-NEXT:    or a1, a1, t0
; RV32ZBB-NEXT:    srl t0, a0, a4
; RV32ZBB-NEXT:    slli a0, a6, 1
; RV32ZBB-NEXT:    sll a6, a0, a7
; RV32ZBB-NEXT:    addi a0, a5, -32
; RV32ZBB-NEXT:    or a6, a6, t0
; RV32ZBB-NEXT:    bltz a0, .LBB19_6
; RV32ZBB-NEXT:  # %bb.5:
; RV32ZBB-NEXT:    sll a3, a2, a0
; RV32ZBB-NEXT:    mv a0, a6
; RV32ZBB-NEXT:    j .LBB19_7
; RV32ZBB-NEXT:  .LBB19_6:
; RV32ZBB-NEXT:    sll a0, a3, a4
; RV32ZBB-NEXT:    srli a3, a2, 1
; RV32ZBB-NEXT:    xori a5, a5, 31
; RV32ZBB-NEXT:    srl a3, a3, a5
; RV32ZBB-NEXT:    or a3, a0, a3
; RV32ZBB-NEXT:    sll a0, a2, a4
; RV32ZBB-NEXT:    add a0, a6, a0
; RV32ZBB-NEXT:  .LBB19_7:
; RV32ZBB-NEXT:    sltu a2, a0, a6
; RV32ZBB-NEXT:    add a1, a1, a3
; RV32ZBB-NEXT:    add a1, a1, a2
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_64_mask_shared:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    ror a0, a0, a2
; RV64ZBB-NEXT:    sll a1, a1, a2
; RV64ZBB-NEXT:    add a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i64 %amt, 63
  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %maskedamt)
  %2 = shl i64 %b, %maskedamt
  %3 = add i64 %1, %2
  ret i64 %3
}

declare i64 @llvm.fshr.i64(i64, i64, i64)
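
; The *_mask_multiple variants repeat the check with the masked amount feeding
; two rotates; both funnel-shift calls should become rol/ror (rolw/rorw on
; RV64ZBB) with the mask removed.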

define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; RV32I-LABEL: rotl_32_mask_multiple:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sll a3, a0, a2
; RV32I-NEXT:    neg a4, a2
; RV32I-NEXT:    srl a0, a0, a4
; RV32I-NEXT:    or a0, a3, a0
; RV32I-NEXT:    sll a2, a1, a2
; RV32I-NEXT:    srl a1, a1, a4
; RV32I-NEXT:    or a1, a2, a1
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_32_mask_multiple:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sllw a3, a0, a2
; RV64I-NEXT:    negw a4, a2
; RV64I-NEXT:    srlw a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    sllw a2, a1, a2
; RV64I-NEXT:    srlw a1, a1, a4
; RV64I-NEXT:    or a1, a2, a1
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_32_mask_multiple:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    rol a0, a0, a2
; RV32ZBB-NEXT:    rol a1, a1, a2
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_32_mask_multiple:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rolw a0, a0, a2
; RV64ZBB-NEXT:    rolw a1, a1, a2
; RV64ZBB-NEXT:    addw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt)
  %3 = add i32 %1, %2
  ret i32 %3
}

define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
; RV32I-LABEL: rotl_64_mask_multiple:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a5, a4, 26
; RV32I-NEXT:    srli a5, a5, 31
; RV32I-NEXT:    mv a6, a1
; RV32I-NEXT:    bnez a5, .LBB21_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a6, a0
; RV32I-NEXT:  .LBB21_2:
; RV32I-NEXT:    bnez a5, .LBB21_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB21_4:
; RV32I-NEXT:    sll a7, a6, a4
; RV32I-NEXT:    srli t0, a0, 1
; RV32I-NEXT:    not a1, a4
; RV32I-NEXT:    srl t0, t0, a1
; RV32I-NEXT:    sll t1, a0, a4
; RV32I-NEXT:    srli a0, a6, 1
; RV32I-NEXT:    srl t2, a0, a1
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:    bnez a5, .LBB21_6
; RV32I-NEXT:  # %bb.5:
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:  .LBB21_6:
; RV32I-NEXT:    or a6, a7, t0
; RV32I-NEXT:    or a7, t1, t2
; RV32I-NEXT:    sll t0, a0, a4
; RV32I-NEXT:    bnez a5, .LBB21_8
; RV32I-NEXT:  # %bb.7:
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:  .LBB21_8:
; RV32I-NEXT:    srli a3, a2, 1
; RV32I-NEXT:    srl a3, a3, a1
; RV32I-NEXT:    or a3, t0, a3
; RV32I-NEXT:    sll a2, a2, a4
; RV32I-NEXT:    srli a0, a0, 1
; RV32I-NEXT:    srl a0, a0, a1
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    add a1, a7, a0
; RV32I-NEXT:    add a0, a6, a3
; RV32I-NEXT:    sltu a2, a0, a6
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotl_64_mask_multiple:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sll a3, a0, a2
; RV64I-NEXT:    neg a4, a2
; RV64I-NEXT:    srl a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    sll a2, a1, a2
; RV64I-NEXT:    srl a1, a1, a4
; RV64I-NEXT:    or a1, a2, a1
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotl_64_mask_multiple:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    slli a5, a4, 26
; RV32ZBB-NEXT:    srli a5, a5, 31
; RV32ZBB-NEXT:    mv a6, a1
; RV32ZBB-NEXT:    bnez a5, .LBB21_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    mv a6, a0
; RV32ZBB-NEXT:  .LBB21_2:
; RV32ZBB-NEXT:    bnez a5, .LBB21_4
; RV32ZBB-NEXT:  # %bb.3:
; RV32ZBB-NEXT:    mv a0, a1
; RV32ZBB-NEXT:  .LBB21_4:
; RV32ZBB-NEXT:    sll a7, a6, a4
; RV32ZBB-NEXT:    srli t0, a0, 1
; RV32ZBB-NEXT:    not a1, a4
; RV32ZBB-NEXT:    srl t0, t0, a1
; RV32ZBB-NEXT:    sll t1, a0, a4
; RV32ZBB-NEXT:    srli a0, a6, 1
; RV32ZBB-NEXT:    srl t2, a0, a1
; RV32ZBB-NEXT:    mv a0, a3
; RV32ZBB-NEXT:    bnez a5, .LBB21_6
; RV32ZBB-NEXT:  # %bb.5:
; RV32ZBB-NEXT:    mv a0, a2
; RV32ZBB-NEXT:  .LBB21_6:
; RV32ZBB-NEXT:    or a6, a7, t0
; RV32ZBB-NEXT:    or a7, t1, t2
; RV32ZBB-NEXT:    sll t0, a0, a4
; RV32ZBB-NEXT:    bnez a5, .LBB21_8
; RV32ZBB-NEXT:  # %bb.7:
; RV32ZBB-NEXT:    mv a2, a3
; RV32ZBB-NEXT:  .LBB21_8:
; RV32ZBB-NEXT:    srli a3, a2, 1
; RV32ZBB-NEXT:    srl a3, a3, a1
; RV32ZBB-NEXT:    or a3, t0, a3
; RV32ZBB-NEXT:    sll a2, a2, a4
; RV32ZBB-NEXT:    srli a0, a0, 1
; RV32ZBB-NEXT:    srl a0, a0, a1
; RV32ZBB-NEXT:    or a0, a2, a0
; RV32ZBB-NEXT:    add a1, a7, a0
; RV32ZBB-NEXT:    add a0, a6, a3
; RV32ZBB-NEXT:    sltu a2, a0, a6
; RV32ZBB-NEXT:    add a1, a1, a2
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotl_64_mask_multiple:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rol a0, a0, a2
; RV64ZBB-NEXT:    rol a1, a1, a2
; RV64ZBB-NEXT:    add a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i64 %amt, 63
  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %maskedamt)
  %2 = tail call i64 @llvm.fshl.i64(i64 %b, i64 %b, i64 %maskedamt)
  %3 = add i64 %1, %2
  ret i64 %3
}

define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
; RV32I-LABEL: rotr_32_mask_multiple:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srl a3, a0, a2
; RV32I-NEXT:    neg a4, a2
; RV32I-NEXT:    sll a0, a0, a4
; RV32I-NEXT:    or a0, a3, a0
; RV32I-NEXT:    srl a2, a1, a2
; RV32I-NEXT:    sll a1, a1, a4
; RV32I-NEXT:    or a1, a2, a1
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_32_mask_multiple:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srlw a3, a0, a2
; RV64I-NEXT:    negw a4, a2
; RV64I-NEXT:    sllw a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    srlw a2, a1, a2
; RV64I-NEXT:    sllw a1, a1, a4
; RV64I-NEXT:    or a1, a2, a1
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_32_mask_multiple:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    ror a0, a0, a2
; RV32ZBB-NEXT:    ror a1, a1, a2
; RV32ZBB-NEXT:    add a0, a0, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_32_mask_multiple:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    rorw a0, a0, a2
; RV64ZBB-NEXT:    rorw a1, a1, a2
; RV64ZBB-NEXT:    addw a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i32 %amt, 31
  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
  %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
  %3 = add i32 %1, %2
  ret i32 %3
}

define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
; RV32I-LABEL: rotr_64_mask_multiple:
; RV32I:       # %bb.0:
; RV32I-NEXT:    andi a5, a4, 32
; RV32I-NEXT:    mv a6, a0
; RV32I-NEXT:    beqz a5, .LBB23_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a6, a1
; RV32I-NEXT:  .LBB23_2:
; RV32I-NEXT:    beqz a5, .LBB23_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:  .LBB23_4:
; RV32I-NEXT:    srl a7, a6, a4
; RV32I-NEXT:    slli t0, a1, 1
; RV32I-NEXT:    not a0, a4
; RV32I-NEXT:    sll t0, t0, a0
; RV32I-NEXT:    srl t1, a1, a4
; RV32I-NEXT:    slli a1, a6, 1
; RV32I-NEXT:    sll t2, a1, a0
; RV32I-NEXT:    mv a6, a2
; RV32I-NEXT:    beqz a5, .LBB23_6
; RV32I-NEXT:  # %bb.5:
; RV32I-NEXT:    mv a6, a3
; RV32I-NEXT:  .LBB23_6:
; RV32I-NEXT:    or a1, t0, a7
; RV32I-NEXT:    or a7, t2, t1
; RV32I-NEXT:    srl t0, a6, a4
; RV32I-NEXT:    beqz a5, .LBB23_8
; RV32I-NEXT:  # %bb.7:
; RV32I-NEXT:    mv a3, a2
; RV32I-NEXT:  .LBB23_8:
; RV32I-NEXT:    slli a2, a3, 1
; RV32I-NEXT:    sll a2, a2, a0
; RV32I-NEXT:    or a2, a2, t0
; RV32I-NEXT:    srl a3, a3, a4
; RV32I-NEXT:    slli a4, a6, 1
; RV32I-NEXT:    sll a0, a4, a0
; RV32I-NEXT:    or a0, a0, a3
; RV32I-NEXT:    add a3, a7, a0
; RV32I-NEXT:    add a0, a1, a2
; RV32I-NEXT:    sltu a1, a0, a1
; RV32I-NEXT:    add a1, a3, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rotr_64_mask_multiple:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srl a3, a0, a2
; RV64I-NEXT:    neg a4, a2
; RV64I-NEXT:    sll a0, a0, a4
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    srl a2, a1, a2
; RV64I-NEXT:    sll a1, a1, a4
; RV64I-NEXT:    or a1, a2, a1
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32ZBB-LABEL: rotr_64_mask_multiple:
; RV32ZBB:       # %bb.0:
; RV32ZBB-NEXT:    andi a5, a4, 32
; RV32ZBB-NEXT:    mv a6, a0
; RV32ZBB-NEXT:    beqz a5, .LBB23_2
; RV32ZBB-NEXT:  # %bb.1:
; RV32ZBB-NEXT:    mv a6, a1
; RV32ZBB-NEXT:  .LBB23_2:
; RV32ZBB-NEXT:    beqz a5, .LBB23_4
; RV32ZBB-NEXT:  # %bb.3:
; RV32ZBB-NEXT:    mv a1, a0
; RV32ZBB-NEXT:  .LBB23_4:
; RV32ZBB-NEXT:    srl a7, a6, a4
; RV32ZBB-NEXT:    slli t0, a1, 1
; RV32ZBB-NEXT:    not a0, a4
; RV32ZBB-NEXT:    sll t0, t0, a0
; RV32ZBB-NEXT:    srl t1, a1, a4
; RV32ZBB-NEXT:    slli a1, a6, 1
; RV32ZBB-NEXT:    sll t2, a1, a0
; RV32ZBB-NEXT:    mv a6, a2
; RV32ZBB-NEXT:    beqz a5, .LBB23_6
; RV32ZBB-NEXT:  # %bb.5:
; RV32ZBB-NEXT:    mv a6, a3
; RV32ZBB-NEXT:  .LBB23_6:
; RV32ZBB-NEXT:    or a1, t0, a7
; RV32ZBB-NEXT:    or a7, t2, t1
; RV32ZBB-NEXT:    srl t0, a6, a4
; RV32ZBB-NEXT:    beqz a5, .LBB23_8
; RV32ZBB-NEXT:  # %bb.7:
; RV32ZBB-NEXT:    mv a3, a2
; RV32ZBB-NEXT:  .LBB23_8:
; RV32ZBB-NEXT:    slli a2, a3, 1
; RV32ZBB-NEXT:    sll a2, a2, a0
; RV32ZBB-NEXT:    or a2, a2, t0
; RV32ZBB-NEXT:    srl a3, a3, a4
; RV32ZBB-NEXT:    slli a4, a6, 1
; RV32ZBB-NEXT:    sll a0, a4, a0
; RV32ZBB-NEXT:    or a0, a0, a3
; RV32ZBB-NEXT:    add a3, a7, a0
; RV32ZBB-NEXT:    add a0, a1, a2
; RV32ZBB-NEXT:    sltu a1, a0, a1
; RV32ZBB-NEXT:    add a1, a3, a1
; RV32ZBB-NEXT:    ret
;
; RV64ZBB-LABEL: rotr_64_mask_multiple:
; RV64ZBB:       # %bb.0:
; RV64ZBB-NEXT:    ror a0, a0, a2
; RV64ZBB-NEXT:    ror a1, a1, a2
; RV64ZBB-NEXT:    add a0, a0, a1
; RV64ZBB-NEXT:    ret
  %maskedamt = and i64 %amt, 63
  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %maskedamt)
  %2 = tail call i64 @llvm.fshr.i64(i64 %b, i64 %b, i64 %maskedamt)
  %3 = add i64 %1, %2
  ret i64 %3
}