; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv64 -mattr=+m,+v < %s \
; RUN:    | FileCheck %s -check-prefixes=RV64,RV64-VLENUNK
; RUN: llc -mtriple riscv32 -mattr=+m,+v < %s \
; RUN:    | FileCheck %s -check-prefix=RV32
; RUN: llc -mtriple riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=256 < %s \
; RUN:    | FileCheck %s -check-prefixes=RV64,RV64-VLEN256MIN
; RUN: llc -mtriple riscv64 -mattr=+m,+v -riscv-v-vector-bits-max=256 < %s \
; RUN:    | FileCheck %s -check-prefixes=RV64,RV64-VLEN256MAX
; RUN: llc -mtriple riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=256 -riscv-v-vector-bits-max=256 < %s \
; RUN:    | FileCheck %s -check-prefixes=RV64-VLEN256EXACT

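; On RISC-V, vscale equals VLEN/64 and the vlenb CSR reads as VLEN/8, so a
; plain vscale read lowers to a csrr of vlenb followed by a right shift by 3.

; vscale multiplied by 0 should fold to the constant 0 with no vlenb read.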
define i64 @vscale_zero() nounwind {
; RV64-LABEL: vscale_zero:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
;
; RV32-LABEL: vscale_zero:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a0, 0
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_zero:
; RV64-VLEN256EXACT:       # %bb.0: # %entry
; RV64-VLEN256EXACT-NEXT:    li a0, 0
; RV64-VLEN256EXACT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 0
  ret i64 %1
}

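; vscale multiplied by 1 is just vscale itself: vlenb >> 3. With VLEN known to
; be exactly 256 this folds to the constant 256/64 = 4.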
define i64 @vscale_one() nounwind {
; RV64-LABEL: vscale_one:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    srli a0, a0, 3
; RV64-NEXT:    ret
;
; RV32-LABEL: vscale_one:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    srli a0, a0, 3
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_one:
; RV64-VLEN256EXACT:       # %bb.0: # %entry
; RV64-VLEN256EXACT-NEXT:    li a0, 4
; RV64-VLEN256EXACT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 1
  ret i64 %1
}

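; A power-of-two multiplier folds into the shift: (vlenb >> 3) * 64 becomes
; vlenb << 3.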
define i64 @vscale_uimmpow2xlen() nounwind {
; RV64-LABEL: vscale_uimmpow2xlen:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    ret
;
; RV32-LABEL: vscale_uimmpow2xlen:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_uimmpow2xlen:
; RV64-VLEN256EXACT:       # %bb.0: # %entry
; RV64-VLEN256EXACT-NEXT:    li a0, 256
; RV64-VLEN256EXACT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 64
  ret i64 %1
}

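; A non-power-of-two multiplier is decomposed into shift-and-add:
; (vlenb >> 3) * 24 is 3 * vlenb, i.e. (vlenb << 1) + vlenb.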
define i64 @vscale_non_pow2() nounwind {
; RV64-LABEL: vscale_non_pow2:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a1, a0, 1
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    ret
;
; RV32-LABEL: vscale_non_pow2:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a1, a0, 1
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_non_pow2:
; RV64-VLEN256EXACT:       # %bb.0: # %entry
; RV64-VLEN256EXACT-NEXT:    li a0, 96
; RV64-VLEN256EXACT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = mul i64 %0, 24
  ret i64 %1
}

; vscale will always be a positive number, but we don't know that until after op
; legalization. The and will be considered a NOP and replaced with its input,
; but not until after the select becomes RISCVISD::SELECT_CC. Make sure we
; simplify this and don't leave behind any code for calculating the select
; condition.
define i64 @vscale_select(i32 %x, i32 %y) {
; RV64-LABEL: vscale_select:
; RV64:       # %bb.0:
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    srli a0, a0, 3
; RV64-NEXT:    ret
;
; RV32-LABEL: vscale_select:
; RV32:       # %bb.0:
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    srli a0, a0, 3
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_select:
; RV64-VLEN256EXACT:       # %bb.0:
; RV64-VLEN256EXACT-NEXT:    li a0, 4
; RV64-VLEN256EXACT-NEXT:    ret
  %a = call i64 @llvm.vscale.i64()
  %b = and i64 %a, 4294967295
  %c = icmp eq i32 %x, %y
  %d = select i1 %c, i64 %a, i64 %b
  ret i64 %d
}

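; With +v the minimum VLEN is 128 and the architectural maximum is 65536, so
; vscale lies in [2, 1024]; the mask 2047 covers every bit vscale can set and
; the and folds away.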
define i64 @vscale_high_bits_zero() nounwind {
; RV64-LABEL: vscale_high_bits_zero:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    srli a0, a0, 3
; RV64-NEXT:    ret
;
; RV32-LABEL: vscale_high_bits_zero:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    srli a0, a0, 3
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_high_bits_zero:
; RV64-VLEN256EXACT:       # %bb.0: # %entry
; RV64-VLEN256EXACT-NEXT:    li a0, 4
; RV64-VLEN256EXACT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = and i64 %0, 2047
  ret i64 %1
}

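; Masking with 511 exercises known-bits reasoning about vscale: VLEN is a
; power of two of at least 128, so vscale is a power of two of at least 2 and
; its low bit is known zero (andi 510). A minimum VLEN of 256 clears one more
; low bit (andi 508), and a maximum VLEN of 256 bounds vscale by 4, so the
; and folds away entirely.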
define i64 @vscale_masked() nounwind {
; RV64-VLENUNK-LABEL: vscale_masked:
; RV64-VLENUNK:       # %bb.0: # %entry
; RV64-VLENUNK-NEXT:    csrr a0, vlenb
; RV64-VLENUNK-NEXT:    srli a0, a0, 3
; RV64-VLENUNK-NEXT:    andi a0, a0, 510
; RV64-VLENUNK-NEXT:    ret
;
; RV32-LABEL: vscale_masked:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    srli a0, a0, 3
; RV32-NEXT:    andi a0, a0, 510
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-VLEN256MIN-LABEL: vscale_masked:
; RV64-VLEN256MIN:       # %bb.0: # %entry
; RV64-VLEN256MIN-NEXT:    csrr a0, vlenb
; RV64-VLEN256MIN-NEXT:    srli a0, a0, 3
; RV64-VLEN256MIN-NEXT:    andi a0, a0, 508
; RV64-VLEN256MIN-NEXT:    ret
;
; RV64-VLEN256MAX-LABEL: vscale_masked:
; RV64-VLEN256MAX:       # %bb.0: # %entry
; RV64-VLEN256MAX-NEXT:    csrr a0, vlenb
; RV64-VLEN256MAX-NEXT:    srli a0, a0, 3
; RV64-VLEN256MAX-NEXT:    ret
;
; RV64-VLEN256EXACT-LABEL: vscale_masked:
; RV64-VLEN256EXACT:       # %bb.0: # %entry
; RV64-VLEN256EXACT-NEXT:    li a0, 4
; RV64-VLEN256EXACT-NEXT:    ret
entry:
  %0 = call i64 @llvm.vscale.i64()
  %1 = and i64 %0, 511
  ret i64 %1
}

declare i64 @llvm.vscale.i64()