[RISCV] Add intrinsics for vfmv.v.f

Also include a special-case pattern that uses vmv.v.x vd, zero when
the argument is 0.0.
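
As a sketch (using the nxv2f32 instantiation exercised in the tests
below; %vl is the VL operand):

  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
         float 0.0, i32 %vl)

splats 0.0 into every element and can be lowered to "vmv.v.x vd, zero"
instead of materializing +0.0 in an FP register for vfmv.v.f.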

Reviewed By: khchen

Differential Revision: https://reviews.llvm.org/D93672
Craig Topper 2020-12-23 10:01:43 -08:00
parent a9448872fe
commit e0110a4740
4 changed files with 876 additions and 1 deletion


@@ -528,6 +528,10 @@ let TargetPrefix = "riscv" in {
                     [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 1;
  }
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
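
llvm_anyfloat_ty and llvm_anyint_ty make the intrinsic overloaded on the
result vector type and on the VL type; for example, the nxv2f32/RV32
instance used in the tests below is declared as:

  declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i32)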


@@ -779,6 +779,14 @@ multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
  }
}

multiclass VPseudoUnaryV_F_NoDummyMask {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_F_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, FPR32>;
    }
  }
}

// The destination EEW is 1.
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from source EEW, we need to use
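
A sketch of what VPseudoUnaryV_F_NoDummyMask above expands to, assuming
the usual MxList LMULs (MF8 through M8) and taking M1, where m.vrclass
is VR, as the representative case:

  let VLMul = V_M1.value in
  def PseudoVFMV_V_F_M1 : VPseudoUnaryNoDummyMask<VR, FPR32>;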
@@ -1995,12 +2003,16 @@ defm PseudoVMFLE : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
defm PseudoVMFGT : VPseudoBinaryM_VX</*IsFloat=*/1>;
defm PseudoVMFGE : VPseudoBinaryM_VX</*IsFloat=*/1>;
//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMV_V : VPseudoUnaryV_F_NoDummyMask;
//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFMERGE : VPseudoBinaryV_XM</*CarryOut =*/0, /*CarryIn =*/true,
                                       /*Constraint =*/"", /*IsFloat=*/true>;
} // Predicates = [HasStdExtV, HasStdExtF]
//===----------------------------------------------------------------------===//
@@ -2414,6 +2426,23 @@ defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
defm "" : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
defm "" : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
foreach fvti = AllFloatVectors in {
  // If we're splatting fpimm0, use vmv.v.x vd, x0.
  def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                         (fvti.Scalar (fpimm0)), GPR:$vl)),
            (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
             X0, (NoX0 GPR:$vl), fvti.SEW)>;
  def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
            (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
             ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
             (NoX0 GPR:$vl), fvti.SEW)>;
}
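
Selection sketch (illustrative, taking fvti = nxv2f32, so LMUL=1 and
SEW=32): a node like

  (nxv2f32 (int_riscv_vfmv_v_f (f32 fpimm0), GPR:$vl))

matches the first pattern and selects PseudoVMV_V_X_M1 with X0 as the
scalar operand, which prints as "vmv.v.x vd, zero"; any other scalar
falls through to the second pattern and selects PseudoVFMV_V_F_M1.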
//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//


@@ -0,0 +1,421 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -target-abi ilp32d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
half,
i32);
define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
half %0,
i32 %1)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
half,
i32);
define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
half %0,
i32 %1)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
half,
i32);
define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
half %0,
i32 %1)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
half,
i32);
define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
half %0,
i32 %1)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
half,
i32);
define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
half %0,
i32 %1)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
half,
i32);
define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
half %0,
i32 %1)
ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
float,
i32);
define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
float %0,
i32 %1)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
float,
i32);
define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
float %0,
i32 %1)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
float,
i32);
define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
float %0,
i32 %1)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
float,
i32);
define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
float %0,
i32 %1)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
float,
i32);
define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
float %0,
i32 %1)
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
double,
i32);
define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
double %0,
i32 %1)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
double,
i32);
define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
double %0,
i32 %1)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
double,
i32);
define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
double %0,
i32 %1)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
double,
i32);
define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
double %0,
i32 %1)
ret <vscale x 8 x double> %a
}
define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
half 0.0,
i32 %0)
ret <vscale x 1 x half> %a
}
define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16_f16(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
half 0.0,
i32 %0)
ret <vscale x 2 x half> %a
}
define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16_f16(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
half 0.0,
i32 %0)
ret <vscale x 4 x half> %a
}
define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16_f16(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
half 0.0,
i32 %0)
ret <vscale x 8 x half> %a
}
define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16_f16(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
half 0.0,
i32 %0)
ret <vscale x 16 x half> %a
}
define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16_f16(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
half 0.0,
i32 %0)
ret <vscale x 32 x half> %a
}
define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32_f32(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
float 0.0,
i32 %0)
ret <vscale x 1 x float> %a
}
define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32_f32(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
float 0.0,
i32 %0)
ret <vscale x 2 x float> %a
}
define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32_f32(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
float 0.0,
i32 %0)
ret <vscale x 4 x float> %a
}
define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32_f32(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
float 0.0,
i32 %0)
ret <vscale x 8 x float> %a
}
define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32_f32(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
float 0.0,
i32 %0)
ret <vscale x 16 x float> %a
}
define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64_f64(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
double 0.0,
i32 %0)
ret <vscale x 1 x double> %a
}
define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64_f64(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
double 0.0,
i32 %0)
ret <vscale x 2 x double> %a
}
define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64_f64(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
double 0.0,
i32 %0)
ret <vscale x 4 x double> %a
}
define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64_f64(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
double 0.0,
i32 %0)
ret <vscale x 8 x double> %a
}


@@ -0,0 +1,421 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
half,
i64);
define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
half %0,
i64 %1)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
half,
i64);
define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
half %0,
i64 %1)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
half,
i64);
define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
half %0,
i64 %1)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
half,
i64);
define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
half %0,
i64 %1)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
half,
i64);
define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
half %0,
i64 %1)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
half,
i64);
define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
half %0,
i64 %1)
ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
float,
i64);
define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
float %0,
i64 %1)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
float,
i64);
define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
float %0,
i64 %1)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
float,
i64);
define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
float %0,
i64 %1)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
float,
i64);
define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
float %0,
i64 %1)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
float,
i64);
define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
float %0,
i64 %1)
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
double,
i64);
define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
double %0,
i64 %1)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
double,
i64);
define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
double %0,
i64 %1)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
double,
i64);
define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
double %0,
i64 %1)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
double,
i64);
define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu
; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
%a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
double %0,
i64 %1)
ret <vscale x 8 x double> %a
}
define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
half 0.0,
i64 %0)
ret <vscale x 1 x half> %a
}
define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16_f16(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
half 0.0,
i64 %0)
ret <vscale x 2 x half> %a
}
define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16_f16(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
half 0.0,
i64 %0)
ret <vscale x 4 x half> %a
}
define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16_f16(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
half 0.0,
i64 %0)
ret <vscale x 8 x half> %a
}
define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16_f16(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
half 0.0,
i64 %0)
ret <vscale x 16 x half> %a
}
define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16_f16(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16
; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
half 0.0,
i64 %0)
ret <vscale x 32 x half> %a
}
define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32_f32(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
float 0.0,
i64 %0)
ret <vscale x 1 x float> %a
}
define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32_f32(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
float 0.0,
i64 %0)
ret <vscale x 2 x float> %a
}
define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32_f32(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
float 0.0,
i64 %0)
ret <vscale x 4 x float> %a
}
define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32_f32(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
float 0.0,
i64 %0)
ret <vscale x 8 x float> %a
}
define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32_f32(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32
; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
float 0.0,
i64 %0)
ret <vscale x 16 x float> %a
}
define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64_f64(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
double 0.0,
i64 %0)
ret <vscale x 1 x double> %a
}
define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64_f64(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
double 0.0,
i64 %0)
ret <vscale x 2 x double> %a
}
define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64_f64(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
double 0.0,
i64 %0)
ret <vscale x 4 x double> %a
}
define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64_f64(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64
; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu
; CHECK: vmv.v.x {{v[0-9]+}}, zero
%a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
double 0.0,
i64 %0)
ret <vscale x 8 x double> %a
}